diff --git a/Documentation/ABI/testing/sysfs-ata b/Documentation/ABI/testing/sysfs-ata index 9ab0ef1dd1c7..299e0d1dc161 100644 --- a/Documentation/ABI/testing/sysfs-ata +++ b/Documentation/ABI/testing/sysfs-ata @@ -107,13 +107,14 @@ Description: described in ATA8 7.16 and 7.17. Only valid if the device is not a PM. - pio_mode: (RO) Transfer modes supported by the device when - in PIO mode. Mostly used by PATA device. + pio_mode: (RO) PIO transfer mode used by the device. + Mostly used by PATA devices. - xfer_mode: (RO) Current transfer mode + xfer_mode: (RO) Current transfer mode. Mostly used by + PATA devices. - dma_mode: (RO) Transfer modes supported by the device when - in DMA mode. Mostly used by PATA device. + dma_mode: (RO) DMA transfer mode used by the device. + Mostly used by PATA devices. class: (RO) Device class. Can be "ata" for disk, "atapi" for packet device, "pmp" for PM, or diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index df42bed09f25..53f07fc41b96 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -142,7 +142,7 @@ Description: Raw capacitance measurement from channel Y. Units after application of scale and offset are nanofarads. -What: /sys/.../iio:deviceX/in_capacitanceY-in_capacitanceZ_raw +What: /sys/.../iio:deviceX/in_capacitanceY-capacitanceZ_raw KernelVersion: 3.2 Contact: linux-iio@vger.kernel.org Description: diff --git a/Documentation/ABI/testing/sysfs-bus-iio-vf610 b/Documentation/ABI/testing/sysfs-bus-iio-vf610 index 308a6756d3bf..491ead804488 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio-vf610 +++ b/Documentation/ABI/testing/sysfs-bus-iio-vf610 @@ -1,4 +1,4 @@ -What: /sys/bus/iio/devices/iio:deviceX/conversion_mode +What: /sys/bus/iio/devices/iio:deviceX/in_conversion_mode KernelVersion: 4.2 Contact: linux-iio@vger.kernel.org Description: diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 1a9b6ac22c38..3a06e3b322f1 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -519,6 +519,8 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/srbds /sys/devices/system/cpu/vulnerabilities/tsx_async_abort /sys/devices/system/cpu/vulnerabilities/itlb_multihit + /sys/devices/system/cpu/vulnerabilities/mmio_stale_data + /sys/devices/system/cpu/vulnerabilities/retbleed Date: January 2018 Contact: Linux kernel mailing list Description: Information about CPU vulnerabilities diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback index ac2947b98950..3d5de44cbbee 100644 --- a/Documentation/ABI/testing/sysfs-driver-xen-blkback +++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback @@ -42,5 +42,5 @@ KernelVersion: 5.10 Contact: SeongJae Park Description: Whether to enable the persistent grants feature or not. Note - that this option only takes effect on newly created backends. + that this option only takes effect on newly connected backends. The default is Y (enable). 
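All of the attributes documented above are plain-text sysfs files and can be read with ordinary file I/O. A minimal userspace sketch in C (the path is one of the vulnerability files added above; the buffer size is an arbitrary choice, not part of the ABI)::

	#include <stdio.h>

	int main(void)
	{
		/* One of the files listed under /sys/devices/system/cpu/vulnerabilities */
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mmio_stale_data", "r");
		char status[256];

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(status, sizeof(status), f))
			printf("mmio_stale_data: %s", status);	/* value includes a trailing newline */
		fclose(f);
		return 0;
	}

On an unaffected machine this simply prints "Not affected".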
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkfront b/Documentation/ABI/testing/sysfs-driver-xen-blkfront index 28008905615f..1f7659aa085c 100644 --- a/Documentation/ABI/testing/sysfs-driver-xen-blkfront +++ b/Documentation/ABI/testing/sysfs-driver-xen-blkfront @@ -15,5 +15,5 @@ KernelVersion: 5.10 Contact: SeongJae Park Description: Whether to enable the persistent grants feature or not. Note - that this option only takes effect on newly created frontends. + that this option only takes effect on newly connected frontends. The default is Y (enable). diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index 759a8291ff4a..75f4bbb06c4d 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -56,8 +56,9 @@ Description: Controls the in-place-update policy. 0x04 F2FS_IPU_UTIL 0x08 F2FS_IPU_SSR_UTIL 0x10 F2FS_IPU_FSYNC - 0x20 F2FS_IPU_ASYNC, + 0x20 F2FS_IPU_ASYNC 0x40 F2FS_IPU_NOCACHE + 0x80 F2FS_IPU_HONOR_OPU_WRITE ==== ================= Refer to segment.h for details. @@ -431,6 +432,7 @@ Description: Show status of f2fs superblock in real time. 0x800 SBI_QUOTA_SKIP_FLUSH skip flushing quota in current CP 0x1000 SBI_QUOTA_NEED_REPAIR quota file may be corrupted 0x2000 SBI_IS_RESIZEFS resizefs is in process + 0x4000 SBI_IS_FREEZING freezing is in process ====== ===================== ================================= What: /sys/fs/f2fs//ckpt_thread_ioprio @@ -512,3 +514,22 @@ Date: July 2021 Contact: "Daeho Jeong" Description: You can control for which gc mode the "gc_reclaimed_segments" node shows. Refer to the description of the modes in "gc_reclaimed_segments". + +What: /sys/fs/f2fs//hot_data_age_threshold +Date: November 2022 +Contact: "Ping Xiong" +Description: When DATA SEPARATION is on, it controls the age threshold for marking + data blocks as hot. By default it is initialized to 262144 blocks + (equivalent to 1GB). + +What: /sys/fs/f2fs//warm_data_age_threshold +Date: November 2022 +Contact: "Ping Xiong" +Description: When DATA SEPARATION is on, it controls the age threshold for marking + data blocks as warm. By default it is initialized to 2621440 blocks + (equivalent to 10GB). + +What: /sys/fs/f2fs//last_age_weight +Date: January 2023 +Contact: "Ping Xiong" +Description: When DATA SEPARATION is on, it controls the weight of the last data block age. diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index ca4dbdd9016d..2adec1e6520a 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -15,3 +15,4 @@ are configurable at compile, boot or run time. tsx_async_abort multihit.rst special-register-buffer-data-sampling.rst + processor_mmio_stale_data.rst diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst new file mode 100644 index 000000000000..c98fd11907cc --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst @@ -0,0 +1,260 @@ +========================================= +Processor MMIO Stale Data Vulnerabilities +========================================= + +Processor MMIO Stale Data Vulnerabilities are a class of memory-mapped I/O +(MMIO) vulnerabilities that can expose data. The sequences of operations for +exposing data range from simple to very complex. Because most of the +vulnerabilities require the attacker to have access to MMIO, many environments +are not affected.
System environments using virtualization where MMIO access is +provided to untrusted guests may need mitigation. These vulnerabilities are +not transient execution attacks. However, these vulnerabilities may propagate +stale data into core fill buffers where the data can subsequently be inferred +by an unmitigated transient execution attack. Mitigation for these +vulnerabilities includes a combination of microcode update and software +changes, depending on the platform and usage model. Some of these mitigations +are similar to those used to mitigate Microarchitectural Data Sampling (MDS) or +those used to mitigate Special Register Buffer Data Sampling (SRBDS). + +Data Propagators +================ +Propagators are operations that result in stale data being copied or moved from +one microarchitectural buffer or register to another. Processor MMIO Stale Data +Vulnerabilities are operations that may result in stale data being directly +read into an architectural, software-visible state or sampled from a buffer or +register. + +Fill Buffer Stale Data Propagator (FBSDP) +----------------------------------------- +Stale data may propagate from fill buffers (FB) into the non-coherent portion +of the uncore on some non-coherent writes. Fill buffer propagation by itself +does not make stale data architecturally visible. Stale data must be propagated +to a location where it is subject to reading or sampling. + +Sideband Stale Data Propagator (SSDP) +------------------------------------- +The sideband stale data propagator (SSDP) is limited to the client (including +Intel Xeon server E3) uncore implementation. The sideband response buffer is +shared by all client cores. For non-coherent reads that go to sideband +destinations, the uncore logic returns 64 bytes of data to the core, including +both requested data and unrequested stale data, from a transaction buffer and +the sideband response buffer. As a result, stale data from the sideband +response and transaction buffers may now reside in a core fill buffer. + +Primary Stale Data Propagator (PSDP) +------------------------------------ +The primary stale data propagator (PSDP) is limited to the client (including +Intel Xeon server E3) uncore implementation. Similar to the sideband response +buffer, the primary response buffer is shared by all client cores. For some +processors, MMIO primary reads will return 64 bytes of data to the core fill +buffer including both requested data and unrequested stale data. This is +similar to the sideband stale data propagator. + +Vulnerabilities +=============== +Device Register Partial Write (DRPW) (CVE-2022-21166) +----------------------------------------------------- +Some endpoint MMIO registers incorrectly handle writes that are smaller than +the register size. Instead of aborting the write or only copying the correct +subset of bytes (for example, 2 bytes for a 2-byte write), more bytes than +specified by the write transaction may be written to the register. On +processors affected by FBSDP, this may expose stale data from the fill buffers +of the core that created the write transaction. + +Shared Buffers Data Sampling (SBDS) (CVE-2022-21125) +---------------------------------------------------- +After propagators may have moved data around the uncore and copied stale data +into client core fill buffers, processors affected by MFBDS can leak data from +the fill buffer. It is limited to the client (including Intel Xeon server E3) +uncore implementation. 
+ + Shared Buffers Data Read (SBDR) (CVE-2022-21123) + ------------------------------------------------ + It is similar to Shared Buffer Data Sampling (SBDS) except that the data is + directly read into the architectural software-visible state. It is limited to + the client (including Intel Xeon server E3) uncore implementation. + + Affected Processors + =================== + Not all CPUs are affected by all variants. For instance, most + processors for the server market (excluding Intel Xeon E3 processors) are + impacted only by Device Register Partial Write (DRPW). + + Below is the list of affected Intel processors [#f1]_: + + =================== ============ ========= + Common name Family_Model Steppings + =================== ============ ========= + HASWELL_X 06_3FH 2,4 + SKYLAKE_L 06_4EH 3 + BROADWELL_X 06_4FH All + SKYLAKE_X 06_55H 3,4,6,7,11 + BROADWELL_D 06_56H 3,4,5 + SKYLAKE 06_5EH 3 + ICELAKE_X 06_6AH 4,5,6 + ICELAKE_D 06_6CH 1 + ICELAKE_L 06_7EH 5 + ATOM_TREMONT_D 06_86H All + LAKEFIELD 06_8AH 1 + KABYLAKE_L 06_8EH 9 to 12 + ATOM_TREMONT 06_96H 1 + ATOM_TREMONT_L 06_9CH 0 + KABYLAKE 06_9EH 9 to 13 + COMETLAKE 06_A5H 2,3,5 + COMETLAKE_L 06_A6H 0,1 + ROCKETLAKE 06_A7H 1 + =================== ============ ========= + +If a CPU is in the affected processor list but is not affected by a particular +variant, this is indicated by new bits in MSR IA32_ARCH_CAPABILITIES. As described in a later +section, mitigation largely remains the same for all the variants, i.e. to +clear the CPU fill buffers via the VERW instruction. + +New bits in MSRs +================ +Newer processors, and microcode updates on existing affected processors, add new +bits to the IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate +specific variants of Processor MMIO Stale Data vulnerabilities and mitigation +capability. + +MSR IA32_ARCH_CAPABILITIES +-------------------------- +Bit 13 - SBDR_SSDP_NO - When set, processor is not affected by either the + Shared Buffers Data Read (SBDR) vulnerability or the sideband stale + data propagator (SSDP). +Bit 14 - FBSDP_NO - When set, processor is not affected by the Fill Buffer + Stale Data Propagator (FBSDP). +Bit 15 - PSDP_NO - When set, processor is not affected by Primary Stale Data + Propagator (PSDP). +Bit 17 - FB_CLEAR - When set, VERW instruction will overwrite CPU fill buffer + values as part of MD_CLEAR operations. Processors that do not + enumerate MDS_NO (meaning they are affected by MDS) but that do + enumerate support for both L1D_FLUSH and MD_CLEAR implicitly enumerate + FB_CLEAR as part of their MD_CLEAR support. +Bit 18 - FB_CLEAR_CTRL - Processor supports read and write to MSR + IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]. On such processors, the FB_CLEAR_DIS + bit can be set to cause the VERW instruction to not perform the + FB_CLEAR action. Not all processors that support FB_CLEAR will support + FB_CLEAR_CTRL. + +MSR IA32_MCU_OPT_CTRL +--------------------- +Bit 3 - FB_CLEAR_DIS - When set, VERW instruction does not perform the FB_CLEAR +action. This may be useful to reduce the performance impact of FB_CLEAR in +cases where system software deems it warranted (for example, when performance +is more critical, or the untrusted software has no MMIO access). Note that +FB_CLEAR_DIS has no impact on enumeration (for example, it does not change +FB_CLEAR or MD_CLEAR enumeration) and it may not be supported on all processors +that enumerate FB_CLEAR.
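The enumeration rules above reduce to a few bit tests on IA32_ARCH_CAPABILITIES. A hedged C sketch (the bit positions are taken from the list above; the helper names are illustrative and not kernel API, and actually reading the MSR requires ring 0 or the /dev/cpu/*/msr interface)::

	#include <stdbool.h>
	#include <stdint.h>

	#define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
	#define ARCH_CAP_SBDR_SSDP_NO		(1ULL << 13)
	#define ARCH_CAP_FBSDP_NO		(1ULL << 14)
	#define ARCH_CAP_PSDP_NO		(1ULL << 15)
	#define ARCH_CAP_FB_CLEAR		(1ULL << 17)
	#define ARCH_CAP_FB_CLEAR_CTRL		(1ULL << 18)

	/* Affected by some MMIO Stale Data variant unless every *_NO bit is set. */
	static bool mmio_stale_data_affected(uint64_t arch_cap)
	{
		const uint64_t all_no = ARCH_CAP_SBDR_SSDP_NO |
					ARCH_CAP_FBSDP_NO |
					ARCH_CAP_PSDP_NO;

		return (arch_cap & all_no) != all_no;
	}

	/* VERW clears the fill buffers only when the microcode sets FB_CLEAR. */
	static bool verw_clears_fill_buffers(uint64_t arch_cap)
	{
		return arch_cap & ARCH_CAP_FB_CLEAR;
	}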
+ + Mitigation + ========== + Like MDS, all variants of Processor MMIO Stale Data vulnerabilities have the + same mitigation strategy: force the CPU to clear the affected buffers before + an attacker can extract the secrets. + + This is achieved by using the otherwise unused and obsolete VERW instruction in + combination with a microcode update. The microcode clears the affected CPU + buffers when the VERW instruction is executed. + + The kernel reuses the MDS function to invoke the buffer clearing: + + mds_clear_cpu_buffers() + + (a simplified sketch of this helper appears further below, before the + command-line documentation) + + On MDS affected CPUs, the kernel already invokes CPU buffer clear on + kernel/userspace, hypervisor/guest and C-state (idle) transitions. No + additional mitigation is needed on such CPUs. + + For CPUs not affected by MDS or TAA, mitigation is needed only against an attacker + with MMIO capability. Therefore, VERW is not required for kernel/userspace. For + the virtualization case, VERW is only needed at VMENTER for a guest with MMIO + capability. + + Mitigation points + ----------------- + Return to user space + ^^^^^^^^^^^^^^^^^^^^ + Same mitigation as MDS when affected by MDS/TAA, otherwise no mitigation + needed. + + C-State transition + ^^^^^^^^^^^^^^^^^^ + Control register writes by the CPU during a C-state transition can propagate data + from the fill buffer to uncore buffers. Execute VERW before a C-state transition to + clear CPU fill buffers. + + Guest entry point + ^^^^^^^^^^^^^^^^^ + Same mitigation as MDS when the processor is also affected by MDS/TAA, otherwise + execute VERW at VMENTER only for MMIO capable guests. On CPUs not affected by + MDS/TAA, a guest without MMIO access cannot extract secrets using Processor MMIO + Stale Data vulnerabilities, so there is no need to execute VERW for such guests. + + Mitigation control on the kernel command line + --------------------------------------------- + The kernel command line allows control of the Processor MMIO Stale Data + mitigations at boot time with the option "mmio_stale_data=". The valid + arguments for this option are: + + ========== ================================================================= + full If the CPU is vulnerable, enable mitigation; CPU buffer clearing + on exit to userspace and when entering a VM. Idle transitions are + protected as well. It does not automatically disable SMT. + full,nosmt Same as full, with SMT disabled on vulnerable CPUs. This is the + complete mitigation. + off Disables mitigation completely. + ========== ================================================================= + + If the CPU is affected and mmio_stale_data=off is not supplied on the kernel + command line, then the kernel selects the appropriate mitigation. + + Mitigation status information + ----------------------------- + The Linux kernel provides a sysfs interface to enumerate the current + vulnerability status of the system: whether the system is vulnerable, and + which mitigations are active. The relevant sysfs file is: + + /sys/devices/system/cpu/vulnerabilities/mmio_stale_data + + The possible values in this file are: + + .. list-table:: + + * - 'Not affected' + - The processor is not vulnerable + * - 'Vulnerable' + - The processor is vulnerable, but no mitigation enabled + * - 'Vulnerable: Clear CPU buffers attempted, no microcode' + - The processor is vulnerable, but microcode is not updated. The + mitigation is enabled on a best effort basis. + * - 'Mitigation: Clear CPU buffers' + - The processor is vulnerable and the CPU buffer clearing mitigation is + enabled.
+ * - 'Unknown: No mitigations' + - The processor vulnerability status is unknown because it is + out of its Servicing period. Mitigation is not attempted. + +Definitions: +------------ + +Servicing period: The process of providing functional and security updates to +Intel processors or platforms, utilizing the Intel Platform Update (IPU) +process or other similar mechanisms. + +End of Servicing Updates (ESU): ESU is the date at which Intel will no +longer provide Servicing, such as through IPU or other similar update +processes. ESU dates will typically be aligned to the end of a quarter. + +If the processor is vulnerable then the following information is appended to +the above information: + + ======================== =========================================== + 'SMT vulnerable' SMT is enabled + 'SMT disabled' SMT is disabled + 'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown + ======================== =========================================== + +References +---------- +.. [#f1] Affected Processors + https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst index 6bd97cd50d62..7e061ed449aa 100644 --- a/Documentation/admin-guide/hw-vuln/spectre.rst +++ b/Documentation/admin-guide/hw-vuln/spectre.rst @@ -422,6 +422,14 @@ The possible values in this file are: 'RSB filling' Protection of RSB on context switch enabled ============= =========================================== + - EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status: + + =========================== ======================================================= + 'PBRSB-eIBRS: SW sequence' CPU is affected and protection of RSB on VMEXIT enabled + 'PBRSB-eIBRS: Vulnerable' CPU is vulnerable + 'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB + =========================== ======================================================= + Full mitigation might require a microcode update from the CPU vendor. When the necessary microcode is not available, the kernel will report vulnerability. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 858dd038499d..c841b5208771 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2533,6 +2533,12 @@ memblock=debug [KNL] Enable memblock debug messages. + android12_only.will_be_removed_soon.memblock_nomap_remove= [KNL] + Setting this to true on the kernel command line will + call memblock_remove on the regions marked with the no-map + property, thereby saving memory by removing the page structs + for those regions. By default this is set to false. + load_ramdisk= [RAM] [Deprecated] lockd.nlm_grace_period=P [NFS] Assign grace period. @@ -2926,6 +2932,8 @@ kvm.nx_huge_pages=off [X86] no_entry_flush [PPC] no_uaccess_flush [PPC] + mmio_stale_data=off [X86] + retbleed=off [X86] Exceptions: This does not have any effect on @@ -2947,6 +2955,8 @@ Equivalent to: l1tf=flush,nosmt [X86] mds=full,nosmt [X86] tsx_async_abort=full,nosmt [X86] + mmio_stale_data=full,nosmt [X86] + retbleed=auto,nosmt [X86] mminit_loglevel= [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this @@ -2956,6 +2966,40 @@ log everything. Information is printed at KERN_DEBUG so loglevel=8 may also need to be specified.
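As background for the "mmio_stale_data=" option documented next: the buffer clearing described in the mitigation section comes down to executing VERW with a valid memory operand. A simplified sketch of the kernel's x86 mds_clear_cpu_buffers() helper (kernel-internal types such as u16 and __KERNEL_DS are assumed)::

	static __always_inline void mds_clear_cpu_buffers(void)
	{
		static const u16 ds = __KERNEL_DS;

		/*
		 * The memory-operand form of VERW must be used; with the
		 * updated microcode, it has the side effect of clearing the
		 * affected CPU buffers. The register form is not sufficient.
		 */
		asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
	}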
+ mmio_stale_data= + [X86,INTEL] Control mitigation for the Processor + MMIO Stale Data vulnerabilities. + + Processor MMIO Stale Data is a class of + vulnerabilities that may expose data after an MMIO + operation. Exposed data could originate in or end up in + the same CPU buffers as those affected by MDS and TAA. + Therefore, similar to MDS and TAA, the mitigation + is to clear the affected CPU buffers. + + This parameter controls the mitigation. The + options are: + + full - Enable mitigation on vulnerable CPUs + + full,nosmt - Enable mitigation and disable SMT on + vulnerable CPUs. + + off - Unconditionally disable mitigation + + On MDS or TAA affected machines, + mmio_stale_data=off can be prevented by an active + MDS or TAA mitigation, as these vulnerabilities are + mitigated with the same mechanism; in order to + disable this mitigation, you need to specify + mds=off and tsx_async_abort=off too. + + Not specifying this option is equivalent to + mmio_stale_data=full. + + For details see: + Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst + module.sig_enforce [KNL] When CONFIG_MODULE_SIG is set, this means that modules without (valid) signatures will fail to load. @@ -3372,8 +3416,7 @@ difficult since unequal pointers can no longer be compared. However, if this command-line option is specified, then all normal pointers will have their true - value printed. Pointers printed via %pK may still be - hashed. This option should only be specified when + value printed. This option should only be specified when debugging the kernel. Please do not use on production kernels. @@ -4130,6 +4173,12 @@ fully seed the kernel's CRNG. Default is controlled by CONFIG_RANDOM_TRUST_CPU. + random.trust_bootloader={on,off} + [KNL] Enable or disable trusting the use of a + seed passed by the bootloader (if available) to + fully seed the kernel's CRNG. Default is controlled + by CONFIG_RANDOM_TRUST_BOOTLOADER. + ras=option[,option,...] [KNL] RAS-specific options cec_disable [X86] @@ -4709,6 +4758,43 @@ retain_initrd [RAM] Keep initrd memory after extraction + retbleed= [X86] Control mitigation of RETBleed (Arbitrary + Speculative Code Execution with Return Instructions) + vulnerability. + + AMD-based UNRET and IBPB mitigations alone do not stop + sibling threads from influencing the predictions of other + sibling threads. For that reason, STIBP is used on + processors that support it, and SMT is disabled on processors + that don't. + + off - no mitigation + auto - automatically select a mitigation + auto,nosmt - automatically select a mitigation, + disabling SMT if necessary for + the full mitigation (only on Zen1 + and older without STIBP). + ibpb - On AMD, mitigate short speculation + windows on basic block boundaries too. + Safe, highest perf impact. It also + enables STIBP if present. Not suitable + on Intel. + ibpb,nosmt - Like "ibpb" above but will disable SMT + when STIBP is not available. This is + the alternative for systems which do not + have STIBP. + unret - Force enable untrained return thunks, + only effective on AMD f15h-f17h based + systems. + unret,nosmt - Like unret, but will disable SMT when STIBP + is not available. This is the alternative for + systems which do not have STIBP. + + Selecting 'auto' will choose a mitigation method at run + time according to the CPU. + + Not specifying this option is equivalent to retbleed=auto. + rfkill.default_state= 0 "airplane mode". All wifi, bluetooth, wimax, gps, fm, etc. communication is blocked by default.
@@ -5058,6 +5144,7 @@ eibrs - enhanced IBRS eibrs,retpoline - enhanced IBRS + Retpolines eibrs,lfence - enhanced IBRS + LFENCE + ibrs - use IBRS to protect kernel Not specifying this option is equivalent to spectre_v2=auto. diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst index 10fde58d0869..3596e3714ec1 100644 --- a/Documentation/admin-guide/pm/cpuidle.rst +++ b/Documentation/admin-guide/pm/cpuidle.rst @@ -685,8 +685,8 @@ the ``menu`` governor to be used on the systems that use the ``ladder`` governor by default this way, for example. The other kernel command line parameters controlling CPU idle time management -described below are only relevant for the *x86* architecture and some of -them affect Intel processors only. +described below are only relevant for the *x86* architecture and references +to ``intel_idle`` affect Intel processors only. The *x86* architecture support code recognizes three kernel command line options related to CPU idle time management: ``idle=poll``, ``idle=halt``, @@ -708,10 +708,13 @@ idle, so it very well may hurt single-thread computations performance as well as energy-efficiency. Thus using it for performance reasons may not be a good idea at all.] -The ``idle=nomwait`` option disables the ``intel_idle`` driver and causes -``acpi_idle`` to be used (as long as all of the information needed by it is -there in the system's ACPI tables), but it is not allowed to use the -``MWAIT`` instruction of the CPUs to ask the hardware to enter idle states. +The ``idle=nomwait`` option prevents the use of the ``MWAIT`` instruction of +the CPU to enter idle states. When this option is used, the ``acpi_idle`` +driver will use the ``HLT`` instruction instead of ``MWAIT``. On systems +running Intel processors, this option disables the ``intel_idle`` driver +and forces the use of the ``acpi_idle`` driver instead. Note that in either +case, the ``acpi_idle`` driver will function only if all the information needed +by it is in the system's ACPI tables. In addition to the architecture-level kernel command line options affecting CPU idle time management, there are parameters affecting individual ``CPUIdle`` diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index e338306f4587..a4b1ebc2e70b 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -1006,28 +1006,22 @@ This is a directory, with the following entries: * ``boot_id``: a UUID generated the first time this is retrieved, and unvarying after that; +* ``uuid``: a UUID generated every time this is retrieved (this can + thus be used to generate UUIDs at will); + * ``entropy_avail``: the pool's entropy count, in bits; * ``poolsize``: the entropy pool size, in bits; * ``urandom_min_reseed_secs``: obsolete (used to determine the minimum - number of seconds between urandom pool reseeding). - -* ``uuid``: a UUID generated every time this is retrieved (this can - thus be used to generate UUIDs at will); + number of seconds between urandom pool reseeding). This file is + writable for compatibility purposes, but writing to it has no effect + on any RNG behavior; + * ``write_wakeup_threshold``: when the entropy count drops below this (as a number of bits), processes waiting to write to ``/dev/random`` - are woken up.
- -If ``drivers/char/random.c`` is built with ``ADD_INTERRUPT_BENCH`` -defined, these additional entries are present: - -* ``add_interrupt_avg_cycles``: the average number of cycles between - interrupts used to feed the pool; - -* ``add_interrupt_avg_deviation``: the standard deviation seen on the - number of cycles between interrupts used to feed the pool. + are woken up. This file is writable for compatibility purposes, but + writing to it has no effect on any RNG behavior. randomize_va_space diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst index f2ab8a5b6a4b..7f553859dba8 100644 --- a/Documentation/admin-guide/sysctl/net.rst +++ b/Documentation/admin-guide/sysctl/net.rst @@ -271,7 +271,7 @@ poll cycle or the number of packets processed reaches netdev_budget. netdev_max_backlog ------------------ -Maximum number of packets, queued on the INPUT side, when the interface +Maximum number of packets, queued on the INPUT side, when the interface receives packets faster than kernel can process them. netdev_rss_key diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index 805c073fd5e6..3ec065727d9b 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -76,10 +76,14 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A57 | #1319537 | ARM64_ERRATUM_1319367 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A57 | #1742098 | ARM64_ERRATUM_1742098 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A72 | #853709 | N/A | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A72 | #1319367 | ARM64_ERRATUM_1319367 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A72 | #1655431 | ARM64_ERRATUM_1742098 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A76 | #1188873,1418040| ARM64_ERRATUM_1418040 | @@ -94,6 +98,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A510 | #2051678 | ARM64_ERRATUM_2051678 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 | @@ -166,6 +172,9 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | Qualcomm Tech. | Kryo4xx Silver | N/A | ARM64_ERRATUM_1024718 | +----------------+-----------------+-----------------+-----------------------------+ +| Qualcomm Tech. 
| Kryo4xx Gold | N/A | ARM64_ERRATUM_1286807 | ++----------------+-----------------+-----------------+-----------------------------+ + +----------------+-----------------+-----------------+-----------------------------+ | Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 | +----------------+-----------------+-----------------+-----------------------------+ diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt index 093cdaefdb37..d8b101c97031 100644 --- a/Documentation/atomic_bitops.txt +++ b/Documentation/atomic_bitops.txt @@ -59,7 +59,7 @@ Like with atomic_t, the rule of thumb is: - RMW operations that have a return value are fully ordered. - RMW operations that are conditional are unordered on FAILURE, - otherwise the above rules apply. In the case of test_and_{}_bit() operations, + otherwise the above rules apply. In the case of test_and_set_bit_lock(), if the bit in memory is unchanged by the operation then it is deemed to have failed. diff --git a/Documentation/conf.py b/Documentation/conf.py index ed2b43ec7754..f47c3f1682db 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -176,7 +176,7 @@ finally: # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: diff --git a/Documentation/core-api/dma-attributes.rst b/Documentation/core-api/dma-attributes.rst index 17706dc91ec9..1887d92e8e92 100644 --- a/Documentation/core-api/dma-attributes.rst +++ b/Documentation/core-api/dma-attributes.rst @@ -130,11 +130,3 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged subsystem that the buffer is fully accessible at the elevated privilege level (and ideally inaccessible or at least read-only at the lesser-privileged levels). - -DMA_ATTR_OVERWRITE ------------------- - -This is a hint to the DMA-mapping subsystem that the device is expected to -overwrite the entire mapped size, thus the caller does not require any of the -previous buffer contents to be preserved. This allows bounce-buffering -implementations to optimise DMA_FROM_DEVICE transfers. 
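To make the amended test_and_set_bit_lock() wording above concrete, here is a lock-style usage sketch (MY_BUSY, my_flags and do_work() are hypothetical; the bitops themselves are the standard kernel API)::

	#include <linux/bitops.h>
	#include <linux/types.h>

	#define MY_BUSY	0	/* hypothetical flag bit */

	static unsigned long my_flags;

	static void do_work(void)
	{
		/* ... work guarded by the MY_BUSY bit ... */
	}

	static bool my_try_work(void)
	{
		/* FAILURE (bit was already set) is unordered, per the rule above. */
		if (test_and_set_bit_lock(MY_BUSY, &my_flags))
			return false;

		/* Success gives acquire semantics: safe to touch protected state. */
		do_work();

		/* clear_bit_unlock() provides the matching release ordering. */
		clear_bit_unlock(MY_BUSY, &my_flags);
		return true;
	}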
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml index c97d4a580f47..42ec1d5fed38 100644 --- a/Documentation/devicetree/bindings/arm/qcom.yaml +++ b/Documentation/devicetree/bindings/arm/qcom.yaml @@ -123,8 +123,8 @@ properties: - const: qcom,msm8974 - items: - - const: qcom,msm8916-mtp/1 - const: qcom,msm8916-mtp + - const: qcom,msm8916-mtp/1 - const: qcom,msm8916 - items: diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml index 5a5b2214f0ca..005e0edd4609 100644 --- a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml +++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml @@ -22,16 +22,32 @@ properties: const: qcom,gcc-msm8996 clocks: + minItems: 3 items: - description: XO source - description: Second XO source - description: Sleep clock source + - description: PCIe 0 PIPE clock (optional) + - description: PCIe 1 PIPE clock (optional) + - description: PCIe 2 PIPE clock (optional) + - description: USB3 PIPE clock (optional) + - description: UFS RX symbol 0 clock (optional) + - description: UFS RX symbol 1 clock (optional) + - description: UFS TX symbol 0 clock (optional) clock-names: + minItems: 3 items: - const: cxo - const: cxo2 - const: sleep_clk + - const: pcie_0_pipe_clk_src + - const: pcie_1_pipe_clk_src + - const: pcie_2_pipe_clk_src + - const: usb3_phy_pipe_clk_src + - const: ufs_rx_symbol_0_clk_src + - const: ufs_rx_symbol_1_clk_src + - const: ufs_tx_symbol_0_clk_src '#clock-cells': const: 1 diff --git a/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml b/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml index 0cebaaefda03..419c3b2ac5a6 100644 --- a/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml +++ b/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml @@ -72,6 +72,7 @@ examples: dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>; reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>; rotation = <270>; + backlight = <&backlight>; }; }; diff --git a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml index 372679dbd216..7e250ce136ee 100644 --- a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml +++ b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml @@ -61,7 +61,7 @@ if: then: properties: clocks: - maxItems: 2 + minItems: 2 required: - clock-names diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt index 8a9f3559335b..7e14e26676ec 100644 --- a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt +++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt @@ -34,8 +34,8 @@ Example: Use specific request line passing from dma For example, MMC request line is 5 - sdhci: sdhci@98e00000 { - compatible = "moxa,moxart-sdhci"; + mmc: mmc@98e00000 { + compatible = "moxa,moxart-mmc"; reg = <0x98e00000 0x5C>; interrupts = <5 0>; clocks = <&clk_apb>; diff --git a/Documentation/devicetree/bindings/gpio/gpio-altera.txt b/Documentation/devicetree/bindings/gpio/gpio-altera.txt index 146e554b3c67..2a80e272cd66 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-altera.txt +++ b/Documentation/devicetree/bindings/gpio/gpio-altera.txt @@ -9,8 +9,9 @@ Required properties: - The second cell is reserved and is currently unused. - gpio-controller : Marks the device node as a GPIO controller. 
- interrupt-controller: Mark the device node as an interrupt controller -- #interrupt-cells : Should be 1. The interrupt type is fixed in the hardware. +- #interrupt-cells : Should be 2. The interrupt type is fixed in the hardware. - The first cell is the GPIO offset number within the GPIO controller. + - The second cell is the interrupt trigger type and level flags. - interrupts: Specify the interrupt. - altr,interrupt-type: Specifies the interrupt trigger type the GPIO hardware is synthesized. This field is required if the Altera GPIO controller @@ -38,6 +39,6 @@ gpio_altr: gpio@ff200000 { altr,interrupt-type = ; #gpio-cells = <2>; gpio-controller; - #interrupt-cells = <1>; + #interrupt-cells = <2>; interrupt-controller; }; diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml index 56c78317f9b7..57b9d2344b8a 100644 --- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.yaml @@ -82,10 +82,9 @@ allOf: - mediatek,mt2701-smi-common then: properties: - clock: - items: - minItems: 3 - maxItems: 3 + clocks: + minItems: 3 + maxItems: 3 clock-names: items: - const: apb @@ -101,10 +100,9 @@ allOf: then: properties: - clock: - items: - minItems: 4 - maxItems: 4 + clocks: + minItems: 4 + maxItems: 4 clock-names: items: - const: apb @@ -114,10 +112,9 @@ allOf: else: # for gen2 HW that don't have gals properties: - clock: - items: - minItems: 2 - maxItems: 2 + clocks: + minItems: 2 + maxItems: 2 clock-names: items: - const: apb diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml index 06b623b34f48..2335a52f6f5a 100644 --- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml +++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml @@ -77,11 +77,11 @@ allOf: then: properties: - clock: - items: - minItems: 3 - maxItems: 3 + clocks: + minItems: 2 + maxItems: 3 clock-names: + minItems: 2 items: - const: apb - const: smi @@ -89,10 +89,9 @@ allOf: else: properties: - clock: - items: - minItems: 2 - maxItems: 2 + clocks: + minItems: 2 + maxItems: 2 clock-names: items: - const: apb @@ -106,7 +105,6 @@ allOf: - mediatek,mt2701-smi-larb - mediatek,mt2712-smi-larb - mediatek,mt6779-smi-larb - - mediatek,mt8167-smi-larb then: required: diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml index c78ab7e2eee7..fa83c69820f8 100644 --- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml @@ -58,7 +58,7 @@ patternProperties: $ref: "/schemas/types.yaml#/definitions/string" enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1, EMMCG4, - EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP, + EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1, GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1, I2C10, I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, diff --git a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml 
b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml index c2b0a8b6da1e..7cebd9ddfffd 100644 --- a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml +++ b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml @@ -47,12 +47,6 @@ properties: description: Properties for single LDO regulator. - properties: - regulator-name: - pattern: "^LDO[1-5]$" - description: - should be "LDO1", ..., "LDO5" - unevaluatedProperties: false "^BUCK[1-6]$": @@ -62,11 +56,6 @@ properties: Properties for single BUCK regulator. properties: - regulator-name: - pattern: "^BUCK[1-6]$" - description: - should be "BUCK1", ..., "BUCK6" - nxp,dvs-run-voltage: $ref: "/schemas/types.yaml#/definitions/uint32" minimum: 600000 diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml index ef5698f426b2..392204a08e96 100644 --- a/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml +++ b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml @@ -45,6 +45,7 @@ properties: maxItems: 2 interconnect-names: + minItems: 1 items: - const: qspi-config - const: qspi-memory diff --git a/Documentation/driver-api/vfio.rst b/Documentation/driver-api/vfio.rst index f1a4d3c3ba0b..d3a02300913a 100644 --- a/Documentation/driver-api/vfio.rst +++ b/Documentation/driver-api/vfio.rst @@ -249,18 +249,23 @@ VFIO bus driver API VFIO bus drivers, such as vfio-pci make use of only a few interfaces into VFIO core. When devices are bound and unbound to the driver, -the driver should call vfio_add_group_dev() and vfio_del_group_dev() -respectively:: - extern int vfio_add_group_dev(struct device *dev, - const struct vfio_device_ops *ops, - void *device_data); + void vfio_init_group_dev(struct vfio_device *device, + struct device *dev, + const struct vfio_device_ops *ops, + void *device_data); + int vfio_register_group_dev(struct vfio_device *device); + void vfio_unregister_group_dev(struct vfio_device *device); - extern void *vfio_del_group_dev(struct device *dev); - -vfio_add_group_dev() indicates to the core to begin tracking the -iommu_group of the specified dev and register the dev as owned by -a VFIO bus driver. The driver provides an ops structure for callbacks +The driver should embed the vfio_device in its own structure and call +vfio_init_group_dev() to pre-configure it before registration. +vfio_register_group_dev() indicates to the core to begin tracking the +iommu_group of the specified dev and register the dev as owned by a VFIO bus +driver. Once vfio_register_group_dev() returns, it is possible for userspace to +start accessing the driver, thus the driver should ensure it is completely +ready before calling it. The driver provides an ops structure for callbacks similar to a file operations structure:: struct vfio_device_ops { @@ -276,7 +281,7 @@ similar to a file operations structure:: }; Each function is passed the device_data that was originally registered -in the vfio_add_group_dev() call above. This allows the bus driver +in the vfio_register_group_dev() call above. This allows the bus driver an easy place to store its opaque, private data. The open/release callbacks are issued when a new file descriptor is created for a device (via VFIO_GROUP_GET_DEVICE_FD).
The ioctl interface provides diff --git a/Documentation/filesystems/ext4/attributes.rst b/Documentation/filesystems/ext4/attributes.rst index 54386a010a8d..871d2da7a0a9 100644 --- a/Documentation/filesystems/ext4/attributes.rst +++ b/Documentation/filesystems/ext4/attributes.rst @@ -76,7 +76,7 @@ The beginning of an extended attribute block is in - Checksum of the extended attribute block. * - 0x14 - \_\_u32 - - h\_reserved[2] + - h\_reserved[3] - Zero. The checksum is calculated against the FS UUID, the 64-bit block number diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst index b91e5a8444d5..532c59becb15 100644 --- a/Documentation/filesystems/f2fs.rst +++ b/Documentation/filesystems/f2fs.rst @@ -300,6 +300,15 @@ inlinecrypt When possible, encrypt/decrypt the contents of encrypted Documentation/block/inline-encryption.rst. atgc Enable age-threshold garbage collection; it provides high effectiveness and efficiency on background GC. +memory=%s Control memory mode. This supports "normal" and "low" modes. + "low" mode is introduced to support low memory devices. + Because of the nature of low memory devices, in this mode, f2fs + will try to save memory, sometimes at the cost of performance. + "normal" mode is the default and behaves the same as before. +age_extent_cache Enable an age extent cache based on an rb-tree. It records + the data block update frequency of each extent per inode, in + order to provide better temperature hints for data block + allocation. ======================== ============================================================ Debugfs Entries diff --git a/Documentation/firmware-guide/acpi/apei/einj.rst b/Documentation/firmware-guide/acpi/apei/einj.rst index e588bccf5158..344284236a81 100644 --- a/Documentation/firmware-guide/acpi/apei/einj.rst +++ b/Documentation/firmware-guide/acpi/apei/einj.rst @@ -168,7 +168,7 @@ An error injection example:: 0x00000008 Memory Correctable 0x00000010 Memory Uncorrectable non-fatal # echo 0x12345000 > param1 # Set memory address for injection - # echo $((-1 << 12)) > param2 # Mask 0xfffffffffffff000 - anywhere in this page + # echo 0xfffffffffffff000 > param2 # Mask - anywhere in this page # echo 0x8 > error_type # Choose correctable memory error # echo 1 > error_inject # Inject now diff --git a/Documentation/input/joydev/joystick.rst b/Documentation/input/joydev/joystick.rst index 9746fd76cc58..f38c330c028e 100644 --- a/Documentation/input/joydev/joystick.rst +++ b/Documentation/input/joydev/joystick.rst @@ -517,6 +517,7 @@ All I-Force devices are supported by the iforce module. This includes: * AVB Mag Turbo Force * AVB Top Shot Pegasus * AVB Top Shot Force Feedback Racing Wheel +* Boeder Force Feedback Wheel * Logitech WingMan Force * Logitech WingMan Force Wheel * Guillemot Race Leader Force Feedback diff --git a/Documentation/leds/well-known-leds.txt b/Documentation/leds/well-known-leds.txt new file mode 100644 index 000000000000..2160382c86be --- /dev/null +++ b/Documentation/leds/well-known-leds.txt @@ -0,0 +1,72 @@ +-*- org -*- + +It is important to provide a consistent interface to +userland. LED devices have one problem there, and that is the naming of +directories in /sys/class/leds. It would be nice if userland +just knew the right "name" for a given LED function, but the situation +has become more complex. + +Anyway, if backwards compatibility is not an issue, new code should +use one of the "good" names from this list, and you should extend the +list where applicable.
+ + Legacy names are listed, too; if you are writing an application that + wants to use a particular feature, you should probe for the good name first, + then try the legacy ones. + + Notice there's a list of functions in include/dt-bindings/leds/common.h. + + * Gamepads and joysticks + + Game controllers may feature LEDs to indicate a player number. This is commonly + used on game consoles in which multiple controllers can be connected to a system. + The "player LEDs" are then programmed with a pattern to indicate a particular + player. For example, a game controller with 4 LEDs may be programmed with "x---" + to indicate player 1, "-x--" to indicate player 2, and so on, where "x" means on. + Input drivers can utilize the LED class to expose the individual player LEDs + of a game controller using the function "player". + Note: tracking and management of Player IDs is the responsibility of user space, + though drivers may pick a default value. + + Good: "input*:*:player-{1,2,3,4,5}" + + * Keyboards + + Good: "input*:*:capslock" + Good: "input*:*:scrolllock" + Good: "input*:*:numlock" + Legacy: "shift-key-light" (Motorola Droid 4, capslock) + + Set of common keyboard LEDs, going back to PC AT or so. + + Legacy: "tpacpi::thinklight" (IBM/Lenovo Thinkpads) + Legacy: "lp5523:kb{1,2,3,4,5,6}" (Nokia N900) + + Frontlight/backlight of the main keyboard. + + Legacy: "button-backlight" (Motorola Droid 4) + + Some phones have touch buttons below the screen; these are separate from the main + keyboard, and this is their backlight. + + * Sound subsystem + + Good: "platform:*:mute" + Good: "platform:*:micmute" + + LEDs on the notebook body, indicating that sound input / output is muted. + + * System notification + + Legacy: "status-led:{red,green,blue}" (Motorola Droid 4) + Legacy: "lp5523:{r,g,b}" (Nokia N900) + + Phones usually have a multi-color status LED. + + * Power management + + Good: "platform:*:charging" (allwinner sun50i) + + * Screen + + Good: ":backlight" (Motorola Droid 4) diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index 40278344dc48..eedce7474ad3 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -988,7 +988,7 @@ cipso_cache_enable - BOOLEAN cipso_cache_bucket_size - INTEGER The CIPSO label cache consists of a fixed size hash table with each hash bucket containing a number of cache entries. This variable limits - the number of entries in each hash bucket; the larger the value the + the number of entries in each hash bucket; the larger the value is, the more CIPSO label mappings can be cached. When the number of entries in a given hash bucket reaches this limit adding new entries causes the oldest entry in the bucket to be removed to make room. @@ -1093,7 +1093,7 @@ ip_autobind_reuse - BOOLEAN option should only be set by experts. Default: 0 -ip_dynaddr - BOOLEAN +ip_dynaddr - INTEGER If set non-zero, enables support for dynamic addresses. If set to a non-zero value larger than 1, a kernel log message will be printed when dynamic address rewriting @@ -2642,7 +2642,14 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max Default: 4K sctp_wmem - vector of 3 INTEGERs: min, default, max - Currently this tunable has no effect. + Only the first value ("min") is used; "default" and "max" are + ignored. + + min: Minimum size of send buffer that can be used by SCTP sockets. + It is guaranteed to each SCTP socket (but not association) even + under moderate memory pressure.
+ + Default: 4K addr_scope_policy - INTEGER Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00 diff --git a/Documentation/networking/netdevices.rst b/Documentation/networking/netdevices.rst index 5a85fcc80c76..557b97483437 100644 --- a/Documentation/networking/netdevices.rst +++ b/Documentation/networking/netdevices.rst @@ -10,18 +10,177 @@ Introduction The following is a random collection of documentation regarding network devices. -struct net_device allocation rules -================================== +struct net_device lifetime rules +================================ Network device structures need to persist even after module is unloaded and must be allocated with alloc_netdev_mqs() and friends. If device has registered successfully, it will be freed on last use -by free_netdev(). This is required to handle the pathologic case cleanly -(example: rmmod mydriver </sys/class/net/mydriver/mtu ) +by free_netdev(). This is required to handle the pathological case cleanly +(example: ``rmmod mydriver </sys/class/net/mydriver/mtu``) + +There are two groups of APIs for registering struct net_device. +The first group can be used in normal contexts where ``rtnl_lock`` is not already +held: register_netdev(), unregister_netdev(). +The second group can be used when ``rtnl_lock`` is already held: +register_netdevice(), unregister_netdevice(), free_netdev(). + +Example flow of netdev handling under ``rtnl_lock``: + + static void my_setup(struct net_device *dev) + { + dev->needs_free_netdev = true; + } + + static void my_destructor(struct net_device *dev) + { + some_obj_destroy(priv->obj); + some_uninit(priv); + } + + int create_link() + { + struct my_device_priv *priv; + int err; + + ASSERT_RTNL(); + + dev = alloc_netdev(sizeof(*priv), "net%d", NET_NAME_UNKNOWN, my_setup); + if (!dev) + return -ENOMEM; + priv = netdev_priv(dev); + + /* Implicit constructor */ + err = some_init(priv); + if (err) + goto err_free_dev; + + priv->obj = some_obj_create(); + if (!priv->obj) { + err = -ENOMEM; + goto err_some_uninit; + } + /* End of constructor, set the destructor: */ + dev->priv_destructor = my_destructor; + + err = register_netdevice(dev); + if (err) + /* register_netdevice() calls destructor on failure */ + goto err_free_dev; + + /* If anything fails now unregister_netdevice() (or unregister_netdev()) + * will take care of calling my_destructor and free_netdev(). + */ + + return 0; + + err_some_uninit: + some_uninit(priv); + err_free_dev: + free_netdev(dev); + return err; + } + +If struct net_device.priv_destructor is set it will be called by the core +some time after unregister_netdevice(); it will also be called if +register_netdevice() fails. The callback may be invoked with or without +``rtnl_lock`` held. + +There is no explicit constructor callback; the driver "constructs" the private +netdev state after allocating it and before registration. + +Setting struct net_device.needs_free_netdev makes the core call free_netdev() +automatically after unregister_netdevice() when all references to the device +are gone. It only takes effect after a successful call to register_netdevice(), +so if register_netdevice() fails the driver is responsible for calling +free_netdev(). + +free_netdev() is safe to call on error paths right after unregister_netdevice() +or when register_netdevice() fails. Parts of the netdev (de)registration process +happen after ``rtnl_lock`` is released; therefore, in those cases free_netdev() +will defer some of the processing until ``rtnl_lock`` is released. + +Devices spawned from struct rtnl_link_ops should never free the +struct net_device directly. + +.ndo_init and .ndo_uninit +~~~~~~~~~~~~~~~~~~~~~~~~~ + +``.ndo_init`` and ``.ndo_uninit`` callbacks are called during net_device +registration and de-registration, under ``rtnl_lock``. Drivers can use +those e.g. when parts of their init process need to run under ``rtnl_lock``. + +``.ndo_init`` runs before the device is visible in the system; ``.ndo_uninit`` +runs during de-registration, after the device is closed but while other subsystems +may still have outstanding references to the netdevice.
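By way of contrast with the create_link() example above, the simple pattern for drivers that register outside of ``rtnl_lock`` can be sketched as follows (struct my_bus_device, struct my_device_priv and my_get_netdev() are hypothetical; error handling is trimmed)::

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	struct my_device_priv {
		int some_state;	/* freed together with the net_device */
	};

	int my_probe(struct my_bus_device *bdev)
	{
		struct net_device *dev;
		int err;

		dev = alloc_etherdev(sizeof(struct my_device_priv));
		if (!dev)
			return -ENOMEM;

		/* All setup must be complete before the device becomes visible. */
		err = register_netdev(dev);
		if (err)
			free_netdev(dev);	/* safe: registration failed */
		return err;
	}

	void my_remove(struct my_bus_device *bdev)
	{
		struct net_device *dev = my_get_netdev(bdev);	/* hypothetical lookup */

		unregister_netdev(dev);
		free_netdev(dev);	/* safe right after unregistration */
	}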
MTU === diff --git a/Documentation/process/code-of-conduct-interpretation.rst b/Documentation/process/code-of-conduct-interpretation.rst index e899f14a4ba2..43da2cc2e3b9 100644 --- a/Documentation/process/code-of-conduct-interpretation.rst +++ b/Documentation/process/code-of-conduct-interpretation.rst @@ -51,7 +51,7 @@ the Technical Advisory Board (TAB) or other maintainers if you're uncertain how to handle situations that come up. It will not be considered a violation report unless you want it to be. If you are uncertain about approaching the TAB or any other maintainers, please -reach out to our conflict mediator, Mishi Choudhary . +reach out to our conflict mediator, Joanna Lee . In the end, "be kind to each other" is really what the end goal is for everybody. We know everyone is human and we all fail at times, but the diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst index 5a267f5d1a50..edd263e0992d 100644 --- a/Documentation/process/submitting-patches.rst +++ b/Documentation/process/submitting-patches.rst @@ -71,7 +71,7 @@ as you intend it to. The maintainer will thank you if you write your patch description in a form which can be easily pulled into Linux's source code management -system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`. +system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`. Solve only one problem per patch. If your description starts to get long, that's a sign that you probably need to split up your patch. diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst index f99be8062bc8..a9ffc4f3ee69 100644 --- a/Documentation/trace/histogram.rst +++ b/Documentation/trace/histogram.rst @@ -39,7 +39,7 @@ Documentation written by Tom Zanussi will use the event's kernel stacktrace as the key. The keywords 'keys' or 'key' can be used to specify keys, and the keywords 'values', 'vals', or 'val' can be used to specify values. Compound - keys consisting of up to two fields can be specified by the 'keys' + keys consisting of up to three fields can be specified by the 'keys' keyword. Hashing a compound key produces a unique entry in the table for each unique combination of component keys, and can be useful for providing more fine-grained summaries of event data. diff --git a/Documentation/virt/kvm/devices/vm.rst b/Documentation/virt/kvm/devices/vm.rst index 0aa5b1cfd700..60acc39e0e93 100644 --- a/Documentation/virt/kvm/devices/vm.rst +++ b/Documentation/virt/kvm/devices/vm.rst @@ -215,6 +215,7 @@ KVM_S390_VM_TOD_EXT). :Parameters: address of a buffer in user space to store the data (u8) to :Returns: -EFAULT if the given address is not accessible from kernel space; -EINVAL if setting the TOD clock extension to != 0 is not supported + -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor) 3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW ----------------------------------- @@ -224,6 +225,7 @@ the POP (u64). :Parameters: address of a buffer in user space to store the data (u64) to :Returns: -EFAULT if the given address is not accessible from kernel space + -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor) 3.3. ATTRIBUTE: KVM_S390_VM_TOD_EXT ----------------------------------- @@ -237,6 +239,7 @@ it, it is stored as 0 and not allowed to be set to a value != 0. 
diff --git a/Documentation/vm/memory-model.rst b/Documentation/vm/memory-model.rst
index 9daadf9faba1..ce398a7dc6cd 100644
--- a/Documentation/vm/memory-model.rst
+++ b/Documentation/vm/memory-model.rst
@@ -51,8 +51,7 @@ call :c:func:`free_area_init` function. Yet, the mappings array is not
 usable until the call to :c:func:`memblock_free_all` that hands all the
 memory to the page allocator.
 
-If an architecture enables `CONFIG_ARCH_HAS_HOLES_MEMORYMODEL` option,
-it may free parts of the `mem_map` array that do not cover the
+An architecture may free parts of the `mem_map` array that do not cover the
 actual physical pages. In such case, the architecture specific
 :c:func:`pfn_valid` implementation should take the holes in the
 `mem_map` into account.
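The memory-model.rst change above drops the CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
wording but keeps the requirement that an architecture which frees parts of
`mem_map` provide a hole-aware pfn_valid(). As a rough illustration, patterned
on what arm64 did around this era rather than taken from this patch, such an
implementation can delegate the validity check to memblock:

	#include <linux/memblock.h>
	#include <linux/mmzone.h>
	#include <linux/pfn.h>

	/* Sketch of an arch-specific pfn_valid() that respects mem_map holes. */
	int pfn_valid(unsigned long pfn)
	{
		phys_addr_t addr = PFN_PHYS(pfn);

		/* Reject pfns whose physical address does not round-trip,
		 * i.e. the shift overflowed phys_addr_t.
		 */
		if (PHYS_PFN(addr) != pfn)
			return 0;

		/* Only memory that memblock mapped kept its mem_map entries;
		 * ranges never present had their part of the map freed.
		 */
		return memblock_is_map_memory(addr);
	}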
diff --git a/MAINTAINERS b/MAINTAINERS
index 18d7e173ce34..63a1fdd5ed98 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14708,6 +14708,8 @@ F:	arch/mips/generic/board-ranchu.c
 
 RANDOM NUMBER DRIVER
 M:	"Theodore Ts'o"
+M:	Jason A. Donenfeld
+T:	git https://git.kernel.org/pub/scm/linux/kernel/git/crng/random.git
 S:	Maintained
 F:	drivers/char/random.c
@@ -19283,7 +19285,8 @@ F:	arch/x86/xen/*swiotlb*
 F:	drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
-M:	Darrick J. Wong
+M:	Amir Goldstein
+M:	Darrick J. Wong
 M:	linux-xfs@vger.kernel.org
 L:	linux-xfs@vger.kernel.org
 S:	Supported
diff --git a/Makefile b/Makefile
index 5ad3bc7f4e91..49d1d03a895e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 110
+SUBLEVEL = 160
 EXTRAVERSION =
 NAME = Dare mighty things
 
@@ -488,6 +488,8 @@ CC = $(abspath $(srctree)/scripts/gcc-wrapper.py) $(CROSS_COMPILE)gcc
 endif
 endif
 
+PAHOLE_FLAGS = $(shell PAHOLE=$(PAHOLE) $(srctree)/scripts/pahole-flags.sh)
+
 CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
 	      -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
 NOSTDINC_FLAGS :=
@@ -542,6 +544,7 @@ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
+export PAHOLE_FLAGS
 
 # Files to ignore in find ... statements
@@ -696,12 +699,21 @@ ifdef CONFIG_FUNCTION_TRACER
 CC_FLAGS_FTRACE := -pg
 endif
 
-RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
-RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
-RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
-RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
-RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
-RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
+ifdef CONFIG_CC_IS_GCC
+RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix)
+RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
+endif
+ifdef CONFIG_CC_IS_CLANG
+RETPOLINE_CFLAGS := -mretpoline-external-thunk
+RETPOLINE_VDSO_CFLAGS := -mretpoline
+endif
+
+ifdef CONFIG_RETHUNK
+RETHUNK_CFLAGS := -mfunction-return=thunk-extern
+RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS)
+endif
+
 export RETPOLINE_CFLAGS
 export RETPOLINE_VDSO_CFLAGS
 
@@ -792,6 +804,9 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
 
 KBUILD_CFLAGS += $(stackp-flags-y)
 
+KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror
+KBUILD_CFLAGS += $(KBUILD_CFLAGS-y)
+
 ifdef CONFIG_CC_IS_CLANG
 KBUILD_CPPFLAGS += -Qunused-arguments
 KBUILD_CFLAGS += -Wno-format-invalid-specifier
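The retpoline/rethunk hunk above only wires up compiler flags; the effect is
in code generation. As a small assumed example (not from this patch; the thunk
symbols are normally provided by the kernel, so a standalone build would have
to supply them itself), compiling C like the following with the GCC flags from
the hunk, e.g.

	gcc -c -mindirect-branch=thunk-extern -mindirect-branch-register \
	    -mfunction-return=thunk-extern retpoline_demo.c

makes the compiler emit "call __x86_indirect_thunk_<reg>" in place of the
indirect "call *%reg", and "jmp __x86_return_thunk" in place of each "ret":

	typedef int (*op_fn)(int);

	static int add_one(int x)
	{
		return x + 1;	/* return goes via the return thunk */
	}

	int apply(op_fn op, int x)
	{
		return op(x);	/* indirect call goes via an indirect-branch thunk */
	}

	int demo(void)
	{
		return apply(add_one, 41);
	}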
@@ -833,12 +848,12 @@ endif
 
 # Initialize all stack variables with a zero value.
 ifdef CONFIG_INIT_STACK_ALL_ZERO
-# Future support for zero initialization is still being debated, see
-# https://bugs.llvm.org/show_bug.cgi?id=45497. These flags are subject to being
-# renamed or dropped.
 KBUILD_CFLAGS += -ftrivial-auto-var-init=zero
+ifdef CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER
+# https://github.com/llvm/llvm-project/issues/44842
 KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
 endif
+endif
 
 DEBUG_CFLAGS :=
@@ -856,7 +871,9 @@ else
 DEBUG_CFLAGS += -g
 endif
 
-ifneq ($(LLVM_IAS),1)
+ifeq ($(LLVM_IAS),1)
+KBUILD_AFLAGS += -g
+else
 KBUILD_AFLAGS += -Wa,-gdwarf-2
 endif
 
@@ -1044,6 +1061,9 @@ KBUILD_CFLAGS += $(KCFLAGS)
 KBUILD_LDFLAGS_MODULE += --build-id=sha1
 LDFLAGS_vmlinux += --build-id=sha1
 
+KBUILD_LDFLAGS += -z noexecstack
+KBUILD_LDFLAGS += $(call ld-option,--no-warn-rwx-segments)
+
 ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
 LDFLAGS_vmlinux += $(call ld-option, -X,)
 endif
@@ -1217,7 +1237,7 @@ endif
 	$(Q)$(MAKE) $(hdr-inst)=$(hdr-prefix)arch/$(SRCARCH)/include/uapi
 
 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ io_uring/
 
 vmlinux-dirs := $(patsubst %/,%,$(filter %/, \
 		  $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -1227,13 +1247,11 @@ vmlinux-alldirs := $(sort $(vmlinux-dirs) Documentation \
 		  $(patsubst %/,%,$(filter %/, $(core-) \
 			$(drivers-) $(libs-))))
 
-subdir-modorder := $(addsuffix modules.order,$(filter %/, \
-			$(core-y) $(core-m) $(libs-y) $(libs-m) \
-			$(drivers-y) $(drivers-m)))
-
 build-dirs := $(vmlinux-dirs)
 clean-dirs := $(vmlinux-alldirs)
 
+subdir-modorder := $(addsuffix /modules.order, $(build-dirs))
+
 # Externally visible symbols (used by link-vmlinux.sh)
 KBUILD_VMLINUX_OBJS := $(head-y) $(patsubst %/,%/built-in.a, $(core-y))
 KBUILD_VMLINUX_OBJS += $(addsuffix built-in.a, $(filter %/, $(libs-y)))
@@ -1262,7 +1280,7 @@ KBUILD_MODULES := 1
 
 autoksyms_recursive: descend modules.order
 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
-	  "$(MAKE) -f $(srctree)/Makefile vmlinux"
+	  "$(MAKE) -f $(srctree)/Makefile autoksyms_recursive"
 endif
 
 autoksyms_h := $(if $(CONFIG_TRIM_UNUSED_KSYMS), include/generated/autoksyms.h)
diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml
index fbbcdb424bf5..1bc86e8755d1 100644
--- a/android/abi_gki_aarch64.xml
+++ b/android/abi_gki_aarch64.xml
 [Android GKI aarch64 ABI symbol-list update; the XML element content of
  this diff did not survive extraction and is omitted here.]
+ - - + + - - + + @@ -138104,12 +140659,12 @@ - - + + - - + + @@ -138124,12 +140679,12 @@ - - + + - - + + @@ -138181,13 +140736,13 @@ - - - - - - - + + + + + + + @@ -138220,9 +140775,9 @@ - - - + + + @@ -138247,13 +140802,13 @@ - - + + - - - + + + @@ -138305,6 +140860,12 @@ + + + + + + @@ -138321,29 +140882,29 @@ - - - + + + - - - + + + - - - + + + - - - - + + + + - - + + @@ -138351,86 +140912,95 @@ - - + + - - - + + + - - + + - - + + - - - + + + - - + + + + + + + - - + + - - + + - - - + + + + + + + - - + + - - + + - - + + - - - - + + + + - - - + + + @@ -138472,24 +141042,24 @@ - - - - + + + + - - + + - - - + + + - - - + + + @@ -138497,10 +141067,15 @@ - - - - + + + + + + + + + @@ -138509,24 +141084,24 @@ - - - + + + - - - + + + - - - - + + + + @@ -138534,76 +141109,81 @@ - - - - + + + + - - - - + + + + - - + + + - - + + + + + + - - - + + + - - - + + + - - + + - - - - - - + + + + + + - - - - + + + + - - + + - - - - + + + + - - - + + + - - - + + + @@ -138618,12 +141198,12 @@ - - - - - - + + + + + + @@ -138656,13 +141236,13 @@ - - - - - - - + + + + + + + @@ -138694,8 +141274,8 @@ - - + + @@ -138850,7 +141430,10 @@ - + + + + @@ -139082,21 +141665,33 @@ - + - - - - - - + + + + - - + + + + + + + + + + + + + + + + @@ -139305,43 +141900,43 @@ - - + + - - + + - - - + + + - - - + + + - - - + + + - - - - + + + + - - - - + + + + - - + + @@ -139378,25 +141973,25 @@ - - + + - - - + + + - - + + - - + + - - + + @@ -139425,10 +142020,10 @@ - - - - + + + + @@ -139544,16 +142139,16 @@ - - + + - - + + - - + + @@ -139673,6 +142268,10 @@ + + + + @@ -139788,6 +142387,12 @@ + + + + + + @@ -139800,6 +142405,22 @@ + + + + + + + + + + + + + + + + @@ -139846,6 +142467,13 @@ + + + + + + + @@ -139871,16 +142499,16 @@ - - - - + + + + - - - - + + + + @@ -140019,78 +142647,78 @@ - - + + - - - - + + + + - - - - - + + + + + - - - + + + - - - - + + + + - - + + - - + + - - + + - - - + + + - - + + - - + + - - + + - - + + - - + + - - + + - - - + + + @@ -140145,9 +142773,9 @@ - - - + + + @@ -140158,9 +142786,9 @@ - - - + + + @@ -140177,9 +142805,9 @@ - - - + + + @@ -140187,9 +142815,9 @@ - - - + + + @@ -140250,6 +142878,11 @@ + + + + + @@ -140261,6 +142894,10 @@ + + + + @@ -140300,10 +142937,10 @@ - - - - + + + + @@ -140316,14 +142953,14 @@ - - + + - - - - + + + + @@ -140357,27 +142994,27 @@ - - - - + + + + - - - + + + - - - - + + + + - - - - + + + + @@ -140398,21 +143035,21 @@ - + - - + + - - + + - - - - + + + + @@ -140468,16 +143105,16 @@ - - - - + + + + - - - - + + + + @@ -140533,18 +143170,18 @@ - - - - - + + + + + - - - - - + + + + + @@ -140556,8 +143193,8 @@ - - + + @@ -140567,9 +143204,9 @@ - - - + + + @@ -140577,11 +143214,18 @@ - - - - - + + + + + + + + + + + + @@ -140591,11 +143235,11 @@ - - - - - + + + + + @@ -140603,12 +143247,12 @@ - - + + - - + + @@ -140617,9 +143261,9 @@ - - - + + + @@ -140627,55 +143271,55 @@ - - - - + + + + - - - - + + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - + + - - - + + + - - - + + + @@ -140690,33 +143334,33 @@ - - - - - + + + + + - - - - - + + + + + - - - + + + - - - + + + - - - + + + @@ -140945,27 +143589,27 @@ - - + + - - - + + + - - - - + + + + - - + + @@ -140974,11 +143618,11 @@ - - - - - + + + + + @@ -140987,9 +143631,9 @@ - - - + + + @@ -141057,9 +143701,9 @@ - - - + + + @@ -141072,10 +143716,27 @@ - - - - + + + + + + + + + + + + + + + + + + + + + @@ -141262,6 +143923,10 @@ + + + + @@ -141293,16 +143958,16 @@ - - - + + + - - - - - + + + + + @@ -141468,143 +144133,148 @@ - - - - + + + + - - - - + + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + 
+ - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - + + + + + + + - - + + - - - - - + + + + + - - - - - - + + + + + + - - - + + + - - - - + + + + - - + + - - - + + + - - - + + + - - - + + + - - + + - - + + - - - - + + + + - - - + + + @@ -141631,9 +144301,9 @@ - - - + + + @@ -141641,9 +144311,9 @@ - - - + + + @@ -141661,9 +144331,9 @@ - - - + + + @@ -141671,9 +144341,9 @@ - - - + + + @@ -141682,6 +144352,17 @@ + + + + + + + + + + + @@ -141693,10 +144374,10 @@ - - - - + + + + @@ -141794,9 +144475,9 @@ - - - + + + @@ -141804,9 +144485,9 @@ - - - + + + @@ -141901,11 +144582,20 @@ - - - - - + + + + + + + + + + + + + + @@ -141920,56 +144610,56 @@ - - - - - + + + + + - - - + + + - - - - - + + + + + - - - + + + - - - - + + + + - - - - - - + + + + + + - - - + + + - - - + + + @@ -142021,42 +144711,42 @@ - - + + - - + + - - + + - - + + - - - + + + - - + + - - + + - - + + @@ -142111,39 +144801,39 @@ - - + + - - + + - - - + + + - - - + + + - - + + - - - - - - + + + + + + - - - + + + @@ -142214,9 +144904,9 @@ - - - + + + @@ -142261,9 +144951,9 @@ - - - + + + @@ -142459,6 +145149,12 @@ + + + + + + @@ -142488,13 +145184,13 @@ - - + + - - - + + + @@ -142521,8 +145217,8 @@ - - + + @@ -142541,27 +145237,27 @@ - - + + - - - + + + - - + + - + - + @@ -142570,7 +145266,7 @@ - + @@ -142581,15 +145277,15 @@ - - - - - + + + + + - - + + @@ -142766,10 +145462,10 @@ - - - - + + + + @@ -142777,8 +145473,8 @@ - - + + @@ -142797,36 +145493,36 @@ - - - - - + + + + + - - + + - - - + + + - - - + + + - - - - - - - - - + + + + + + + + + @@ -142835,12 +145531,12 @@ - - - - - - + + + + + + @@ -142874,29 +145570,29 @@ - - + + - - - + + + - - + + - - + + - - + + @@ -142909,40 +145605,44 @@ - - - + + + - - + + - - + + - - + + - - - - - + + + + + + + + + - - + + - - + + @@ -143045,12 +145745,12 @@ - - + + - - + + @@ -143089,9 +145789,9 @@ - - - + + + @@ -143102,8 +145802,8 @@ - - + + @@ -143120,56 +145820,56 @@ - - + + - - - - - + + + + + - - + + - - - - + + + + - - + + - - + + - - - - + + + + - - - - + + + + - - + + @@ -143228,10 +145928,10 @@ - - - - + + + + @@ -143254,10 +145954,10 @@ - + - + @@ -143299,21 +145999,21 @@ - - - - - + + + + + - - - + + + - - - + + + @@ -143570,19 +146270,25 @@ - - + + + + + + + + - - - - - + + + + + @@ -143734,30 +146440,30 @@ - - + + - - + + - - + + - - - + + + - - - + + + - - + + @@ -143769,9 +146475,9 @@ - - - + + + @@ -143779,9 +146485,9 @@ - - - + + + @@ -143789,9 +146495,9 @@ - - - + + + @@ -143809,8 +146515,8 @@ - - + + @@ -143818,8 +146524,8 @@ - - + + @@ -143830,158 +146536,158 @@ - - + + - - - + + + - - - + + + - - - - - + + + + + - - - - - - + + + + + + - - - + + + - - + + - - - + + + - - - + + + - - - - - - + + + + + + - - - - - - + + + + + + - - + + - - - + + + - - - + + + - - - - - - - + + + + + + + - - - + + + - - - + + + - - + + - - - - + + + + - - + + - - - + + + - - - + + + - - - + + + - - + + - - + + - - + + - - - - - + + + + + - - - + + + - - + + @@ -143990,48 +146696,48 @@ - - - + + + - - - + + + - - - + + + - - - + + + - - + + - - - + + + - - - - - + + + + + - - - - - - + + + + + + @@ -144041,162 +146747,162 @@ - - - + + + - - + + - - - - + + + + - - + + - - + + - - - + + + - - + + - - + + - - - - + + + + - - - + + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - - - - + + + + + + + - - - - - - - + + + + + + + - - - - - - - - + + + + + + + + - - - - - - + + + + + + - - - - - - + + + + + + - - - - - - - + + + + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - - + + + + @@ -144211,19 +146917,19 @@ - - + + - - - - - + + + + + 
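A very large hunk range whose line content did not survive extraction (only bare +/- markers remained) has been dropped here; the per-vendor symbol lists below resume intact. Those lists consist largely of Android vendor-hook pairs — a `__traceiter_android_vh_*` symbol together with its `__tracepoint_android_vh_*` counterpart — because the generated `register_trace_android_vh_*()` helper references both. A minimal sketch (not part of this patch) of how a vendor module attaches to one such hook, `android_vh_cpu_idle_enter`, which appears in the lists below; the header path and handler prototype are assumptions for illustration:

```c
/*
 * Minimal sketch, not from this patch: attaching a handler to the
 * android_vh_cpu_idle_enter vendor hook. The header location and the
 * hook's argument list are assumptions for illustration only.
 */
#include <linux/module.h>
#include <trace/hooks/cpuidle.h>	/* assumed: declares the hook */

/* Vendor-hook probes receive a private data pointer, then the hook args. */
static void example_idle_enter(void *unused, int *state,
			       struct cpuidle_device *dev)
{
	/* vendor idle policy would run here */
}

static int __init example_init(void)
{
	/*
	 * register_trace_android_vh_cpu_idle_enter() is generated from the
	 * hook declaration and references both __traceiter_... and
	 * __tracepoint_android_vh_cpu_idle_enter — which is why the two
	 * symbols of a pair are always whitelisted together below.
	 */
	return register_trace_android_vh_cpu_idle_enter(example_idle_enter,
							NULL);
}

static void __exit example_exit(void)
{
	unregister_trace_android_vh_cpu_idle_enter(example_idle_enter, NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL v2");
```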
diff --git a/android/abi_gki_aarch64_asus b/android/abi_gki_aarch64_asus old mode 100755 new mode 100644 diff --git a/android/abi_gki_aarch64_db845c b/android/abi_gki_aarch64_db845c index 85a14be32a7d..acbd8c5a6d04 100644 --- a/android/abi_gki_aarch64_db845c +++ b/android/abi_gki_aarch64_db845c @@ -319,6 +319,7 @@ ktime_get ktime_get_mono_fast_ns ktime_get_real_ts64 + ktime_get_with_offset kvfree kvfree_call_rcu kvmalloc_node @@ -521,6 +522,7 @@ request_threaded_irq reset_control_assert reset_control_deassert + reset_control_put reset_control_reset round_jiffies_up
rpmsg_register_device @@ -605,7 +607,9 @@ strlen strncmp strncpy + strnlen strpbrk + strscpy_pad strsep __sw_hweight16 __sw_hweight32 @@ -700,6 +704,7 @@ bus_set_iommu device_link_add device_match_fwnode + devm_platform_get_and_ioremap_resource driver_find_device generic_device_group generic_iommu_put_resv_regions @@ -897,7 +902,9 @@ dma_buf_put dma_buf_unmap_attachment dma_get_sgtable_attrs + down_read find_vma + up_read wait_for_completion_interruptible # required by gpio-regulator.ko @@ -1018,7 +1025,6 @@ kernel_param_unlock kfree_skb_list ktime_get_seconds - ktime_get_with_offset napi_gro_receive netdev_set_default_ethtool_ops netif_carrier_off @@ -1297,6 +1303,8 @@ kthread_create_worker kthread_destroy_worker kthread_queue_work + kthread_should_stop + kthread_stop llist_add_batch memdup_user_nul memparse @@ -1413,11 +1421,8 @@ usb_put_hcd usb_remove_hcd -# required by pdr_interface.ko - strnlen - # required by phy-qcom-qmp.ko - of_clk_get_by_name + devm_get_clk_from_child __of_reset_control_get # required by phy-qcom-qusb2.ko @@ -1585,7 +1590,6 @@ autoremove_wake_function datagram_poll do_wait_intr_irq - idr_alloc_u32 lock_sock_nested proto_register proto_unregister @@ -1613,6 +1617,12 @@ sock_queue_rcv_skb sock_register sock_unregister + __xa_alloc + xa_erase + xa_find + xa_find_after + __xa_insert + xa_load # required by reboot-mode.ko devres_add @@ -1647,6 +1657,7 @@ mmc_regulator_set_vqmmc mmc_send_tuning regulator_is_supported_voltage + __reset_control_get __sdhci_add_host sdhci_add_host sdhci_cleanup_host @@ -1679,7 +1690,9 @@ snd_ctl_add snd_ctl_new1 snd_pcm_add_chmap_ctls - snd_pcm_create_iec958_consumer_hw_params + snd_pcm_create_iec958_consumer_default + snd_pcm_fill_iec958_consumer + snd_pcm_fill_iec958_consumer_hw_params snd_pcm_hw_constraint_eld # required by snd-soc-qcom-common.ko @@ -1711,9 +1724,6 @@ # required by snd-soc-wcd9335.ko kmemdup_nul - snd_soc_get_volsw_sx - snd_soc_info_volsw_sx - snd_soc_put_volsw_sx strnstr # required by snd-soc-wcd934x.ko @@ -1795,3 +1805,11 @@ # required by wcd934x.ko mfd_add_devices mfd_remove_devices + +# preserved by --additions-only + idr_alloc_u32 + of_clk_get_by_name + snd_pcm_create_iec958_consumer_hw_params + snd_soc_get_volsw_sx + snd_soc_info_volsw_sx + snd_soc_put_volsw_sx diff --git a/android/abi_gki_aarch64_exynos b/android/abi_gki_aarch64_exynos index 777a6434355e..0b37ab51884c 100644 --- a/android/abi_gki_aarch64_exynos +++ b/android/abi_gki_aarch64_exynos @@ -144,6 +144,7 @@ clk_set_rate clk_unprepare clockevents_config_and_register + clocks_calc_mult_shift __clocksource_register_scale __close_fd cma_alloc @@ -198,6 +199,8 @@ cpufreq_table_index_unsorted cpufreq_this_cpu_can_update cpufreq_unregister_notifier + cpu_hotplug_disable + cpu_hotplug_enable __cpuhp_remove_state __cpuhp_setup_state __cpuhp_setup_state_cpuslocked @@ -235,6 +238,7 @@ crypto_shash_update crypto_unregister_alg crypto_unregister_scomp + csum_ipv6_magic csum_partial csum_tcpudp_nofold _ctype @@ -385,6 +389,7 @@ dev_pm_opp_add dev_pm_opp_disable dev_pm_opp_find_freq_ceil + dev_pm_opp_find_freq_ceil_by_volt dev_pm_opp_find_freq_exact dev_pm_opp_find_freq_floor dev_pm_opp_get_freq @@ -428,6 +433,7 @@ dma_buf_export dma_buf_fd dma_buf_get + dma_buf_get_flags dma_buf_map_attachment dma_buf_mmap dma_buf_move_notify @@ -509,6 +515,7 @@ drm_atomic_add_affected_connectors drm_atomic_add_affected_planes drm_atomic_commit + drm_atomic_get_connector_state drm_atomic_get_crtc_state drm_atomic_get_plane_state drm_atomic_get_private_obj_state @@ -552,6 +559,7 @@ 
drm_atomic_set_fb_for_plane drm_atomic_set_mode_for_crtc drm_atomic_state_alloc + drm_atomic_state_clear __drm_atomic_state_free drm_bridge_add drm_bridge_attach @@ -584,6 +592,7 @@ drm_crtc_vblank_off drm_crtc_vblank_on drm_crtc_vblank_put + drm_crtc_wait_one_vblank drm_cvt_mode __drm_dbg __drm_debug @@ -745,6 +754,7 @@ drm_vma_node_allow drm_vma_node_is_allowed drm_vma_node_revoke + drm_wait_one_vblank drm_writeback_connector_init drm_writeback_queue_job drm_writeback_signal_completion @@ -824,6 +834,7 @@ __get_free_pages get_net_ns_by_fd get_net_ns_by_pid + get_options get_random_bytes get_random_u32 __get_task_comm @@ -966,6 +977,7 @@ input_set_capability input_unregister_device input_unregister_handle + int_pow int_sqrt iomem_resource iommu_alloc_resv_region @@ -975,6 +987,7 @@ iommu_device_sysfs_remove iommu_device_unlink iommu_device_unregister + iommu_dma_enable_best_fit_algo iommu_dma_reserve_iova iommu_domain_alloc iommu_fwspec_add_ids @@ -1009,6 +1022,7 @@ irq_domain_set_info irq_domain_xlate_onetwocell irq_domain_xlate_twocell + irq_do_set_affinity irq_find_mapping irq_get_irqchip_state irq_get_irq_data @@ -1038,6 +1052,7 @@ kasan_flag_enabled kasprintf kernel_kobj + kernfs_path_from_node kern_mount kern_unmount key_create_or_update @@ -1074,6 +1089,8 @@ kobject_uevent kobject_uevent_env krealloc + kset_create_and_add + kset_unregister kstat kstrdup kstrndup @@ -1091,6 +1108,7 @@ kstrtoull_from_user ksys_sync_helper kthread_bind + kthread_bind_mask kthread_cancel_delayed_work_sync kthread_cancel_work_sync kthread_create_on_node @@ -1120,6 +1138,8 @@ kvfree kvfree_call_rcu kvmalloc_node + led_classdev_register_ext + led_classdev_unregister __list_add_valid __list_del_entry_valid list_sort @@ -1144,6 +1164,7 @@ __memcpy_toio memdup_user memmove + memory_read_from_buffer memparse memremap memset @@ -1159,6 +1180,7 @@ mii_nway_restart mipi_dsi_attach mipi_dsi_compression_mode + mipi_dsi_dcs_get_display_brightness mipi_dsi_dcs_read mipi_dsi_dcs_set_column_address mipi_dsi_dcs_set_display_brightness @@ -1316,6 +1338,7 @@ of_property_read_string_helper of_property_read_u32_index of_property_read_u64 + of_property_read_u64_index of_property_read_variable_u16_array of_property_read_variable_u32_array of_property_read_variable_u8_array @@ -1331,6 +1354,9 @@ oops_in_progress orderly_poweroff page_endio + page_frag_alloc + __page_frag_cache_drain + page_frag_free page_mapping __page_pinner_migration_failed panic @@ -1378,9 +1404,11 @@ pci_read_config_dword pci_read_config_word __pci_register_driver + pci_release_regions pci_release_resource pci_rescan_bus pci_resize_resource + pci_restore_msi_state pci_restore_state pci_save_state pci_set_master @@ -1388,6 +1416,7 @@ pci_store_saved_state pci_unmap_rom pci_unregister_driver + pci_wake_from_d3 pci_write_config_dword pci_write_config_word PDE_DATA @@ -1418,6 +1447,7 @@ pinctrl_utils_free_map pin_get_name pin_user_pages + pin_user_pages_fast pin_user_pages_remote platform_bus_type platform_device_add @@ -1506,6 +1536,8 @@ pwm_set_chip_data queue_delayed_work_on queue_work_on + radix_tree_delete + radix_tree_lookup radix_tree_tagged ___ratelimit raw_notifier_call_chain @@ -1647,11 +1679,13 @@ sched_set_normal sched_setscheduler sched_setscheduler_nocheck + sched_uclamp_used schedule schedule_timeout schedule_timeout_interruptible scnprintf scsi_block_when_processing_errors + scsi_dma_unmap scsi_eh_ready_devs __scsi_execute scsi_print_sense_hdr @@ -1712,6 +1746,7 @@ skb_realloc_headroom skb_trim smp_call_function + smp_call_function_any 
smp_call_function_many smp_call_function_single smp_call_on_cpu @@ -1783,6 +1818,7 @@ snd_soc_info_volsw_range snd_soc_info_volsw_sx snd_soc_info_xr_sx + snd_soc_lookup_component snd_soc_new_compress snd_soc_of_get_dai_link_codecs snd_soc_of_get_dai_name @@ -1832,6 +1868,7 @@ __stack_chk_guard static_key_slow_dec static_key_slow_inc + stop_machine stop_one_cpu_nowait stpcpy strcasecmp @@ -1903,7 +1940,15 @@ tasklet_init tasklet_kill __tasklet_schedule + tasklet_setup task_rq_lock + tcp_register_congestion_control + tcp_reno_cong_avoid + tcp_reno_ssthresh + tcp_reno_undo_cwnd + tcp_slow_start + tcp_unregister_congestion_control + thermal_cdev_update thermal_cooling_device_unregister thermal_of_cooling_device_register thermal_zone_device_disable @@ -1925,11 +1970,13 @@ trace_event_reg trace_handle_return __traceiter_android_rvh_can_migrate_task + __traceiter_android_rvh_cpu_cgroup_attach __traceiter_android_rvh_cpu_cgroup_can_attach __traceiter_android_rvh_dequeue_task __traceiter_android_rvh_enqueue_task __traceiter_android_rvh_find_lowest_rq __traceiter_android_rvh_find_new_ilb + __traceiter_android_rvh_flush_task __traceiter_android_rvh_gic_v3_set_affinity __traceiter_android_rvh_post_init_entity_util_avg __traceiter_android_rvh_replace_next_task_fair @@ -1937,18 +1984,24 @@ __traceiter_android_rvh_sched_newidle_balance __traceiter_android_rvh_sched_nohz_balancer_kick __traceiter_android_rvh_sched_rebalance_domains + __traceiter_android_rvh_schedule __traceiter_android_rvh_select_fallback_rq __traceiter_android_rvh_select_task_rq_fair __traceiter_android_rvh_select_task_rq_rt + __traceiter_android_rvh_wake_up_new_task + __traceiter_android_vh_cgroup_attach __traceiter_android_vh_cpu_idle_enter __traceiter_android_vh_cpu_idle_exit __traceiter_android_vh_do_wake_up_sync __traceiter_android_vh_ipi_stop + __traceiter_android_vh_is_fpsimd_save __traceiter_android_vh_scheduler_tick __traceiter_android_vh_set_wake_flags __traceiter_android_vh_show_mem + __traceiter_android_vh_ufs_check_int_errors __traceiter_android_vh_ufs_compl_command __traceiter_android_vh_ufs_prepare_command + __traceiter_android_vh_ufs_send_tm_command __traceiter_cpu_idle __traceiter_device_pm_callback_end __traceiter_device_pm_callback_start @@ -1976,11 +2029,13 @@ __traceiter_workqueue_execute_start trace_output_call __tracepoint_android_rvh_can_migrate_task + __tracepoint_android_rvh_cpu_cgroup_attach __tracepoint_android_rvh_cpu_cgroup_can_attach __tracepoint_android_rvh_dequeue_task __tracepoint_android_rvh_enqueue_task __tracepoint_android_rvh_find_lowest_rq __tracepoint_android_rvh_find_new_ilb + __tracepoint_android_rvh_flush_task __tracepoint_android_rvh_gic_v3_set_affinity __tracepoint_android_rvh_post_init_entity_util_avg __tracepoint_android_rvh_replace_next_task_fair @@ -1988,18 +2043,24 @@ __tracepoint_android_rvh_sched_newidle_balance __tracepoint_android_rvh_sched_nohz_balancer_kick __tracepoint_android_rvh_sched_rebalance_domains + __tracepoint_android_rvh_schedule __tracepoint_android_rvh_select_fallback_rq __tracepoint_android_rvh_select_task_rq_fair __tracepoint_android_rvh_select_task_rq_rt + __tracepoint_android_rvh_wake_up_new_task + __tracepoint_android_vh_cgroup_attach __tracepoint_android_vh_cpu_idle_enter __tracepoint_android_vh_cpu_idle_exit __tracepoint_android_vh_do_wake_up_sync __tracepoint_android_vh_ipi_stop + __tracepoint_android_vh_is_fpsimd_save __tracepoint_android_vh_scheduler_tick __tracepoint_android_vh_set_wake_flags __tracepoint_android_vh_show_mem + 
__tracepoint_android_vh_ufs_check_int_errors __tracepoint_android_vh_ufs_compl_command __tracepoint_android_vh_ufs_prepare_command + __tracepoint_android_vh_ufs_send_tm_command __tracepoint_cpu_idle __tracepoint_device_pm_callback_end __tracepoint_device_pm_callback_start @@ -2129,6 +2190,7 @@ unregister_shrinker up update_devfreq + update_rq_clock up_read up_write usb_add_function @@ -2301,6 +2363,7 @@ wakeup_source_add wakeup_source_destroy wakeup_source_register + wakeup_source_remove wakeup_source_unregister __wake_up_sync __wake_up_sync_key @@ -2322,6 +2385,7 @@ xhci_gen_setup xhci_get_endpoint_index xhci_get_ep_ctx + xhci_get_slot_ctx xhci_init_driver xhci_initialize_ring_info xhci_link_segments diff --git a/android/abi_gki_aarch64_fips140 b/android/abi_gki_aarch64_fips140 index faea593f3b77..68a850db7f58 100644 --- a/android/abi_gki_aarch64_fips140 +++ b/android/abi_gki_aarch64_fips140 @@ -112,6 +112,7 @@ _raw_spin_lock _raw_spin_unlock refcount_warn_saturate + rng_is_initialized scatterwalk_ffwd scatterwalk_map_and_copy sg_init_one diff --git a/android/abi_gki_aarch64_galaxy b/android/abi_gki_aarch64_galaxy index d5358ae912e2..7ba5f74f973a 100644 --- a/android/abi_gki_aarch64_galaxy +++ b/android/abi_gki_aarch64_galaxy @@ -574,6 +574,7 @@ _snd_ctl_add_follower _snd_pcm_stream_lock_irqsave _totalram_pages + _trace_android_vh_record_pcpu_rwsem_starttime access_process_vm ack_all_badblocks activate_task @@ -1901,6 +1902,8 @@ find_vpid finish_wait firmware_request_nowarn + fixed_phy_register + fixed_phy_unregister fixed_size_llseek flow_keys_basic_dissector flush_dcache_page @@ -2327,6 +2330,7 @@ irq_create_mapping_affinity irq_create_of_mapping irq_dispose_mapping + irq_domain_add_simple irq_domain_alloc_irqs_parent irq_domain_create_hierarchy irq_domain_free_irqs_common @@ -3023,6 +3027,7 @@ phy_ethtool_get_link_ksettings phy_ethtool_nway_reset phy_ethtool_set_link_ksettings + phy_ethtool_set_wol phy_exit phy_find_first phy_get_pause @@ -3034,9 +3039,12 @@ phy_power_off phy_power_on phy_print_status + phy_register_fixup_for_uid + phy_save_page phy_set_mode_ext phy_start phy_stop + phy_unregister_fixup_for_uid pick_highest_pushable_task pid_nr_ns pid_task @@ -4066,6 +4074,7 @@ ttm_tt_populate ttm_tt_set_placement_caching ttm_unmap_and_unpopulate_pages + tty_encode_baud_rate tty_flip_buffer_push tty_insert_flip_string_fixed_flag tty_kref_put @@ -4214,8 +4223,10 @@ usb_asmedia_modifyflowcontrol usb_assign_descriptors usb_autopm_get_interface + usb_autopm_get_interface_async usb_autopm_get_interface_no_resume usb_autopm_put_interface + usb_autopm_put_interface_async usb_bulk_msg usb_calc_bus_time usb_choose_configuration @@ -4293,6 +4304,7 @@ usb_ifnum_to_if usb_initialize_gadget usb_interface_id + usb_interrupt_msg usb_kill_urb usb_match_id usb_match_one_id diff --git a/android/abi_gki_aarch64_honor b/android/abi_gki_aarch64_honor new file mode 100644 index 000000000000..a1c85ea34e54 --- /dev/null +++ b/android/abi_gki_aarch64_honor @@ -0,0 +1,3 @@ +[abi_symbol_list] + __traceiter_android_rvh_dma_buf_stats_teardown + __tracepoint_android_rvh_dma_buf_stats_teardown diff --git a/android/abi_gki_aarch64_mtk b/android/abi_gki_aarch64_mtk index 807be584e15b..8d2c95696079 100644 --- a/android/abi_gki_aarch64_mtk +++ b/android/abi_gki_aarch64_mtk @@ -371,7 +371,10 @@ device_unregister _dev_info __dev_kfree_skb_any + __dev_kfree_skb_irq + devm_add_action __devm_alloc_percpu + devm_alloc_etherdev_mqs devm_blk_ksm_init devm_clk_bulk_get devm_clk_bulk_get_optional @@ -409,14 +412,17 @@ 
devm_kasprintf devm_kfree devm_kmalloc + devm_led_classdev_flash_register_ext devm_led_classdev_register_ext devm_led_classdev_unregister devm_mbox_controller_register + devm_mdiobus_alloc_size devm_memremap devm_mfd_add_devices devm_nvmem_cell_get devm_nvmem_device_get devm_nvmem_register + devm_of_mdiobus_register devm_of_phy_get_by_index __devm_of_phy_provider_register devm_of_platform_populate @@ -433,6 +439,7 @@ devm_power_supply_register devm_rc_allocate_device devm_rc_register_device + devm_register_netdev devm_regmap_add_irq_chip devm_regmap_field_alloc devm_regmap_field_bulk_alloc @@ -584,6 +591,7 @@ down_write d_path dput + dql_completed drain_workqueue driver_create_file driver_remove_file @@ -809,7 +817,13 @@ genlmsg_put genl_register_family genl_unregister_family + __genphy_config_aneg + genphy_read_abilities + genphy_read_mmd_unsupported + genphy_read_status genphy_resume + genphy_suspend + genphy_write_mmd_unsupported gen_pool_add_owner gen_pool_alloc_algo_owner gen_pool_avail @@ -835,6 +849,7 @@ get_kernel_pages get_net_ns_by_fd get_net_ns_by_pid + get_pelt_halflife get_pid_task get_random_bytes get_random_u32 @@ -957,11 +972,13 @@ init_uts_ns init_wait_entry __init_waitqueue_head + input_alloc_absinfo input_allocate_device input_event input_free_device input_mt_init_slots input_mt_report_slot_state + input_mt_sync_frame input_register_device input_set_abs_params input_set_capability @@ -1133,7 +1150,9 @@ kvmalloc_node led_classdev_flash_register_ext led_classdev_flash_unregister + led_colors led_get_flash_fault + led_set_brightness led_set_brightness_sync led_set_flash_brightness led_set_flash_timeout @@ -1143,6 +1162,7 @@ led_update_brightness led_update_flash_brightness linear_range_get_max_value + linear_range_get_selector_high linear_range_get_value __list_add_valid __list_del_entry_valid @@ -1169,8 +1189,12 @@ mbox_send_message mdiobus_alloc_size mdiobus_free + __mdiobus_read + mdiobus_read __mdiobus_register mdiobus_unregister + __mdiobus_write + mdiobus_write media_create_intf_link media_create_pad_link media_device_cleanup @@ -1270,10 +1294,12 @@ mutex_lock_killable mutex_trylock mutex_unlock + napi_complete_done napi_disable napi_gro_flush napi_gro_receive __napi_schedule + __napi_schedule_irqoff napi_schedule_prep __ndelay nd_tbl @@ -1292,6 +1318,7 @@ netif_receive_skb_list netif_rx netif_rx_ni + netif_schedule_queue netif_tx_stop_all_queues netif_tx_wake_queue netlink_broadcast @@ -1372,6 +1399,7 @@ of_get_next_child of_get_next_parent of_get_parent + of_get_phy_mode of_get_property of_get_regulator_init_data of_graph_get_next_endpoint @@ -1392,6 +1420,7 @@ of_parse_phandle_with_fixed_args of_phandle_iterator_init of_phandle_iterator_next + of_phy_connect of_phy_simple_xlate of_platform_depopulate of_platform_device_create @@ -1459,19 +1488,29 @@ phy_connect phy_disconnect phy_do_ioctl_running + phy_drivers_register + phy_drivers_unregister phy_ethtool_get_link_ksettings phy_ethtool_nway_reset phy_ethtool_set_link_ksettings phy_exit phy_get phy_init + phy_mii_ioctl + __phy_modify + phy_modify + phy_modify_paged_changed phy_power_off phy_power_on phy_print_status phy_put + phy_read_paged + phy_restore_page + phy_select_page phy_set_mode_ext phy_start phy_stop + phy_write_paged pid_task pinconf_generic_parse_dt_config pinctrl_dev_get_drvdata @@ -1660,6 +1699,7 @@ regmap_raw_read regmap_raw_write regmap_read + regmap_test_bits regmap_update_bits_base regmap_write regulator_count_voltages @@ -1668,6 +1708,7 @@ regulator_enable regulator_enable_regmap regulator_get + 
regulator_get_bypass_regmap regulator_get_current_limit_regmap regulator_get_mode regulator_get_optional @@ -1685,6 +1726,7 @@ regulator_notifier_call_chain regulator_put regulator_set_active_discharge_regmap + regulator_set_bypass_regmap regulator_set_current_limit regulator_set_current_limit_regmap regulator_set_load @@ -2091,6 +2133,8 @@ timer_unstable_counter_workaround topology_set_thermal_pressure _totalram_pages + touchscreen_parse_properties + touchscreen_report_pos __trace_bprintk __trace_bputs trace_event_buffer_commit @@ -2153,6 +2197,7 @@ __traceiter_android_vh_rwsem_init __traceiter_android_vh_rwsem_wake __traceiter_android_vh_rwsem_write_finished + __traceiter_android_vh_sched_pelt_multiplier __traceiter_android_vh_scheduler_tick __traceiter_android_vh_selinux_avc_insert __traceiter_android_vh_selinux_avc_lookup @@ -2237,6 +2282,7 @@ __tracepoint_android_vh_rwsem_init __tracepoint_android_vh_rwsem_wake __tracepoint_android_vh_rwsem_write_finished + __tracepoint_android_vh_sched_pelt_multiplier __tracepoint_android_vh_scheduler_tick __tracepoint_android_vh_selinux_avc_insert __tracepoint_android_vh_selinux_avc_lookup @@ -2790,10 +2836,13 @@ fwnode_graph_parse_endpoint fwnode_property_get_reference_args fwnode_property_read_u64_array + gen_pool_avail gen_pool_dma_alloc_align gen_pool_has_addr + gen_pool_size getboottime64 get_governor_parent_kobj + get_pelt_halflife get_task_exe_file get_vaddr_frames get_zeroed_page @@ -2930,6 +2979,7 @@ platform_find_device_by_driver pm_wq power_supply_is_system_supplied + power_supply_register_no_ws power_supply_unreg_notifier prepare_to_wait printk_deferred @@ -3070,6 +3120,7 @@ __traceiter_android_vh_rwsem_init __traceiter_android_vh_rwsem_wake __traceiter_android_vh_rwsem_write_finished + __traceiter_android_vh_sched_pelt_multiplier __traceiter_android_vh_scmi_timeout_sync __traceiter_android_vh_show_resume_epoch_val __traceiter_android_vh_show_suspend_epoch_val @@ -3123,6 +3174,7 @@ __tracepoint_android_vh_rwsem_init __tracepoint_android_vh_rwsem_wake __tracepoint_android_vh_rwsem_write_finished + __tracepoint_android_vh_sched_pelt_multiplier __tracepoint_android_vh_scmi_timeout_sync __tracepoint_android_vh_show_resume_epoch_val __tracepoint_android_vh_show_suspend_epoch_val @@ -3171,6 +3223,7 @@ update_devfreq usb_add_phy_dev usb_assign_descriptors + usb_clear_halt usb_copy_descriptors usb_ep_alloc_request usb_ep_autoconfig @@ -3193,6 +3246,8 @@ usb_otg_state_string usb_phy_set_charger_current usb_remove_phy + usb_role_switch_set_role + usb_unlink_urb v4l2_async_notifier_add_subdev v4l2_async_notifier_cleanup v4l2_async_subdev_notifier_register diff --git a/android/abi_gki_aarch64_oplus b/android/abi_gki_aarch64_oplus index 5b208407c2a5..d1c65a2281cb 100644 --- a/android/abi_gki_aarch64_oplus +++ b/android/abi_gki_aarch64_oplus @@ -5,6 +5,7 @@ add_device_randomness add_memory add_memory_subsection + address_space_init_once add_swap_extent add_taint add_timer @@ -64,6 +65,8 @@ bio_add_pc_page bio_alloc_bioset bio_associate_blkg_from_css + __bio_crypt_clone + bio_crypt_set_ctx bio_endio bio_put bio_reset @@ -89,6 +92,7 @@ blk_execute_rq_nowait blk_get_request blk_mq_free_request + blk_mq_queue_inflight blk_mq_rq_cpu blk_mq_sched_mark_restart_hctx blk_mq_start_request @@ -408,6 +412,7 @@ dev_fwnode __dev_get_by_index dev_get_by_index + dev_get_by_index_rcu dev_get_by_name dev_get_regmap dev_get_stats @@ -681,6 +686,7 @@ dma_unmap_sg_attrs do_exit do_wait_intr_irq + do_traversal_all_lruvec down down_interruptible down_read @@ -1657,7 +1663,9 
@@ net_ratelimit nf_ct_attach nf_ct_delete + nf_register_net_hook nf_register_net_hooks + nf_unregister_net_hook nf_unregister_net_hooks nla_find nla_memcpy @@ -1802,10 +1810,12 @@ page_endio __page_file_index __page_file_mapping + __page_mapcount page_get_link page_mapping __page_pinner_migration_failed page_symlink + page_to_lruvec panic panic_notifier_list panic_timeout @@ -2000,6 +2010,7 @@ preempt_schedule_notrace prepare_to_wait prepare_to_wait_event + prepare_to_wait_exclusive print_hex_dump printk printk_deferred @@ -2041,11 +2052,14 @@ qcom_smem_state_update_bits queue_delayed_work_on queue_work_on + radix_tree_delete_item radix_tree_gang_lookup radix_tree_insert radix_tree_iter_delete radix_tree_lookup + radix_tree_lookup_slot radix_tree_next_chunk + radix_tree_replace_slot radix_tree_preload ___ratelimit rational_best_approximation @@ -2097,6 +2111,7 @@ rdev_get_regmap read_cache_page_gfp reboot_mode + reclaim_pages refcount_dec_and_lock refcount_dec_not_one refcount_warn_saturate @@ -2282,6 +2297,8 @@ rtc_update_irq rtc_valid_tm rtnl_is_locked + __rtnl_link_register + __rtnl_link_unregister rtnl_lock rtnl_unlock runqueues @@ -2680,6 +2697,7 @@ __traceiter_android_rvh_build_perf_domains __traceiter_android_rvh_can_migrate_task __traceiter_android_rvh_check_preempt_wakeup + __traceiter_android_rvh_check_preempt_tick __traceiter_android_rvh_cpu_cgroup_attach __traceiter_android_rvh_cpu_cgroup_online __traceiter_android_rvh_cpu_overutilized @@ -2779,6 +2797,8 @@ __traceiter_android_vh_cpu_idle_enter __traceiter_android_vh_cpu_idle_exit __traceiter_android_vh_cpu_up + __traceiter_android_vh_check_page_look_around_ref + __traceiter_android_vh_do_futex __traceiter_android_vh_do_send_sig_info __traceiter_android_vh_drain_all_pages_bypass __traceiter_android_vh_em_cpu_energy @@ -2793,8 +2813,14 @@ __traceiter_android_vh_ftrace_oops_exit __traceiter_android_vh_ftrace_size_check __traceiter_android_vh_futex_sleep_start + __traceiter_android_vh_futex_wait_end + __traceiter_android_vh_futex_wait_start + __traceiter_android_vh_futex_wake_this + __traceiter_android_vh_futex_wake_traverse_plist + __traceiter_android_vh_futex_wake_up_q_finish __traceiter_android_vh_get_from_fragment_pool __traceiter_android_vh_gpio_block_read + __traceiter_android_vh_handle_failed_page_trylock __traceiter_android_vh_include_reserved_zone __traceiter_android_vh_iommu_alloc_iova __traceiter_android_vh_iommu_free_iova @@ -2802,32 +2828,58 @@ __traceiter_android_vh_ipi_stop __traceiter_android_vh_ipv6_gen_linklocal_addr __traceiter_android_vh_jiffies_update + __traceiter_android_vh_killed_process __traceiter_android_vh_kmalloc_slab __traceiter_android_vh_logbuf + __traceiter_android_vh_look_around + __traceiter_android_vh_look_around_migrate_page __traceiter_android_vh_mem_cgroup_alloc __traceiter_android_vh_mem_cgroup_css_offline __traceiter_android_vh_mem_cgroup_css_online __traceiter_android_vh_mem_cgroup_free __traceiter_android_vh_mem_cgroup_id_remove __traceiter_android_vh_meminfo_proc_show + __traceiter_android_vh_alloc_pages_slowpath_begin + __traceiter_android_vh_alloc_pages_slowpath_end __traceiter_android_vh_mutex_unlock_slowpath + __traceiter_android_vh_mutex_unlock_slowpath_end __traceiter_android_vh_mutex_wait_finish __traceiter_android_vh_mutex_wait_start __traceiter_android_vh_override_creds __traceiter_android_vh_page_referenced_check_bypass + __traceiter_android_vh_page_should_be_protected + __traceiter_android_vh_page_trylock_set + __traceiter_android_vh_page_trylock_clear + 
__traceiter_android_vh_page_trylock_get_result + __traceiter_android_vh_mark_page_accessed + __traceiter_android_vh_show_mapcount_pages + __traceiter_android_vh_do_traversal_lruvec + __traceiter_android_vh_do_page_trylock + __traceiter_android_vh_update_page_mapcount + __traceiter_android_vh_add_page_to_lrulist + __traceiter_android_vh_del_page_from_lrulist __traceiter_android_vh_pcplist_add_cma_pages_bypass __traceiter_android_vh_prepare_update_load_avg_se __traceiter_android_vh_printk_hotplug __traceiter_android_vh_process_killed - __traceiter_android_vh_killed_process __traceiter_android_vh_revert_creds + __traceiter_android_vh_record_mutex_lock_starttime + __traceiter_android_vh_record_rtmutex_lock_starttime + __traceiter_android_vh_record_rwsem_lock_starttime + __traceiter_android_vh_record_pcpu_rwsem_starttime __traceiter_android_vh_rmqueue __traceiter_android_vh_rwsem_init + __traceiter_android_vh_rwsem_mark_wake_readers + __traceiter_android_vh_rwsem_set_owner + __traceiter_android_vh_rwsem_set_reader_owned + __traceiter_android_vh_rwsem_up_read_end + __traceiter_android_vh_rwsem_up_write_end __traceiter_android_vh_rwsem_wake __traceiter_android_vh_rwsem_wake_finish __traceiter_android_vh_rwsem_write_finished __traceiter_android_vh_save_track_hash __traceiter_android_vh_save_vmalloc_stack + __traceiter_android_vh_remove_vmalloc_stack __traceiter_android_vh_sched_stat_runtime_rt __traceiter_android_vh_scheduler_tick __traceiter_android_vh_selinux_avc_insert @@ -2882,12 +2934,14 @@ __traceiter_suspend_resume __traceiter_task_newtask __traceiter_task_rename + __traceiter_android_vh_test_clear_look_around_ref __traceiter_xhci_urb_giveback __tracepoint_android_rvh_account_irq __tracepoint_android_rvh_after_enqueue_task __tracepoint_android_rvh_build_perf_domains __tracepoint_android_rvh_can_migrate_task __tracepoint_android_rvh_check_preempt_wakeup + __tracepoint_android_rvh_check_preempt_tick __tracepoint_android_rvh_cpu_cgroup_attach __tracepoint_android_rvh_cpu_cgroup_online __tracepoint_android_rvh_cpu_overutilized @@ -2987,6 +3041,8 @@ __tracepoint_android_vh_cpu_idle_enter __tracepoint_android_vh_cpu_idle_exit __tracepoint_android_vh_cpu_up + __tracepoint_android_vh_check_page_look_around_ref + __tracepoint_android_vh_do_futex __tracepoint_android_vh_do_send_sig_info __tracepoint_android_vh_drain_all_pages_bypass __tracepoint_android_vh_em_cpu_energy @@ -3001,8 +3057,14 @@ __tracepoint_android_vh_ftrace_oops_exit __tracepoint_android_vh_ftrace_size_check __tracepoint_android_vh_futex_sleep_start + __tracepoint_android_vh_futex_wait_end + __tracepoint_android_vh_futex_wait_start + __tracepoint_android_vh_futex_wake_this + __tracepoint_android_vh_futex_wake_traverse_plist + __tracepoint_android_vh_futex_wake_up_q_finish __tracepoint_android_vh_get_from_fragment_pool __tracepoint_android_vh_gpio_block_read + __tracepoint_android_vh_handle_failed_page_trylock __tracepoint_android_vh_include_reserved_zone __tracepoint_android_vh_iommu_alloc_iova __tracepoint_android_vh_iommu_free_iova @@ -3010,32 +3072,58 @@ __tracepoint_android_vh_ipi_stop __tracepoint_android_vh_ipv6_gen_linklocal_addr __tracepoint_android_vh_jiffies_update + __tracepoint_android_vh_killed_process __tracepoint_android_vh_kmalloc_slab __tracepoint_android_vh_logbuf + __tracepoint_android_vh_look_around + __tracepoint_android_vh_look_around_migrate_page __tracepoint_android_vh_mem_cgroup_alloc __tracepoint_android_vh_mem_cgroup_css_offline __tracepoint_android_vh_mem_cgroup_css_online 
__tracepoint_android_vh_mem_cgroup_free __tracepoint_android_vh_mem_cgroup_id_remove __tracepoint_android_vh_meminfo_proc_show + __tracepoint_android_vh_alloc_pages_slowpath_begin + __tracepoint_android_vh_alloc_pages_slowpath_end __tracepoint_android_vh_mutex_unlock_slowpath + __tracepoint_android_vh_mutex_unlock_slowpath_end __tracepoint_android_vh_mutex_wait_finish __tracepoint_android_vh_mutex_wait_start __tracepoint_android_vh_override_creds __tracepoint_android_vh_page_referenced_check_bypass + __tracepoint_android_vh_page_should_be_protected + __tracepoint_android_vh_page_trylock_set + __tracepoint_android_vh_page_trylock_clear + __tracepoint_android_vh_page_trylock_get_result + __tracepoint_android_vh_mark_page_accessed + __tracepoint_android_vh_show_mapcount_pages + __tracepoint_android_vh_do_traversal_lruvec + __tracepoint_android_vh_do_page_trylock + __tracepoint_android_vh_update_page_mapcount + __tracepoint_android_vh_add_page_to_lrulist + __tracepoint_android_vh_del_page_from_lrulist __tracepoint_android_vh_pcplist_add_cma_pages_bypass __tracepoint_android_vh_prepare_update_load_avg_se __tracepoint_android_vh_printk_hotplug __tracepoint_android_vh_process_killed - __tracepoint_android_vh_killed_process __tracepoint_android_vh_revert_creds + __tracepoint_android_vh_record_mutex_lock_starttime + __tracepoint_android_vh_record_rtmutex_lock_starttime + __tracepoint_android_vh_record_rwsem_lock_starttime + __tracepoint_android_vh_record_pcpu_rwsem_starttime __tracepoint_android_vh_rmqueue __tracepoint_android_vh_rwsem_init + __tracepoint_android_vh_rwsem_mark_wake_readers + __tracepoint_android_vh_rwsem_set_owner + __tracepoint_android_vh_rwsem_set_reader_owned + __tracepoint_android_vh_rwsem_up_read_end + __tracepoint_android_vh_rwsem_up_write_end __tracepoint_android_vh_rwsem_wake __tracepoint_android_vh_rwsem_wake_finish __tracepoint_android_vh_rwsem_write_finished __tracepoint_android_vh_save_track_hash __tracepoint_android_vh_save_vmalloc_stack + __tracepoint_android_vh_remove_vmalloc_stack __tracepoint_android_vh_sched_stat_runtime_rt __tracepoint_android_vh_scheduler_tick __tracepoint_android_vh_selinux_avc_insert @@ -3062,6 +3150,7 @@ __tracepoint_android_vh_tune_inactive_ratio __tracepoint_android_vh_tune_scan_type __tracepoint_android_vh_tune_swappiness + __tracepoint_android_vh_test_clear_look_around_ref __tracepoint_android_vh_ufs_compl_command __tracepoint_android_vh_ufs_send_command __tracepoint_android_vh_ufs_send_tm_command @@ -3074,6 +3163,10 @@ __tracepoint_ipi_entry __tracepoint_ipi_raise __tracepoint_irq_handler_entry + __tracepoint_net_dev_queue + __tracepoint_net_dev_xmit + __tracepoint_netif_receive_skb + __tracepoint_netif_rx __tracepoint_pelt_se_tp tracepoint_probe_register tracepoint_probe_register_prio @@ -3556,3 +3649,4 @@ xhci_ring_cmd_db xhci_ring_free xhci_trb_virt_to_dma + zero_pfn diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index 34770c16c5b8..f3e65159f870 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -129,6 +129,7 @@ cgroup_path_ns cgroup_taskset_first cgroup_taskset_next + check_move_unevictable_pages __check_object_size check_preempt_curr check_zeroed_user @@ -881,6 +882,7 @@ extcon_get_edev_name extcon_get_extcon_dev extcon_get_property + extcon_get_property_capability extcon_get_state extcon_register_notifier extcon_set_state_sync @@ -1658,6 +1660,7 @@ page_endio page_mapping __page_pinner_migration_failed + __pagevec_release panic panic_notifier_list panic_timeout @@ -1965,7 
+1968,6 @@ register_kretprobe register_memory_notifier register_module_notifier - register_qdisc register_netdev register_netdevice register_netdevice_notifier @@ -1973,6 +1975,7 @@ register_oom_notifier register_pernet_device register_pm_notifier + register_qdisc register_reboot_notifier register_restart_handler __register_rpmsg_driver @@ -2194,6 +2197,7 @@ sg_init_one sg_init_table sg_miter_next + sg_miter_skip sg_miter_start sg_miter_stop sg_next @@ -2233,6 +2237,7 @@ skb_complete_wifi_ack skb_copy skb_copy_bits + skb_copy_datagram_from_iter skb_copy_datagram_iter skb_copy_expand skb_dequeue @@ -2321,6 +2326,7 @@ snprintf soc_device_register soc_device_unregister + sock_alloc_send_pskb sock_alloc_send_skb __sock_create sock_create_kern @@ -2682,6 +2688,7 @@ __tracepoint_android_vh_cpu_idle_exit __tracepoint_android_vh_cpuidle_psci_enter __tracepoint_android_vh_cpuidle_psci_exit + __tracepoint_android_vh_disable_thermal_cooling_stats __tracepoint_android_vh_dump_throttled_rt_tasks __tracepoint_android_vh_freq_table_limits __tracepoint_android_vh_ftrace_dump_buffer @@ -2697,6 +2704,7 @@ __tracepoint_android_vh_jiffies_update __tracepoint_android_vh_logbuf __tracepoint_android_vh_logbuf_pr_cont + __tracepoint_android_vh_madvise_cold_or_pageout __tracepoint_android_vh_oom_check_panic __tracepoint_android_vh_printk_hotplug __tracepoint_android_vh_process_killed @@ -3020,6 +3028,7 @@ wait_for_completion_interruptible_timeout wait_for_completion_killable wait_for_completion_timeout + wait_on_page_bit __wait_rcu_gp wait_woken __wake_up diff --git a/android/abi_gki_aarch64_rockchip b/android/abi_gki_aarch64_rockchip index f048bf0063f0..aabe57eaca5f 100644 --- a/android/abi_gki_aarch64_rockchip +++ b/android/abi_gki_aarch64_rockchip @@ -6,7 +6,9 @@ alloc_chrdev_region __alloc_disk_node __alloc_pages_nodemask + __alloc_skb alloc_workqueue + anon_inode_getfd __arch_copy_from_user __arch_copy_to_user arm64_const_caps_ready @@ -15,9 +17,13 @@ atomic_notifier_call_chain atomic_notifier_chain_register atomic_notifier_chain_unregister + _bcd2bin + bcmp bdget_disk bdput - __bitmap_set + _bin2bcd + __bitmap_and + __bitmap_andnot blk_cleanup_queue blk_execute_rq_nowait blk_mq_free_request @@ -36,22 +42,60 @@ blk_queue_physical_block_size blk_rq_map_user blk_rq_unmap_user + blocking_notifier_call_chain + blocking_notifier_chain_register + blocking_notifier_chain_unregister + bpf_trace_run1 + bpf_trace_run2 + bpf_trace_run3 + bpf_trace_run4 + bpf_trace_run5 + bpf_trace_run6 + bpf_trace_run7 + bus_find_device + bus_register + bus_set_iommu + bus_unregister + cancel_delayed_work cancel_delayed_work_sync cancel_work_sync + capable + cdev_add + cdev_del cdev_device_add cdev_device_del cdev_init + cec_allocate_adapter + cec_delete_adapter + cec_fill_conn_info_from_drm + cec_notifier_cec_adap_register + cec_notifier_cec_adap_unregister + cec_notifier_conn_register + cec_notifier_conn_unregister + cec_notifier_set_phys_addr + cec_notifier_set_phys_addr_from_edid + cec_queue_pin_hpd_event + cec_received_msg_ts + cec_register_adapter + cec_transmit_attempt_done_ts + cec_unregister_adapter __cfi_slowpath __check_object_size __class_create + class_create_file_ns class_destroy class_for_each_device + __class_register + class_remove_file_ns + class_unregister + __ClearPageMovable clk_bulk_disable clk_bulk_enable clk_bulk_prepare clk_bulk_unprepare clk_disable clk_enable + clk_gate_ops clk_get __clk_get_name clk_get_parent @@ -59,6 +103,7 @@ clk_hw_get_flags clk_hw_get_name clk_hw_get_parent + clk_hw_get_parent_by_index 
clk_hw_get_rate __clk_mux_determine_rate clk_notifier_register @@ -67,6 +112,7 @@ clk_put clk_register clk_round_rate + clk_set_parent clk_set_phase clk_set_rate clk_unprepare @@ -77,6 +123,9 @@ complete completion_done __const_udelay + consume_skb + cpu_bit_bitmap + cpufreq_cpu_get __cpufreq_driver_target cpufreq_generic_suspend cpufreq_register_governor @@ -87,12 +136,26 @@ __cpuhp_setup_state cpu_hwcap_keys cpu_hwcaps + cpu_latency_qos_add_request + cpu_latency_qos_update_request cpumask_next cpu_number __cpu_online_mask __cpu_possible_mask cpus_read_lock cpus_read_unlock + crc16 + crc32_le + crypto_aead_decrypt + crypto_aead_encrypt + crypto_aead_setauthsize + crypto_aead_setkey + crypto_ahash_setkey + crypto_alloc_aead + crypto_alloc_ahash + crypto_alloc_shash + crypto_alloc_skcipher + crypto_cipher_encrypt_one crypto_destroy_tfm crypto_inc __crypto_memneq @@ -100,39 +163,71 @@ crypto_register_alg crypto_register_scomp crypto_register_shash + crypto_register_template + crypto_shash_update + crypto_skcipher_decrypt + crypto_skcipher_encrypt + crypto_skcipher_setkey crypto_unregister_aead crypto_unregister_alg crypto_unregister_scomp crypto_unregister_shash + crypto_unregister_template __crypto_xor + _ctype + debugfs_attr_read + debugfs_attr_write debugfs_create_dir debugfs_create_file debugfs_create_regset32 debugfs_remove + debugfs_rename + default_llseek delayed_work_timer_fn del_gendisk del_timer del_timer_sync - desc_to_gpio destroy_workqueue + dev_close dev_driver_string _dev_err dev_err_probe + devfreq_add_governor devfreq_recommended_opp + devfreq_register_opp_notifier + devfreq_remove_governor + devfreq_resume_device + devfreq_suspend_device + devfreq_unregister_opp_notifier dev_fwnode + dev_get_regmap + device_add device_add_disk device_create + device_create_file + device_del device_destroy device_get_child_node_count + device_get_match_data + device_get_named_child_node device_get_next_child_node device_initialize device_init_wakeup device_link_add device_link_del + device_match_name device_property_present device_property_read_string device_property_read_u32_array + device_property_read_u8_array + device_release_driver + device_remove_file + device_set_wakeup_capable + device_set_wakeup_enable + device_unregister + device_wakeup_enable _dev_info + __dev_kfree_skb_any devm_add_action devm_clk_bulk_get devm_clk_bulk_get_all @@ -143,6 +238,7 @@ devm_devfreq_add_device devm_devfreq_event_add_edev devm_devfreq_register_opp_notifier + devm_device_add_group devm_extcon_dev_allocate devm_extcon_dev_register devm_free_irq @@ -154,9 +250,14 @@ devm_gpiod_get_index_optional devm_gpiod_get_optional devm_gpio_request + devm_gpio_request_one + devm_iio_channel_get + devm_iio_device_alloc + __devm_iio_device_register devm_input_allocate_device devm_ioremap devm_ioremap_resource + devm_kasprintf devm_kfree devm_kmalloc devm_kmemdup @@ -165,11 +266,14 @@ devm_mfd_add_devices devm_nvmem_register devm_of_clk_add_hw_provider + devm_of_phy_get __devm_of_phy_provider_register devm_phy_create devm_phy_get + devm_phy_optional_get devm_pinctrl_get devm_pinctrl_register + devm_pinctrl_register_and_init devm_platform_get_and_ioremap_resource devm_platform_ioremap_resource devm_platform_ioremap_resource_byname @@ -183,42 +287,76 @@ devm_regulator_get devm_regulator_get_optional devm_regulator_register + devm_remove_action devm_request_threaded_irq devm_reset_control_array_get __devm_reset_control_get devm_snd_dmaengine_pcm_register + devm_snd_soc_register_card devm_snd_soc_register_component + 
devm_usb_get_phy + _dev_notice + dev_open + dev_pm_domain_detach dev_pm_opp_find_freq_ceil + dev_pm_opp_find_freq_floor dev_pm_opp_get_opp_count + dev_pm_opp_get_opp_table dev_pm_opp_get_voltage dev_pm_opp_of_get_sharing_cpus + dev_pm_opp_of_remove_table dev_pm_opp_put - dev_pm_opp_remove + dev_pm_opp_put_opp_table + dev_pm_opp_put_regulators + dev_pm_opp_register_set_opp_helper + dev_pm_opp_set_rate + dev_pm_opp_set_regulators + dev_pm_opp_set_supported_hw + dev_pm_opp_unregister_set_opp_helper + dev_printk devres_add devres_alloc_node devres_free dev_set_name _dev_warn disable_irq + disable_irq_nosync dma_alloc_attrs dma_buf_attach + dma_buf_begin_cpu_access dma_buf_detach + dma_buf_end_cpu_access dma_buf_export dma_buf_fd + dma_buf_get dma_buf_map_attachment + dma_buf_mmap dma_buf_put dma_buf_unmap_attachment dma_buf_vmap dma_buf_vunmap + dma_contiguous_default_area + dma_fence_add_callback + dma_fence_context_alloc + dma_fence_get_status + dma_fence_init + dma_fence_release + dma_fence_signal dma_free_attrs dma_get_sgtable_attrs dma_heap_add + dma_heap_get_dev dma_heap_get_name dmam_alloc_attrs dma_map_page_attrs + dma_map_resource dma_map_sg_attrs dmam_free_coherent dma_mmap_attrs + dma_pool_alloc + dma_pool_create + dma_pool_destroy + dma_pool_free dma_release_channel dma_request_chan dma_set_coherent_mask @@ -228,27 +366,40 @@ dma_sync_single_for_cpu dma_sync_single_for_device dma_unmap_page_attrs + dma_unmap_resource dma_unmap_sg_attrs + down down_read down_write + driver_register driver_unregister drm_add_edid_modes + drm_add_modes_noedid drm_atomic_get_crtc_state drm_atomic_get_new_connector_for_encoder drm_atomic_helper_bridge_destroy_state drm_atomic_helper_bridge_duplicate_state + drm_atomic_helper_bridge_propagate_bus_fmt drm_atomic_helper_bridge_reset drm_atomic_helper_connector_destroy_state drm_atomic_helper_connector_duplicate_state drm_atomic_helper_connector_reset drm_bridge_add drm_bridge_attach + drm_bridge_detect + drm_bridge_get_modes + drm_bridge_hpd_notify drm_bridge_remove drm_compat_ioctl drm_connector_attach_encoder drm_connector_cleanup + drm_connector_has_possible_encoder drm_connector_init drm_connector_init_with_ddc + drm_connector_list_iter_begin + drm_connector_list_iter_end + drm_connector_list_iter_next + drm_connector_unregister drm_connector_update_edid_property __drm_dbg drm_detect_hdmi_monitor @@ -260,9 +411,24 @@ drm_dev_register drm_dev_unregister drm_display_info_set_bus_formats + drm_display_mode_from_videomode + drm_display_mode_to_videomode + drm_dp_aux_register + drm_dp_aux_unregister drm_dp_bw_code_to_link_rate + drm_dp_channel_eq_ok + drm_dp_dpcd_read + drm_dp_dpcd_read_link_status + drm_dp_dpcd_write + drm_dp_get_phy_test_pattern + drm_dp_link_rate_to_bw_code + drm_dp_link_train_channel_eq_delay + drm_dp_link_train_clock_recovery_delay + drm_dp_read_dpcd_caps + drm_dp_set_phy_test_pattern __drm_err drm_gem_dumb_map_offset + drm_gem_get_pages drm_gem_handle_create drm_gem_mmap drm_gem_mmap_obj @@ -272,59 +438,103 @@ drm_gem_object_release drm_gem_prime_fd_to_handle drm_gem_prime_handle_to_fd + drm_gem_put_pages drm_gem_vm_close drm_get_edid drm_hdmi_avi_infoframe_from_display_mode + drm_hdmi_avi_infoframe_quant_range + drm_hdmi_infoframe_set_hdr_metadata drm_hdmi_vendor_infoframe_from_display_mode drm_helper_hpd_irq_event drm_helper_probe_single_connector_modes drm_ioctl + drm_kms_helper_hotplug_event + drm_kms_helper_is_poll_worker drm_match_cea_mode drm_mode_copy drm_mode_create drm_mode_duplicate + drm_mode_is_420_also + 
drm_mode_is_420_only drm_mode_probed_add + drm_modeset_acquire_fini + drm_modeset_acquire_init + drm_modeset_backoff + drm_modeset_drop_locks + drm_modeset_lock drm_mode_set_name + drm_mode_validate_driver drm_mode_vrefresh drm_object_attach_property drm_of_find_panel_or_bridge drm_open + drm_panel_add drm_panel_disable drm_panel_enable drm_panel_get_modes + drm_panel_init drm_panel_prepare drm_panel_unprepare drm_poll - drm_prime_gem_destroy drm_prime_pages_to_sg drm_prime_sg_to_page_addr_arrays + drm_property_replace_global_blob drm_read drm_release - dummy_irq_chip + drm_scdc_read + drm_scdc_set_high_tmds_clock_ratio + drm_scdc_set_scrambling + drm_scdc_write enable_irq + eth_mac_addr + eth_platform_get_mac_address + ethtool_op_get_link + ethtool_op_get_ts_info + eth_type_trans + eth_validate_addr + event_triggers_call extcon_get_edev_by_phandle extcon_get_state extcon_register_notifier + extcon_set_property_capability + extcon_set_state extcon_set_state_sync extcon_unregister_notifier failure_tracking + fasync_helper + fd_install find_next_bit find_next_zero_bit + find_vma finish_wait + flush_dcache_page flush_delayed_work flush_work flush_workqueue fpsimd_context_busy - frame_vector_to_pages + fput free_irq + free_netdev __free_pages free_pages + free_percpu + freq_qos_add_request + freq_qos_remove_request + freq_qos_update_request + fwnode_get_name fwnode_handle_put fwnode_property_present fwnode_property_read_string fwnode_property_read_u32_array gcd + generic_file_llseek generic_handle_irq + generic_mii_ioctl + __genphy_config_aneg + genphy_read_status + genphy_resume + genphy_soft_reset + genphy_suspend gen_pool_add_owner gen_pool_alloc_algo_owner gen_pool_create @@ -332,6 +542,11 @@ gen_pool_free_owner get_cpu_device get_device + __get_free_pages + get_random_bytes + get_unused_fd_flags + get_user_pages_remote + get_zeroed_page gic_nonsecure_priorities gpiochip_add_pin_range gpiochip_generic_free @@ -340,17 +555,27 @@ gpiod_cansleep gpiod_direction_input gpiod_direction_output + gpiod_direction_output_raw gpiod_get_optional gpiod_get_raw_value gpiod_get_value gpiod_get_value_cansleep gpiod_set_consumer_name + gpiod_set_raw_value + gpiod_set_raw_value_cansleep gpiod_set_value gpiod_set_value_cansleep gpiod_to_irq + gpio_free + gpio_request gpio_to_desc handle_nested_irq handle_simple_irq + hdmi_audio_infoframe_init + hdmi_audio_infoframe_pack + hdmi_drm_infoframe_pack + hdmi_infoframe_pack + hdmi_vendor_infoframe_pack hid_debug hid_hw_close hid_hw_open @@ -361,37 +586,77 @@ __hid_request hid_unregister_driver hid_validate_values + hrtimer_cancel + hrtimer_forward + hrtimer_init + hrtimer_start_range_ns i2c_adapter_type i2c_add_adapter + i2c_add_numbered_adapter i2c_del_adapter i2c_del_driver i2c_get_adapter i2c_put_adapter i2c_register_driver + i2c_smbus_read_byte i2c_smbus_read_byte_data + i2c_smbus_read_i2c_block_data + i2c_smbus_read_word_data + i2c_smbus_write_byte_data + i2c_smbus_write_i2c_block_data __i2c_smbus_xfer i2c_smbus_xfer i2c_transfer i2c_transfer_buffer_flags + ida_alloc_range + ida_destroy + ida_free idr_alloc idr_destroy idr_find + idr_for_each + idr_get_next idr_preload idr_remove iio_buffer_init iio_buffer_put + iio_device_attach_buffer + iio_push_to_buffers + iio_read_channel_processed + init_net __init_rwsem __init_swait_queue_head init_timer_key init_wait_entry __init_waitqueue_head + input_allocate_device input_event input_ff_create_memless + input_free_device + input_mt_init_slots + input_mt_report_slot_state input_register_device input_set_abs_params + 
input_set_capability + input_unregister_device iommu_attach_device + iommu_attach_group iommu_detach_device + iommu_detach_group + iommu_device_register + iommu_device_sysfs_add + iommu_device_sysfs_remove + iommu_get_dma_cookie iommu_get_domain_for_dev + iommu_group_alloc + iommu_group_get + iommu_group_put + iommu_group_ref_get + iommu_map + iommu_map_sg + iommu_put_dma_cookie + iommu_set_fault_handler + iommu_unmap __ioremap iounmap irq_create_mapping_affinity @@ -400,12 +665,14 @@ irq_find_mapping irq_get_irq_data irq_modify_status + irq_set_affinity_hint irq_set_chained_handler_and_data irq_set_chip irq_set_chip_and_handler_name irq_set_chip_data irq_set_irq_type irq_set_irq_wake + irq_to_desc is_vmalloc_addr jiffies jiffies_to_msecs @@ -419,22 +686,42 @@ __kfifo_out kfree kfree_const + kfree_sensitive + kfree_skb + kill_fasync + kimage_voffset __kmalloc kmalloc_caches + kmalloc_order_trace kmem_cache_alloc kmem_cache_alloc_trace kmem_cache_create kmem_cache_destroy kmem_cache_free kmemdup + kobject_create_and_add + kobject_init_and_add + kobject_put + kobject_uevent_env + kstrdup kstrdup_const kstrtoint kstrtouint + kstrtouint_from_user kstrtoull + kthread_create_on_node + kthread_create_worker + kthread_destroy_worker + kthread_flush_worker + kthread_queue_work + kthread_should_stop + kthread_stop ktime_get ktime_get_mono_fast_ns + ktime_get_real_ts64 ktime_get_with_offset kvfree + kvfree_call_rcu kvmalloc_node led_classdev_register_ext led_classdev_unregister @@ -443,10 +730,17 @@ led_trigger_unregister __list_add_valid __list_del_entry_valid + __local_bh_enable_ip + __lock_page __log_post_read_mmio __log_read_mmio __log_write_mmio lzo1x_decompress_safe + mdiobus_alloc_size + mdiobus_free + mdiobus_read + mdiobus_unregister + mdiobus_write media_create_pad_link media_device_init __media_device_register @@ -461,24 +755,63 @@ media_pipeline_stop memcpy memdup_user + memmove memset memstart_addr + mfd_add_devices mfd_remove_devices + mii_check_media + mii_ethtool_gset + mii_nway_restart + mipi_dsi_attach + mipi_dsi_create_packet + mipi_dsi_detach + mipi_dsi_device_register_full + mipi_dsi_device_unregister + mipi_dsi_host_register + mipi_dsi_host_unregister + misc_deregister + misc_register + mmc_cqe_request_done mmc_of_parse + mmc_request_done + __mmdrop mod_delayed_work_on mod_timer + __module_get module_layout module_put __msecs_to_jiffies msleep + msleep_interruptible __mutex_init mutex_is_locked mutex_lock + mutex_lock_interruptible + mutex_trylock mutex_unlock + napi_gro_receive + __netdev_alloc_skb + netdev_err + netdev_info + netdev_update_features + netdev_warn + netif_carrier_off + netif_carrier_on + netif_rx + netif_rx_ni + netif_tx_wake_queue + netlink_unicast + nla_memcpy + nla_put + nla_reserve no_llseek + nonseekable_open noop_llseek nr_cpu_ids + ns_to_kernel_old_timeval ns_to_timespec64 + nvmem_cell_get nvmem_cell_put nvmem_cell_read of_address_to_resource @@ -487,6 +820,8 @@ of_clk_del_provider of_clk_get of_clk_get_by_name + of_clk_get_parent_count + of_clk_set_defaults of_clk_src_onecell_get of_clk_src_simple_get of_count_phandle_with_args @@ -494,17 +829,30 @@ of_device_get_match_data of_device_is_available of_device_is_compatible + of_drm_find_bridge + of_drm_find_panel + of_find_compatible_node of_find_device_by_node + of_find_i2c_device_by_node + of_find_matching_node_and_match + of_find_mipi_dsi_host_by_node of_find_node_by_name + of_find_node_opts_by_path of_find_property of_get_child_by_name + of_get_compatible_child + of_get_drm_display_mode + 
of_get_i2c_adapter_by_node of_get_named_gpio_flags of_get_next_available_child of_get_next_child of_get_parent of_get_property of_get_regulator_init_data + of_graph_get_endpoint_by_regs + of_graph_get_next_endpoint of_graph_get_remote_node + of_graph_get_remote_port_parent of_graph_parse_endpoint of_iomap of_irq_get_byname @@ -514,33 +862,55 @@ of_node_name_eq of_nvmem_cell_get of_parse_phandle + of_parse_phandle_with_args of_phy_simple_xlate of_property_count_elems_of_size of_property_match_string of_property_read_string of_property_read_string_helper of_property_read_u32_index + of_property_read_u64 of_property_read_variable_u32_array + of_property_read_variable_u8_array of_regulator_match of_reserved_mem_device_init_by_idx + __of_reset_control_get of_usb_get_dr_mode_by_phy __page_pinner_migration_failed panic_notifier_list + param_array_ops param_ops_bool + param_ops_byte + param_ops_charp param_ops_int param_ops_string param_ops_uint + pcie_capability_clear_and_set_word + pci_read_config_dword + pci_write_config_dword PDE_DATA __per_cpu_offset + perf_trace_buf_alloc + perf_trace_run_bpf_submit pfn_valid + phy_attached_info phy_configure + phy_drivers_register + phy_drivers_unregister phy_exit phy_init phy_power_off phy_power_on phy_set_mode_ext + pinconf_generic_dt_free_map pinconf_generic_dt_node_to_map pinctrl_dev_get_drvdata + pinctrl_enable + pinctrl_generic_add_group + pinctrl_generic_get_group + pinctrl_generic_get_group_count + pinctrl_generic_get_group_name + pinctrl_generic_get_group_pins pinctrl_gpio_direction_input pinctrl_gpio_direction_output pinctrl_lookup_state @@ -548,6 +918,11 @@ pinctrl_pm_select_sleep_state pinctrl_select_state pinctrl_utils_free_map + pinmux_generic_add_function + pinmux_generic_get_function + pinmux_generic_get_function_count + pinmux_generic_get_function_groups + pinmux_generic_get_function_name platform_bus_type platform_device_put platform_device_register_full @@ -557,10 +932,15 @@ platform_driver_unregister platform_get_irq platform_get_irq_byname + platform_get_irq_optional platform_get_resource platform_get_resource_byname + platform_irq_count + pm_clk_create + pm_clk_destroy pm_power_off __pm_relax + pm_relax __pm_runtime_disable pm_runtime_enable pm_runtime_force_resume @@ -573,20 +953,37 @@ __pm_runtime_suspend __pm_runtime_use_autosuspend __pm_stay_awake + pm_stay_awake pm_wakeup_ws_event + power_supply_am_i_supplied power_supply_changed power_supply_class + power_supply_get_battery_info + power_supply_get_by_name + power_supply_get_by_phandle power_supply_get_drvdata + power_supply_get_property + power_supply_put_battery_info + power_supply_register + power_supply_reg_notifier + power_supply_unregister + prandom_bytes + prandom_u32 preempt_schedule preempt_schedule_notrace prepare_to_wait_event print_hex_dump printk + proc_create proc_create_data + proc_mkdir + pskb_expand_head + __pskb_pull_tail put_device put_disk __put_page __put_task_struct + put_unused_fd pwm_adjust_config pwm_apply_state queue_delayed_work_on @@ -597,10 +994,16 @@ _raw_spin_lock_bh _raw_spin_lock_irq _raw_spin_lock_irqsave + _raw_spin_trylock _raw_spin_unlock _raw_spin_unlock_bh _raw_spin_unlock_irq _raw_spin_unlock_irqrestore + rb_erase + rb_insert_color + rb_next + __rcu_read_lock + __rcu_read_unlock rdev_get_drvdata rdev_get_id refcount_warn_saturate @@ -609,13 +1012,21 @@ regcache_sync __register_chrdev register_chrdev_region + register_inetaddr_notifier + register_netdev + register_netdevice + register_netdevice_notifier + register_pm_notifier 
register_reboot_notifier + register_shrinker regmap_bulk_read regmap_bulk_write regmap_field_read regmap_field_update_bits_base regmap_irq_get_domain regmap_irq_get_virq + regmap_multi_reg_write + regmap_raw_read regmap_raw_write regmap_read regmap_update_bits_base @@ -626,6 +1037,8 @@ regulator_disable_regmap regulator_enable regulator_enable_regmap + regulator_get + regulator_get_optional regulator_get_voltage regulator_get_voltage_sel_regmap regulator_is_enabled @@ -634,19 +1047,34 @@ regulator_list_voltage_linear_range regulator_map_voltage_linear regulator_map_voltage_linear_range + regulator_put regulator_set_voltage regulator_set_voltage_sel_regmap regulator_set_voltage_time_sel + regulator_unregister + release_firmware remap_pfn_range remove_proc_entry + report_iommu_fault + request_firmware request_threaded_irq reset_control_assert reset_control_deassert revalidate_disk_size + rfkill_alloc + rfkill_destroy + rfkill_register + rfkill_set_hw_state + rfkill_unregister + round_jiffies_relative rtc_class_open rtc_read_time + rtc_time64_to_tm rtc_tm_to_time64 rtc_valid_tm + rtnl_is_locked + rtnl_lock + rtnl_unlock scatterwalk_map_and_copy sched_clock schedule @@ -659,6 +1087,7 @@ scsi_ioctl_block_when_processing_errors sdev_prefix_printk sdhci_add_host + sdhci_execute_tuning sdhci_get_property sdhci_pltfm_clk_get_max_clock sdhci_pltfm_free @@ -670,117 +1099,277 @@ sdhci_suspend_host seq_lseek seq_printf + seq_putc seq_puts seq_read set_page_dirty_lock + __SetPageMovable sg_alloc_table sg_alloc_table_from_pages sg_free_table + sg_init_one + sg_init_table + sg_nents sg_next + __sg_page_iter_next + __sg_page_iter_start + simple_attr_open + simple_attr_release + simple_read_from_buffer simple_strtol + simple_strtoul single_open single_release + skb_add_rx_frag + skb_clone + skb_copy + skb_copy_bits + skb_copy_expand + skb_pull + skb_push + skb_put + skb_trim + skcipher_alloc_instance_simple + skcipher_register_instance skcipher_walk_aead_decrypt skcipher_walk_aead_encrypt skcipher_walk_done + skcipher_walk_virt snd_pcm_format_width + snd_soc_add_component_controls + snd_soc_add_dai_controls + snd_soc_card_jack_new snd_soc_component_read + snd_soc_component_set_jack snd_soc_component_update_bits snd_soc_component_write + snd_soc_dai_set_sysclk + snd_soc_dapm_add_routes + snd_soc_dapm_disable_pin_unlocked snd_soc_dapm_force_enable_pin_unlocked snd_soc_dapm_get_enum_double + snd_soc_dapm_get_pin_switch snd_soc_dapm_get_volsw + snd_soc_dapm_info_pin_switch + snd_soc_dapm_new_controls snd_soc_dapm_put_enum_double + snd_soc_dapm_put_pin_switch snd_soc_dapm_put_volsw + snd_soc_dapm_sync_unlocked + snd_soc_get_dai_name snd_soc_get_enum_double snd_soc_get_volsw snd_soc_info_enum_double snd_soc_info_volsw snd_soc_jack_add_gpios snd_soc_jack_report + snd_soc_of_parse_audio_routing + snd_soc_of_parse_card_name + snd_soc_of_parse_daifmt + snd_soc_params_to_frame_size + snd_soc_pm_ops snd_soc_put_enum_double snd_soc_put_volsw + snd_soc_register_component + snd_soc_unregister_component snprintf + sort __spi_register_driver spi_sync sprintf sscanf __stack_chk_fail __stack_chk_guard + strcasecmp + strchr strcmp + strcpy strlcpy strlen + strncasecmp + strncat strncmp strncpy strnlen + strrchr strscpy + strsep strstr + __sw_hweight16 + __sw_hweight32 + __sw_hweight64 + __sw_hweight8 + sync_file_create + sync_file_get_fence synchronize_irq + synchronize_net + synchronize_rcu syscon_node_to_regmap syscon_regmap_lookup_by_phandle sysfs_create_file_ns sysfs_create_group sysfs_create_link __sysfs_match_string + 
sysfs_remove_file_ns sysfs_remove_group sysfs_remove_link + sysfs_streq + system_freezable_wq + system_highpri_wq + system_long_wq + system_power_efficient_wq + system_state system_unbound_wq system_wq tasklet_init tasklet_kill __tasklet_schedule + tcpm_tcpc_reset thermal_zone_get_zone_by_name + trace_event_buffer_commit + trace_event_buffer_reserve + trace_event_ignore_this_pid + trace_event_raw_init + trace_event_reg + trace_handle_return __traceiter_rwmmio_post_read __traceiter_rwmmio_read __traceiter_rwmmio_write __tracepoint_rwmmio_post_read __tracepoint_rwmmio_read __tracepoint_rwmmio_write + trace_print_array_seq + trace_print_symbols_seq + trace_raw_output_prep + trace_seq_printf + try_module_get + tty_termios_baud_rate + typec_switch_get_drvdata + typec_switch_register + typec_switch_unregister + __udelay + unlock_page __unregister_chrdev unregister_chrdev_region + unregister_inetaddr_notifier + unregister_netdev + unregister_netdevice_notifier + unregister_netdevice_queue unregister_reboot_notifier + unregister_shrinker + up update_devfreq up_read up_write + usb_add_hcd + usb_alloc_urb + usb_autopm_get_interface + usb_autopm_put_interface + usb_calc_bus_time + usb_control_msg + usb_create_hcd usb_debug_root + usb_deregister + usb_deregister_dev + usb_disabled + usb_free_urb + usb_get_dev + usb_get_intf + usb_hcd_check_unlink_urb + usb_hcd_giveback_urb + usb_hcd_link_urb_to_ep + usb_hcd_resume_root_hub + usb_hcd_unlink_urb_from_ep + usb_hid_driver + usb_match_id + usbnet_change_mtu + usbnet_defer_kevent + usbnet_disconnect + usbnet_get_drvinfo + usbnet_get_endpoints + usbnet_get_link + usbnet_get_link_ksettings + usbnet_get_msglevel + usbnet_get_stats64 + usbnet_link_change + usbnet_nway_reset + usbnet_open + usbnet_probe + usbnet_read_cmd + usbnet_read_cmd_nopm + usbnet_resume + usbnet_set_link_ksettings + usbnet_set_msglevel + usbnet_skb_return + usbnet_start_xmit + usbnet_stop + usbnet_suspend + usbnet_tx_timeout + usbnet_write_cmd + usbnet_write_cmd_async + usbnet_write_cmd_nopm + usb_poison_urb + usb_put_dev + usb_put_hcd + usb_put_intf + usb_register_dev + usb_register_driver + usb_remove_hcd + usb_set_interface + usb_submit_urb + usb_unpoison_urb + __usecs_to_jiffies usleep_range uuid_null v4l2_async_notifier_cleanup v4l2_async_notifier_init v4l2_async_notifier_register + v4l2_async_notifier_unregister v4l2_async_register_subdev v4l2_async_subdev_notifier_register v4l2_async_unregister_subdev v4l2_ctrl_find v4l2_ctrl_g_ctrl + v4l2_ctrl_g_ctrl_int64 v4l2_ctrl_handler_free v4l2_ctrl_handler_init_class + __v4l2_ctrl_handler_setup v4l2_ctrl_handler_setup + __v4l2_ctrl_modify_range + v4l2_ctrl_new_custom + v4l2_ctrl_new_int_menu v4l2_ctrl_new_std + v4l2_ctrl_new_std_menu v4l2_ctrl_new_std_menu_items + __v4l2_ctrl_s_ctrl + __v4l2_ctrl_s_ctrl_int64 + v4l2_ctrl_subdev_subscribe_event v4l2_device_register v4l2_device_register_subdev __v4l2_device_register_subdev_nodes v4l2_device_unregister v4l2_device_unregister_subdev + v4l2_enum_dv_timings_cap v4l2_event_queue v4l2_event_subdev_unsubscribe v4l2_event_subscribe + v4l2_event_unsubscribe v4l2_fh_open v4l2_i2c_subdev_init + v4l2_match_dv_timings v4l2_pipeline_pm_get v4l2_pipeline_pm_put + v4l2_print_dv_timings v4l2_querymenu + v4l2_src_change_event_subdev_subscribe v4l2_subdev_call_wrappers v4l2_subdev_init v4l2_subdev_link_validate + v4l2_subdev_notify_event v4l2_type_names + v4l2_valid_dv_timings vabits_actual vb2_buffer_done - vb2_common_vm_ops - vb2_create_framevec - vb2_destroy_framevec - vb2_dma_contig_memops vb2_fop_mmap 
vb2_fop_poll vb2_fop_release @@ -810,14 +1399,17 @@ vmap vm_get_page_prot vm_map_pages - vm_map_ram - vm_unmap_ram + vsnprintf vunmap + vzalloc wait_for_completion wait_for_completion_timeout __wake_up + wake_up_process wakeup_source_add + wakeup_source_remove __warn_printk + work_busy # required by 8250_dw.ko of_device_is_big_endian @@ -833,15 +1425,12 @@ serial8250_suspend_port serial8250_unregister_port serial8250_update_uartclk - tty_termios_baud_rate # required by act8865-regulator.ko regulator_set_pull_down_regmap # required by adc-keys.ko - devm_iio_channel_get iio_get_channel_type - iio_read_channel_processed input_set_poll_interval input_setup_polling @@ -849,20 +1438,202 @@ ce_aes_expandkey # required by analogix_dp.ko - drm_dp_aux_register - drm_dp_aux_unregister - drm_dp_dpcd_read - drm_dp_dpcd_write drm_dp_start_crc drm_dp_stop_crc +# required by aspm_ext.ko + pci_find_capability + pci_find_ext_capability + +# required by bcmdhd.ko + alloc_etherdev_mqs + complete_and_exit + down_interruptible + down_timeout + iwe_stream_add_event + iwe_stream_add_point + iwe_stream_add_value + __kfifo_init + kobject_uevent + mmc_set_data_timeout + mmc_sw_reset + mmc_wait_for_req + netif_set_xps_queue + __netlink_kernel_create + netlink_kernel_release + nla_append + nla_put_nohdr + __nlmsg_put + _raw_read_lock_bh + _raw_read_unlock_bh + sched_set_fifo_low + sdio_claim_host + sdio_disable_func + sdio_enable_func + sdio_f0_readb + sdio_f0_writeb + sdio_get_host_pm_caps + sdio_memcpy_fromio + sdio_memcpy_toio + sdio_readb + sdio_readl + sdio_readsb + sdio_readw + sdio_register_driver + sdio_release_host + sdio_retune_crc_disable + sdio_retune_crc_enable + sdio_retune_hold_now + sdio_retune_release + sdio_set_block_size + sdio_set_host_pm_flags + sdio_unregister_driver + sdio_writeb + sdio_writel + sdio_writew + set_cpus_allowed_ptr + __skb_pad + skb_realloc_headroom + sock_wfree + sprint_symbol + strcat + strspn + sys_tz + unregister_pm_notifier + wireless_send_event + +# required by bifrost_kbase.ko + __arch_clear_user + __bitmap_equal + __bitmap_or + __bitmap_weight + __bitmap_xor + cache_line_size + clear_page + complete_all + debugfs_create_bool + devfreq_add_device + devfreq_cooling_unregister + devfreq_remove_device + dev_pm_opp_find_freq_exact + dma_fence_default_wait + dma_fence_remove_callback + downgrade_write + down_read_trylock + dump_stack + find_get_pid + freezing_slow_path + get_user_pages + get_user_pages_fast + hrtimer_active + iomem_resource + kobject_del + kstrndup + kstrtobool_from_user + ktime_get_raw + ktime_get_raw_ts64 + memchr + of_dma_is_coherent + of_property_read_variable_u64_array + pid_task + pin_user_pages + pin_user_pages_remote + put_pid + rb_first + rb_prev + rb_replace_node + __refrigerator + register_oom_notifier + __release_region + remap_vmalloc_range + __request_region + seq_open + __seq_open_private + seq_release + seq_release_private + seq_write + set_freezable + shmem_file_setup + simple_open + strcspn + system_freezing_cnt + _totalram_pages + __traceiter_gpu_mem_total + trace_output_call + __tracepoint_gpu_mem_total + trace_print_flags_seq + unmap_mapping_range + unpin_user_page + unregister_oom_notifier + vmalloc_user + vmf_insert_pfn_prot + +# required by cdc-wdm.ko + cdc_parse_cdc_header + +# required by cdc_mbim.ko + cdc_ncm_bind_common + cdc_ncm_change_mtu + cdc_ncm_fill_tx_frame + cdc_ncm_rx_verify_ndp16 + cdc_ncm_rx_verify_nth16 + cdc_ncm_select_altsetting + cdc_ncm_unbind + in6_dev_finish_destroy + __ipv6_addr_type + ipv6_stub + +# required by 
cfg80211.ko + bpf_trace_run10 + dev_change_net_namespace + __dev_get_by_index + dev_get_by_index + device_rename + genlmsg_multicast_allns + genlmsg_put + genl_register_family + genl_unregister_family + get_net_ns_by_fd + get_net_ns_by_pid + inet_csk_get_port + init_uts_ns + key_create_or_update + key_put + keyring_alloc + ktime_get_coarse_with_offset + memcmp + netlink_broadcast + netlink_register_notifier + netlink_unregister_notifier + net_ns_type_operations + nla_find + __nla_parse + nla_put_64bit + __nla_validate + of_prop_next_u32 + __put_net + register_pernet_device + request_firmware_nowait + rfkill_blocked + rfkill_pause_polling + rfkill_resume_polling + __sock_create + sock_release + unregister_pernet_device + verify_pkcs7_signature + wireless_nlevent_flush + # required by ch.ko - param_array_ops scsi_device_lookup __scsi_execute scsi_print_sense_hdr scsi_register_driver +# required by clk-link.ko + pm_clk_add + pm_clk_resume + pm_clk_suspend + # required by clk-pwm.ko of_clk_add_hw_provider of_clk_hw_simple_get @@ -872,7 +1643,6 @@ # required by clk-rockchip-regmap.ko clk_hw_get_num_parents - clk_hw_get_parent_by_index divider_recalc_rate divider_round_rate_parent @@ -881,22 +1651,19 @@ clk_divider_ro_ops clk_fixed_factor_ops clk_fractional_divider_ops - clk_gate_ops __clk_get_hw clk_hw_register_composite clk_hw_round_rate + clk_hw_set_parent clk_mux_ops clk_mux_ro_ops - clk_register_composite clk_register_divider_table clk_register_fixed_factor clk_register_gate clk_register_mux_table - divider_get_val match_string register_restart_handler reset_controller_register - __udelay # required by clk-scmi.ko clk_hw_set_rate_range @@ -904,10 +1671,8 @@ scmi_driver_register scmi_driver_unregister -# required by cma_heap.ko - cma_get_name - dma_contiguous_default_area - dma_heap_get_drvdata +# required by cm3218.ko + i2c_smbus_write_word_data # required by cpufreq-dt.ko cpufreq_enable_boost_support @@ -925,9 +1690,6 @@ dev_pm_opp_of_cpumask_add_table dev_pm_opp_of_cpumask_remove_table dev_pm_opp_of_register_em - dev_pm_opp_put_regulators - dev_pm_opp_set_rate - dev_pm_opp_set_regulators dev_pm_opp_set_sharing_cpus policy_has_boost_freq @@ -946,73 +1708,43 @@ # required by cqhci.ko devm_blk_ksm_init - mmc_cqe_request_done -# required by cw2015_battery.ko - device_property_read_u8_array - power_supply_am_i_supplied - power_supply_get_battery_info - power_supply_put_battery_info - regmap_raw_read +# required by cryptodev.ko + crypto_ahash_final + crypto_alloc_akcipher + krealloc + proc_dointvec + register_sysctl_table + sg_last + unregister_sysctl_table -# required by dw-hdmi-cec.ko - cec_allocate_adapter - cec_delete_adapter - cec_notifier_cec_adap_register - cec_notifier_cec_adap_unregister - cec_received_msg_ts - cec_register_adapter - cec_transmit_attempt_done_ts - cec_unregister_adapter - devm_remove_action +# required by cw221x_battery.ko + power_supply_is_system_supplied + +# required by display-connector.ko + drm_atomic_get_new_bridge_state + drm_probe_ddc + +# required by dm9601.ko + mii_link_ok # required by dw-hdmi-hdcp.ko - device_create_file - device_remove_file kstrtobool - misc_deregister - misc_register sha1_init sha1_transform +# required by dw-hdmi-qp.ko + drm_mode_equal + hdmi_avi_infoframe_pack_only + # required by dw-hdmi.ko - bcmp - cancel_delayed_work - cec_fill_conn_info_from_drm - cec_notifier_conn_register - cec_notifier_conn_unregister - cec_notifier_set_phys_addr - cec_notifier_set_phys_addr_from_edid - cec_queue_pin_hpd_event - drm_bridge_hpd_notify 
drm_connector_attach_max_bpc_property drm_default_rgb_quant_range - drm_hdmi_avi_infoframe_quant_range - drm_hdmi_infoframe_set_hdr_metadata - drm_mode_is_420_also - drm_mode_is_420_only - drm_property_replace_global_blob - drm_scdc_read - drm_scdc_set_high_tmds_clock_ratio - drm_scdc_set_scrambling - drm_scdc_write - extcon_set_property_capability - hdmi_drm_infoframe_pack - hdmi_vendor_infoframe_pack - of_get_i2c_adapter_by_node # required by dw-mipi-dsi.ko - debugfs_attr_read - debugfs_attr_write drm_panel_bridge_add_typed drm_panel_bridge_connector drm_panel_bridge_remove - mipi_dsi_create_packet - mipi_dsi_host_register - mipi_dsi_host_unregister - of_drm_find_bridge - simple_attr_open - simple_attr_release # required by dw_mmc-rockchip.ko clk_get_phase @@ -1033,14 +1765,12 @@ mmc_regulator_set_ocr mmc_regulator_set_vqmmc mmc_remove_host - mmc_request_done sdio_signal_irq sg_miter_next sg_miter_start sg_miter_stop # required by dw_wdt.ko - platform_get_irq_optional watchdog_init_timeout watchdog_register_device watchdog_set_restart_priority @@ -1049,54 +1779,35 @@ # required by dwc2.ko __bitmap_clear bitmap_find_next_zero_area_off - device_set_wakeup_capable - device_wakeup_enable - devm_usb_get_phy - hrtimer_cancel - hrtimer_init - hrtimer_start_range_ns + __bitmap_set phy_reset - _raw_spin_trylock usb_add_gadget_udc - usb_add_hcd - usb_calc_bus_time - usb_create_hcd usb_del_gadget_udc - usb_disabled usb_ep_set_maxpacket_limit usb_gadget_giveback_request usb_gadget_map_request usb_gadget_set_state usb_gadget_unmap_request usb_get_dr_mode - usb_hcd_check_unlink_urb - usb_hcd_giveback_urb - usb_hcd_link_urb_to_ep usb_hcd_map_urb_for_dma - usb_hcd_resume_root_hub - usb_hcd_unlink_urb_from_ep usb_hcd_unmap_urb_for_dma usb_hub_clear_tt_buffer usb_phy_set_charger_current - usb_put_hcd - usb_remove_hcd usb_role_switch_get_drvdata usb_role_switch_register usb_role_switch_unregister usb_speed_string usb_wakeup_enabled_descendants -# required by fan53555.ko - gpiod_set_raw_value +# required by dwmac-rockchip.ko + csum_tcpudp_nofold + ip_send_check + of_get_phy_mode # required by fusb302.ko - device_get_named_child_node - disable_irq_nosync extcon_get_extcon_dev fwnode_create_software_node - i2c_smbus_read_i2c_block_data - i2c_smbus_write_byte_data - i2c_smbus_write_i2c_block_data + sched_set_fifo tcpm_cc_change tcpm_pd_hard_reset tcpm_pd_receive @@ -1104,7 +1815,9 @@ tcpm_register_port tcpm_unregister_port tcpm_vbus_change - vsnprintf + +# required by gc2145.ko + v4l2_ctrl_subdev_log_status # required by ghash-ce.ko aes_expandkey @@ -1125,28 +1838,15 @@ irq_gc_set_wake irq_generic_chip_ops irq_get_domain_generic_chip - irq_of_parse_and_map of_pinctrl_get -# required by grf.ko - of_find_matching_node_and_match - # required by hid-alps.ko - down input_alloc_absinfo - input_allocate_device - input_free_device - input_mt_init_slots - input_mt_report_slot_state input_mt_sync_frame - up # required by hid-holtek-kbd.ko usb_ifnum_to_if -# required by hid-ntrig.ko - usb_control_msg - # required by hid-primax.ko hid_report_raw_event @@ -1159,19 +1859,17 @@ i2c_verify_client # required by i2c-gpio.ko + desc_to_gpio i2c_bit_add_numbered_bus # required by i2c-hid.ko - dev_printk hid_add_device hid_allocate_device hid_destroy_device hid_input_report hid_parse_report - i2c_smbus_read_byte # required by i2c-mux.ko - i2c_add_numbered_adapter __i2c_transfer rt_mutex_lock rt_mutex_trylock @@ -1190,19 +1888,21 @@ # required by industrialio-triggered-buffer.ko iio_alloc_pollfunc iio_dealloc_pollfunc - 
iio_device_attach_buffer # required by io-domain.ko _dev_crit regulator_register_notifier regulator_unregister_notifier +# required by kalmia.ko + usb_bulk_msg + # required by kfifo_buf.ko __kfifo_to_user - mutex_lock_interruptible -# required by leds-gpio.ko - devm_gpio_request_one +# required by leds-rgb13h.ko + led_classdev_flash_register_ext + led_classdev_flash_unregister # required by ledtrig-heartbeat.ko avenrun @@ -1213,6 +1913,57 @@ # required by lzo.ko lzo1x_1_compress +# required by mac80211.ko + alloc_netdev_mqs + __alloc_percpu_gfp + arc4_crypt + arc4_setkey + call_rcu + crc32_be + crypto_shash_digest + crypto_shash_finup + crypto_shash_setkey + dev_alloc_name + dev_fetch_sw_netstats + dev_queue_xmit + ether_setup + get_random_u32 + __hw_addr_init + __hw_addr_sync + __hw_addr_unsync + kernel_param_lock + kernel_param_unlock + kfree_skb_list + ktime_get_seconds + netdev_set_default_ethtool_ops + netif_receive_skb + netif_receive_skb_list + netif_tx_stop_all_queues + net_ratelimit + ___pskb_trim + rcu_barrier + register_inet6addr_notifier + rhashtable_free_and_destroy + rhashtable_insert_slow + rhltable_init + __rht_bucket_nested + rht_bucket_nested + rht_bucket_nested_insert + round_jiffies + round_jiffies_up + skb_checksum_help + skb_clone_sk + skb_complete_wifi_ack + skb_dequeue + skb_ensure_writable + __skb_get_hash + __skb_gso_segment + skb_queue_head + skb_queue_purge + skb_queue_tail + unregister_inet6addr_notifier + unregister_netdevice_many + # required by nvme-core.ko bd_set_nr_sectors blk_execute_rq @@ -1241,45 +1992,19 @@ blk_set_queue_dying blk_status_to_errno blk_sync_queue - bpf_trace_run1 - bpf_trace_run2 - bpf_trace_run3 - capable cleanup_srcu_struct - device_add - device_del device_remove_file_self dev_pm_qos_expose_latency_tolerance dev_pm_qos_hide_latency_tolerance dev_pm_qos_update_user_latency_tolerance - event_triggers_call - ida_alloc_range - ida_destroy - ida_free init_srcu_struct - kobject_uevent_env - list_sort memchr_inv - param_ops_byte param_ops_ulong - perf_trace_buf_alloc - perf_trace_run_bpf_submit set_capacity_revalidate_and_notify set_disk_ro __srcu_read_unlock - synchronize_rcu synchronize_srcu - trace_event_buffer_commit - trace_event_buffer_reserve - trace_event_ignore_this_pid - trace_event_raw_init - trace_event_reg - trace_handle_return - trace_print_symbols_seq - trace_raw_output_prep - trace_seq_printf trace_seq_putc - try_module_get xa_destroy xa_erase xa_find @@ -1300,12 +2025,7 @@ blk_mq_update_nr_hw_queues blk_put_queue __blk_rq_map_sg - device_release_driver dma_max_mapping_size - dma_pool_alloc - dma_pool_create - dma_pool_destroy - dma_pool_free __do_once_done __do_once_start mempool_alloc @@ -1340,60 +2060,62 @@ pci_sriov_configure_simple pci_unregister_driver pm_suspend_global_flags - sg_init_table - __sw_hweight64 sysfs_remove_file_from_group wait_for_completion_io_timeout +# required by ohci-hcd.ko + gen_pool_dma_alloc_align + gen_pool_dma_zalloc_align + sb800_prefetch + schedule_timeout_uninterruptible + usb_amd_dev_put + usb_amd_quirk_pll_disable + usb_amd_quirk_pll_enable + usb_hc_died + usb_hcd_poll_rh_status + usb_hcds_loaded + usb_root_hub_lost_power + +# required by ohci-platform.ko + usb_hcd_platform_shutdown + # required by optee-rng.ko - driver_register hwrng_register hwrng_unregister # required by optee.ko alloc_pages_exact __arm_smccc_hvc + bus_for_each_dev device_register - find_vma free_pages_exact - idr_get_next - kimage_voffset - ktime_get_real_ts64 memremap memunmap - msleep_interruptible 
wait_for_completion_interruptible -# required by ov2680.ko - v4l2_ctrl_auto_cluster - v4l2_ctrl_new_std_menu - __v4l2_find_nearest_size - -# required by ov5695.ko - __v4l2_ctrl_modify_range - v4l2_ctrl_new_int_menu - # required by panel-simple.ko + devm_backlight_device_register drm_bus_flags_from_videomode drm_connector_set_panel_orientation - drm_display_mode_from_videomode - drm_panel_add - drm_panel_init drm_panel_of_backlight drm_panel_remove - mipi_dsi_attach + mipi_dsi_compression_mode + mipi_dsi_dcs_get_display_brightness + mipi_dsi_dcs_set_display_brightness mipi_dsi_dcs_write_buffer - mipi_dsi_detach mipi_dsi_driver_register_full mipi_dsi_driver_unregister mipi_dsi_generic_write + mipi_dsi_picture_parameter_set of_drm_get_panel_orientation of_find_i2c_adapter_by_node of_get_display_timing - of_get_drm_display_mode videomode_from_timing # required by pcie-dw-rockchip.ko + cpumask_next_and + debugfs_create_devm_seqfile + dw_pcie_find_ext_capability dw_pcie_host_init dw_pcie_link_up dw_pcie_read @@ -1401,14 +2123,13 @@ dw_pcie_setup_rc dw_pcie_write dw_pcie_write_dbi - kthread_create_on_node - of_prop_next_string - wake_up_process + pci_disable_link_state + pci_set_power_state # required by pcierockchiphost.ko - devm_of_phy_get devm_pci_alloc_host_bridge devm_pci_remap_cfg_resource + dummy_irq_chip jiffies_to_usecs of_pci_get_max_link_speed pci_host_probe @@ -1419,24 +2140,24 @@ # required by phy-rockchip-inno-dsidphy.ko phy_mipi_dphy_config_validate -# required by phy-rockchip-inno-hdmi-phy.ko - nvmem_cell_get - # required by phy-rockchip-inno-usb2.ko devm_extcon_register_notifier - extcon_set_state extcon_sync - wakeup_source_remove # required by phy-rockchip-inno-usb3.ko - strcasecmp usb_add_phy +# required by phy-rockchip-samsung-hdptx-hdmi.ko + of_platform_device_create + # required by phy-rockchip-typec.ko extcon_get_property -# required by phy-rockchip-usb.ko - __of_reset_control_get +# required by phy-rockchip-usbdp.ko + typec_mux_get_drvdata + typec_mux_register + typec_mux_unregister + usb_get_maximum_speed # required by pinctrl-rk628.ko irq_domain_xlate_twocell @@ -1445,6 +2166,7 @@ # required by pinctrl-rockchip.ko of_find_node_by_phandle + of_platform_depopulate of_platform_populate pinconf_generic_parse_dt_config pinctrl_force_default @@ -1459,8 +2181,6 @@ dma_async_tx_descriptor_init dmaengine_unmap_put dma_get_slave_channel - dma_map_resource - dma_unmap_resource loops_per_jiffy of_dma_controller_free of_dma_controller_register @@ -1469,16 +2189,27 @@ # required by pm_domains.ko clk_bulk_put - of_clk_get_parent_count of_genpd_add_provider_onecell panic + param_get_bool + param_set_bool pm_clk_add_clk - pm_clk_create - pm_clk_destroy pm_genpd_add_subdomain pm_genpd_init pm_genpd_remove - strrchr + pm_wq + +# required by pps_core.ko + kobject_get + +# required by ptp.ko + kthread_cancel_delayed_work_sync + kthread_delayed_work_timer_fn + kthread_mod_delayed_work + kthread_queue_delayed_work + ktime_get_snapshot + posix_clock_register + posix_clock_unregister # required by pwm-regulator.ko regulator_map_voltage_iterate @@ -1495,19 +2226,37 @@ pwm_free pwm_request +# required by pwrseq_simple.ko + bitmap_alloc + devm_gpiod_get_array + gpiod_set_array_value_cansleep + mmc_pwrseq_register + mmc_pwrseq_unregister + # required by reboot-mode.ko devres_release kernel_kobj +# required by rfkill-rk.ko + kstrtoll + rfkill_init_sw_state + rfkill_set_sw_state + +# required by rga3.ko + alloc_iova_fast + dma_fence_wait_timeout + free_iova_fast + idr_alloc_cyclic + 
kstrdup_quotable_cmdline + mmput + # required by rk628.ko irq_dispose_mapping irq_domain_xlate_onetwocell irq_set_parent - mfd_add_devices # required by rk805-pwrkey.ko devm_request_any_context_irq - input_set_capability # required by rk806-core.ko devm_regmap_add_irq_chip @@ -1519,82 +2268,118 @@ gpiod_is_active_low # required by rk808.ko - kobject_create_and_add platform_device_add platform_device_alloc pm_power_off_prepare register_syscore_ops regmap_add_irq_chip regmap_del_irq_chip - system_state unregister_syscore_ops -# required by rk818_battery.ko - blocking_notifier_call_chain - blocking_notifier_chain_register - blocking_notifier_chain_unregister +# required by rk860x-regulator.ko + regulator_suspend_enable + +# required by rk_cma_heap.ko + dma_heap_get_drvdata + dma_heap_put + +# required by rk_crypto.ko + crypto_ahash_digest + crypto_dequeue_request + crypto_enqueue_request + crypto_init_queue + crypto_register_ahash + crypto_register_akcipher + crypto_register_skcipher + crypto_req_done + crypto_unregister_ahash + crypto_unregister_akcipher + crypto_unregister_skcipher + des_expand_key + rsa_parse_priv_key + rsa_parse_pub_key + scatterwalk_ffwd + sg_copy_from_buffer + sg_copy_to_buffer + sg_nents_for_len + sg_pcopy_from_buffer + sg_pcopy_to_buffer + +# required by rk_headset_irq_hook_adc.ko + iio_read_channel_raw + +# required by rk_ircut.ko + drain_workqueue + +# required by rk_system_heap.ko + deferred_free + dmabuf_page_pool_alloc + dmabuf_page_pool_create + dmabuf_page_pool_destroy + dmabuf_page_pool_free + swiotlb_max_segment + +# required by rk_vcodec.ko + devm_iounmap + dev_pm_domain_attach + dev_pm_opp_get_freq + disable_hardirq + dma_buf_begin_cpu_access_partial + dma_buf_end_cpu_access_partial + __fdget + iommu_device_unregister + iommu_dma_reserve_iova + __kthread_init_worker + kthread_worker_fn + of_device_alloc + of_dma_configure_id + platform_device_del + pm_generic_runtime_resume + pm_generic_runtime_suspend + proc_create_single_data + proc_remove # required by rknpu.ko - dma_buf_mmap - dma_fence_context_alloc - dma_fence_init - dma_fence_release - dma_fence_signal - drm_gem_create_mmap_offset + dev_pm_domain_attach_by_name drm_gem_dumb_destroy drm_gem_handle_delete drm_gem_prime_export drm_gem_prime_import_dev drm_gem_vm_open - fd_install - get_unused_fd_flags - of_dma_configure_id + drm_prime_gem_destroy set_user_nice - sync_file_create vmf_insert_mixed + vm_insert_page # required by rockchip-cpufreq.ko + cpufreq_unregister_notifier dev_pm_opp_put_prop_name - dev_pm_opp_register_set_opp_helper -# required by rockchip-iommu.ko - bus_set_iommu - get_zeroed_page - iommu_device_register - iommu_device_sysfs_add - iommu_device_sysfs_remove - iommu_get_dma_cookie - iommu_group_alloc - iommu_group_put - iommu_group_ref_get - iommu_put_dma_cookie - platform_irq_count - report_iommu_fault +# required by rockchip-hdmirx.ko + cec_s_phys_addr_from_edid + cpu_latency_qos_remove_request + device_create_with_groups + of_reserved_mem_device_release + v4l2_ctrl_log_status + v4l2_ctrl_subscribe_event + v4l2_find_dv_timings_cap + v4l2_src_change_event_subscribe + vb2_dma_contig_memops + vb2_fop_read # required by rockchip-rng.ko devm_hwrng_register devm_of_iomap -# required by rockchip.ko - __genphy_config_aneg - genphy_resume - genphy_soft_reset - genphy_suspend - mdiobus_read - mdiobus_write - phy_drivers_register - phy_drivers_unregister - # required by rockchip_bus.ko cpu_topology +# required by rockchip_debug.ko + nr_irqs + # required by rockchip_dmc.ko - cpufreq_cpu_get 
cpufreq_cpu_put cpufreq_quick_get - cpu_latency_qos_add_request - cpu_latency_qos_update_request - devfreq_add_governor devfreq_event_disable_edev devfreq_event_enable_edev devfreq_event_get_edev_by_phandle @@ -1604,11 +2389,7 @@ devfreq_monitor_start devfreq_monitor_stop devfreq_monitor_suspend - devfreq_resume_device - devfreq_suspend_device devfreq_update_interval - _dev_notice - dev_pm_opp_add input_close_device input_open_device input_register_handle @@ -1619,22 +2400,32 @@ # required by rockchip_dmc_common.ko down_write_trylock +# required by rockchip_headset_core.ko + iio_channel_get + # required by rockchip_opp_select.ko - dev_pm_opp_get_opp_table + dev_pm_opp_disable dev_pm_opp_of_add_table - dev_pm_opp_put_opp_table dev_pm_opp_set_prop_name - of_find_node_opts_by_path - regulator_get_optional - regulator_put + regulator_get_linear_step + +# required by rockchip_pwm_remotectl.ko + __tasklet_hi_schedule # required by rockchip_saradc.ko - devm_iio_device_alloc - __devm_iio_device_register iio_get_time_ns - iio_push_to_buffers iio_trigger_notify_done +# required by rockchip_system_monitor.ko + add_cpu + bitmap_parselist + compat_only_sysfs_link_entry_to_kobj + dev_pm_qos_add_request + dev_pm_qos_remove_request + dev_pm_qos_update_request + remove_cpu + thermal_zone_get_temp + # required by rockchip_thermal.ko devm_thermal_zone_of_sensor_register thermal_zone_device_disable @@ -1643,8 +2434,6 @@ # required by rockchipdrm.ko adjust_managed_page_count - clk_is_match - clk_set_parent component_add component_bind_all component_del @@ -1653,11 +2442,12 @@ component_match_add_release component_unbind_all devm_of_phy_get_by_index - devm_phy_optional_get + driver_find_device drm_add_modes_noedid drm_atomic_commit drm_atomic_get_connector_state drm_atomic_get_plane_state + drm_atomic_helper_bridge_propagate_bus_fmt drm_atomic_helper_check drm_atomic_helper_check_plane_state drm_atomic_helper_cleanup_planes @@ -1671,13 +2461,13 @@ __drm_atomic_helper_connector_reset __drm_atomic_helper_crtc_destroy_state __drm_atomic_helper_crtc_duplicate_state - drm_atomic_helper_dirtyfb __drm_atomic_helper_disable_plane drm_atomic_helper_duplicate_state drm_atomic_helper_fake_vblank drm_atomic_helper_page_flip __drm_atomic_helper_plane_destroy_state __drm_atomic_helper_plane_duplicate_state + __drm_atomic_helper_plane_reset drm_atomic_helper_set_config drm_atomic_helper_shutdown drm_atomic_helper_swap_state @@ -1688,11 +2478,13 @@ drm_atomic_set_mode_for_crtc drm_atomic_state_alloc __drm_atomic_state_free + drm_bridge_chain_mode_set + drm_bridge_get_edid + drm_connector_attach_content_protection_property drm_connector_list_iter_begin drm_connector_list_iter_end drm_connector_list_iter_next drm_connector_list_update - drm_connector_unregister drm_crtc_cleanup drm_crtc_enable_color_mgmt drm_crtc_from_index @@ -1705,7 +2497,11 @@ drm_crtc_vblank_put drm_debugfs_create_files drm_do_get_edid - drm_dp_channel_eq_ok + drm_dp_clock_recovery_ok + drm_dp_get_adjust_request_pre_emphasis + drm_dp_get_adjust_request_voltage + drm_dp_read_desc + drm_dp_read_sink_count drm_encoder_cleanup drm_encoder_init drm_event_reserve_init_locked @@ -1714,6 +2510,7 @@ drm_flip_work_init drm_flip_work_queue drm_format_info + drm_format_info_min_pitch drm_framebuffer_cleanup drm_framebuffer_init drm_gem_cma_vm_ops @@ -1726,17 +2523,14 @@ drm_gem_fb_afbc_init drm_gem_fb_create_handle drm_gem_fb_init_with_funcs - drm_gem_get_pages drm_gem_map_attach drm_gem_map_detach drm_gem_map_dma_buf - drm_gem_object_put_locked - drm_gem_put_pages 
drm_gem_unmap_dma_buf drm_get_format_info drm_get_format_name + drm_hdcp_update_content_protection drm_helper_mode_fill_fb_struct - drm_kms_helper_hotplug_event drm_kms_helper_poll_enable drm_kms_helper_poll_fini drm_kms_helper_poll_init @@ -1745,11 +2539,13 @@ drmm_mode_config_init drm_mm_print drm_mm_remove_node + drm_mm_reserve_node drm_mm_takedown drm_mode_config_cleanup drm_mode_config_helper_resume drm_mode_config_helper_suspend drm_mode_config_reset + drm_mode_create_hdmi_colorspace_property drm_mode_create_tv_properties drm_mode_crtc_set_gamma_size drm_mode_debug_printmodeline @@ -1759,9 +2555,9 @@ drm_mode_prune_invalid drm_mode_set_crtcinfo drm_modeset_lock_all + drm_modeset_unlock drm_modeset_unlock_all drm_mode_sort - drm_mode_validate_driver drm_mode_validate_size drm_mode_validate_ycbcr420 drm_of_crtc_port_mask @@ -1773,14 +2569,20 @@ drm_plane_create_zpos_property drm_prime_get_contiguous_size __drm_printfn_seq_file + drm_property_blob_put drm_property_create drm_property_create_bitmask + drm_property_create_bool drm_property_create_enum drm_property_create_object drm_property_create_range drm_property_destroy + drm_property_lookup_blob + drm_property_replace_blob __drm_puts_seq_file drm_rect_calc_hscale + drm_self_refresh_helper_cleanup + drm_self_refresh_helper_init drm_send_event_locked drm_simple_encoder_init drm_universal_plane_init @@ -1788,35 +2590,26 @@ drm_writeback_connector_init drm_writeback_queue_job drm_writeback_signal_completion - hdmi_infoframe_pack iommu_domain_alloc iommu_domain_free - iommu_map - iommu_map_sg - iommu_set_fault_handler - iommu_unmap memblock_free - of_graph_get_next_endpoint + mipi_dsi_packet_format_is_short + of_find_backlight_by_node + of_fwnode_ops of_graph_get_port_by_id of_graph_get_remote_port - of_graph_get_remote_port_parent phy_mipi_dphy_get_default_config platform_find_device_by_driver __platform_register_drivers platform_unregister_drivers - release_firmware - request_firmware - sort - __sw_hweight32 - __sw_hweight8 __vmalloc +# required by rtc-hym8563.ko + devm_rtc_device_register + # required by rtc-rk808.ko - _bcd2bin - _bin2bcd devm_rtc_allocate_device __rtc_register_device - rtc_time64_to_tm rtc_update_irq # required by sdhci-of-arasan.ko @@ -1827,7 +2620,6 @@ sdhci_cqe_irq sdhci_dumpregs sdhci_enable_clk - sdhci_execute_tuning sdhci_pltfm_unregister sdhci_set_power_and_bus_voltage sdhci_set_uhs_signaling @@ -1839,22 +2631,17 @@ sdhci_adma_write_desc sdhci_remove_host sdhci_request + sdhci_reset_tuning # required by sg.ko blk_get_request blk_put_request blk_rq_map_user_iov blk_verify_command - cdev_add cdev_alloc - cdev_del class_interface_unregister - fasync_helper get_sg_io_hdr import_iovec - kill_fasync - __module_get - nonseekable_open put_sg_io_hdr _raw_read_lock_irqsave _raw_read_unlock_irqrestore @@ -1878,59 +2665,77 @@ irq_stat # required by sii902x.ko - drm_display_mode_to_videomode - hdmi_audio_infoframe_pack hdmi_avi_infoframe_pack - of_property_read_variable_u8_array -# required by snd-soc-cx2072x.ko - regmap_multi_reg_write - snd_soc_params_to_frame_size +# required by smsc95xx.ko + csum_partial + __mdiobus_register + phy_connect_direct + phy_disconnect + phy_ethtool_get_link_ksettings + phy_ethtool_nway_reset + phy_ethtool_set_link_ksettings + phy_find_first + phy_get_pause + phy_init_hw + phy_mii_ioctl + phy_print_status + phy_start + phy_stop + usb_autopm_get_interface_no_resume # required by snd-soc-es8316.ko snd_pcm_hw_constraint_list - snd_soc_dapm_disable_pin_unlocked - snd_soc_dapm_sync_unlocked # 
required by snd-soc-hdmi-codec.ko - hdmi_audio_infoframe_init snd_ctl_add snd_ctl_new1 snd_pcm_add_chmap_ctls - snd_pcm_create_iec958_consumer_hw_params + snd_pcm_create_iec958_consumer_default + snd_pcm_fill_iec958_consumer + snd_pcm_fill_iec958_consumer_hw_params snd_pcm_hw_constraint_eld - snd_soc_dapm_add_routes + snd_pcm_stop # required by snd-soc-rk817.ko - snd_soc_add_component_controls snd_soc_component_exit_regmap snd_soc_component_init_regmap - snd_soc_unregister_component + +# required by snd-soc-rockchip-hdmi.ko + snd_soc_dapm_new_widgets + +# required by snd-soc-rockchip-i2s-tdm.ko + clk_is_match + pm_runtime_forbid + snd_pcm_stop_xrun + +# required by snd-soc-rockchip-i2s.ko + of_prop_next_string + +# required by snd-soc-rockchip-multicodecs.ko + of_parse_phandle_with_fixed_args + snd_soc_jack_add_zones + snd_soc_jack_get_type + +# required by snd-soc-rockchip-spdif.ko + snd_pcm_create_iec958_consumer_hw_params + +# required by snd-soc-rt5640.ko + regmap_register_patch + snd_soc_dapm_force_bias_level # required by snd-soc-simple-card-utils.ko - devm_kasprintf devm_kvasprintf - snd_soc_card_jack_new - snd_soc_dai_set_sysclk snd_soc_dai_set_tdm_slot - snd_soc_dapm_get_pin_switch - snd_soc_dapm_info_pin_switch - snd_soc_dapm_put_pin_switch - snd_soc_of_parse_audio_routing snd_soc_of_parse_audio_simple_widgets - snd_soc_of_parse_card_name - snd_soc_of_parse_daifmt snd_soc_runtime_calc_hw # required by snd-soc-simple-card.ko - devm_snd_soc_register_card - of_parse_phandle_with_args snd_soc_dai_link_set_capabilities snd_soc_of_get_dai_name snd_soc_of_parse_aux_devs snd_soc_of_parse_node_prefix snd_soc_of_parse_tdm_slot - snd_soc_pm_ops # required by spi-rockchip.ko devm_spi_register_controller @@ -1943,30 +2748,85 @@ spi_setup stream_open -# required by system_heap.ko - deferred_free - dmabuf_page_pool_alloc - dmabuf_page_pool_create - dmabuf_page_pool_destroy - dmabuf_page_pool_free - dma_heap_get_dev - __sg_page_iter_next - __sg_page_iter_start - swiotlb_max_segment +# required by stmmac-platform.ko + device_get_phy_mode + of_get_mac_address + of_phy_is_fixed_link + platform_get_irq_byname_optional + +# required by stmmac.ko + devm_alloc_etherdev_mqs + dql_completed + dql_reset + ethtool_convert_legacy_u32_to_link_mode + ethtool_convert_link_mode_to_legacy_u32 + flow_block_cb_setup_simple + flow_rule_match_basic + flow_rule_match_ipv4_addrs + flow_rule_match_ports + mdiobus_get_phy + __napi_alloc_skb + napi_complete_done + napi_disable + __napi_schedule + napi_schedule_prep + netdev_alert + netdev_pick_tx + netdev_rss_key_fill + netif_device_attach + netif_device_detach + netif_napi_add + __netif_napi_del + netif_schedule_queue + netif_set_real_num_rx_queues + netif_set_real_num_tx_queues + of_mdiobus_register + page_pool_alloc_pages + page_pool_create + page_pool_destroy + page_pool_put_page + page_pool_release_page + phy_init_eee + phylink_connect_phy + phylink_create + phylink_destroy + phylink_disconnect_phy + phylink_ethtool_get_eee + phylink_ethtool_get_pauseparam + phylink_ethtool_get_wol + phylink_ethtool_ksettings_get + phylink_ethtool_ksettings_set + phylink_ethtool_nway_reset + phylink_ethtool_set_eee + phylink_ethtool_set_pauseparam + phylink_ethtool_set_wol + phylink_get_eee_err + phylink_mac_change + phylink_mii_ioctl + phylink_of_phy_connect + phylink_set_port_modes + phylink_speed_down + phylink_speed_up + phylink_start + phylink_stop + pm_wakeup_dev_event + reset_control_reset + skb_tstamp_tx + +# required by sw_sync.ko + dma_fence_free + dma_fence_signal_locked 
+ __get_task_comm # required by tcpci_husb311.ko - i2c_smbus_read_word_data + tcpci_get_tcpm_port tcpci_irq tcpci_register_port tcpci_unregister_port # required by tee.ko - bus_register - bus_unregister class_find_device - crypto_alloc_shash crypto_shash_final - crypto_shash_update gen_pool_best_fit gen_pool_set_algo gen_pool_virt_to_phys @@ -1977,9 +2837,10 @@ # required by test_power.ko param_get_int - power_supply_register - power_supply_unregister - strncasecmp + +# required by timer-rockchip.ko + clockevents_config_and_register + irq_of_parse_and_map # required by tps65132-regulator.ko regulator_set_active_discharge_regmap @@ -1997,9 +2858,54 @@ typec_altmode_vdm typec_get_negotiated_svdm_version +# required by usblp.ko + add_wait_queue + default_wake_function + remove_wait_queue + stpcpy + usb_anchor_urb + usb_find_common_endpoints + usb_find_interface + usb_kill_anchored_urbs + usb_poison_anchored_urbs + usb_unanchor_urb + +# required by usbserial.ko + driver_attach + param_ops_ushort + put_tty_driver + schedule_timeout_interruptible + __tty_alloc_driver + tty_flip_buffer_push + tty_hangup + __tty_insert_flip_char + tty_insert_flip_string_fixed_flag + tty_kref_put + tty_ldisc_deref + tty_ldisc_ref + tty_port_close + tty_port_destroy + tty_port_hangup + tty_port_init + tty_port_open + tty_port_register_device + tty_port_tty_get + tty_port_tty_wakeup + tty_register_driver + tty_set_operations + tty_standard_install + tty_std_termios + tty_termios_copy_hw + tty_unregister_device + tty_unregister_driver + tty_vhangup + usb_kill_urb + usb_match_one_id + usb_show_dynids + usb_store_new_id + # required by v4l2-fwnode.ko fwnode_device_is_available - fwnode_get_name fwnode_graph_get_next_endpoint fwnode_graph_get_port_parent fwnode_graph_get_remote_endpoint @@ -2009,31 +2915,33 @@ fwnode_property_read_u64_array v4l2_async_notifier_add_fwnode_subdev v4l2_async_notifier_add_subdev - v4l2_async_notifier_unregister # required by video_rkcif.ko media_entity_setup_link # required by video_rkisp.ko - kmalloc_order_trace media_device_cleanup __memcpy_fromio __memcpy_toio - of_property_read_u64 param_ops_ullong - v4l2_ctrl_g_ctrl_int64 - v4l2_event_unsubscribe v4l2_pipeline_link_notify -# required by videobuf2-dma-sg.ko +# required by videobuf2-cma-sg.ko + frame_vector_to_pages split_page + vb2_common_vm_ops + vb2_create_framevec + vb2_destroy_framevec + vm_map_ram + vm_unmap_ram + +# required by vl6180.ko + iio_read_const_attr # required by zram.ko __alloc_percpu bio_endio blk_alloc_queue - __class_register - class_unregister __cpuhp_state_add_instance __cpuhp_state_remove_instance crypto_alloc_base @@ -2042,25 +2950,17 @@ crypto_has_alg disk_end_io_acct disk_start_io_acct - flush_dcache_page - free_percpu fsync_bdev - __get_free_pages - idr_for_each kstrtou16 memparse memset64 __num_online_cpus page_endio register_blkdev - strcpy - sysfs_streq unregister_blkdev - vzalloc # required by zsmalloc.ko alloc_anon_inode - __ClearPageMovable contig_page_data dec_zone_page_state inc_zone_page_state @@ -2069,14 +2969,8 @@ kern_mount kern_unmount kill_anon_super - kstrdup - __lock_page page_mapping _raw_read_lock _raw_read_unlock _raw_write_lock _raw_write_unlock - register_shrinker - __SetPageMovable - unlock_page - unregister_shrinker diff --git a/android/abi_gki_aarch64_transsion b/android/abi_gki_aarch64_transsion index 7538d32cf51b..83e28c1e1746 100644 --- a/android/abi_gki_aarch64_transsion +++ b/android/abi_gki_aarch64_transsion @@ -6,3 +6,69 @@ scan_swap_map_slots swap_alloc_cluster check_cache_active 
+ zero_pfn + nr_swap_pages + plist_requeue + plist_del + __traceiter_android_rvh_handle_pte_fault_end + __traceiter_android_vh_handle_pte_fault_end + __traceiter_android_vh_cow_user_page + __traceiter_android_vh_swapin_add_anon_rmap + __traceiter_android_vh_waiting_for_page_migration + __traceiter_android_vh_migrate_page_states + __traceiter_android_vh_page_referenced_one_end + __traceiter_android_vh_count_pswpin + __traceiter_android_vh_count_pswpout + __traceiter_android_vh_count_swpout_vm_event + __traceiter_android_vh_swap_slot_cache_active + __traceiter_android_rvh_drain_slots_cache_cpu + __traceiter_android_vh_drain_slots_cache_cpu + __traceiter_android_rvh_alloc_swap_slot_cache + __traceiter_android_vh_alloc_swap_slot_cache + __traceiter_android_rvh_free_swap_slot + __traceiter_android_vh_free_swap_slot + __traceiter_android_rvh_get_swap_page + __traceiter_android_vh_get_swap_page + __traceiter_android_vh_page_isolated_for_reclaim + __traceiter_android_vh_inactive_is_low + __traceiter_android_vh_snapshot_refaults + __traceiter_android_vh_account_swap_pages + __traceiter_android_vh_unuse_swap_page + __traceiter_android_vh_init_swap_info_struct + __traceiter_android_vh_si_swapinfo + __traceiter_android_rvh_alloc_si + __traceiter_android_vh_alloc_si + __traceiter_android_vh_free_pages + __traceiter_android_vh_set_shmem_page_flag + __traceiter_android_vh_ra_tuning_max_page + __tracepoint_android_rvh_handle_pte_fault_end + __tracepoint_android_vh_handle_pte_fault_end + __tracepoint_android_vh_cow_user_page + __tracepoint_android_vh_swapin_add_anon_rmap + __tracepoint_android_vh_waiting_for_page_migration + __tracepoint_android_vh_migrate_page_states + __tracepoint_android_vh_page_referenced_one_end + __tracepoint_android_vh_count_pswpin + __tracepoint_android_vh_count_pswpout + __tracepoint_android_vh_count_swpout_vm_event + __tracepoint_android_vh_swap_slot_cache_active + __tracepoint_android_rvh_drain_slots_cache_cpu + __tracepoint_android_vh_drain_slots_cache_cpu + __tracepoint_android_rvh_alloc_swap_slot_cache + __tracepoint_android_vh_alloc_swap_slot_cache + __tracepoint_android_rvh_free_swap_slot + __tracepoint_android_vh_free_swap_slot + __tracepoint_android_rvh_get_swap_page + __tracepoint_android_vh_get_swap_page + __tracepoint_android_vh_page_isolated_for_reclaim + __tracepoint_android_vh_inactive_is_low + __tracepoint_android_vh_snapshot_refaults + __tracepoint_android_vh_account_swap_pages + __tracepoint_android_vh_unuse_swap_page + __tracepoint_android_vh_init_swap_info_struct + __tracepoint_android_vh_si_swapinfo + __tracepoint_android_rvh_alloc_si + __tracepoint_android_vh_alloc_si + __tracepoint_android_vh_free_pages + __tracepoint_android_vh_set_shmem_page_flag + __tracepoint_android_vh_ra_tuning_max_page diff --git a/android/abi_gki_aarch64_tuxera b/android/abi_gki_aarch64_tuxera new file mode 100644 index 000000000000..b14b56603408 --- /dev/null +++ b/android/abi_gki_aarch64_tuxera @@ -0,0 +1,248 @@ +[abi_symbol_list] +add_to_page_cache_locked +__alloc_pages_nodemask +__arch_copy_from_user +__arch_copy_to_user +arm64_const_caps_ready +autoremove_wake_function +balance_dirty_pages_ratelimited +bcmp +bdev_read_only +__bforget +bio_add_page +bio_alloc_bioset +bio_associate_blkg +bio_put +__bitmap_weight +bit_waitqueue +blkdev_issue_discard +blkdev_issue_flush +blk_finish_plug +blk_start_plug +__blockdev_direct_IO +block_invalidatepage +block_is_partially_uptodate +__breadahead +__bread_gfp +__brelse +buffer_migrate_page +capable +capable_wrt_inode_uidgid +__cfi_slowpath 
+__check_object_size +clear_inode +clear_page_dirty_for_io +complete_and_exit +cpu_hwcap_keys +cpu_hwcaps +create_empty_buffers +current_umask +d_add +d_add_ci +d_instantiate +d_make_root +down_read +down_write +dput +drop_nlink +d_splice_alias +dump_stack +end_buffer_read_sync +end_page_writeback +errseq_set +failure_tracking +fiemap_fill_next_extent +fiemap_prep +filemap_fdatawait_range +filemap_fdatawrite +filemap_flush +__filemap_set_wb_err +filemap_write_and_wait_range +file_remove_privs +file_update_time +file_write_and_wait_range +finish_wait +flush_dcache_page +freezing_slow_path +fs_bio_set +generic_error_remove_page +generic_file_direct_write +generic_file_llseek +generic_file_mmap +generic_file_open +generic_file_read_iter +generic_file_splice_read +generic_fillattr +generic_perform_write +generic_read_dir +generic_write_checks +__getblk_gfp +gic_nonsecure_priorities +grab_cache_page_write_begin +iget5_locked +igrab +ihold +ilookup5 +in_group_p +__init_rwsem +init_wait_entry +__init_waitqueue_head +inode_dio_wait +inode_init_once +inode_newsize_ok +inode_set_flags +__insert_inode_hash +invalidate_bdev +invalidate_mapping_pages +io_schedule +iov_iter_advance +iov_iter_alignment +iov_iter_get_pages +iput +is_bad_inode +iter_file_splice_write +iunique +jiffies +jiffies_to_msecs +kasan_flag_enabled +kfree +kill_block_super +__kmalloc +kmalloc_caches +kmem_cache_alloc +kmem_cache_alloc_trace +kmem_cache_create +kmem_cache_create_usercopy +kmem_cache_destroy +kmem_cache_free +krealloc +kthread_create_on_node +kthread_should_stop +kthread_stop +ktime_get_coarse_real_ts64 +kvfree +__list_add_valid +__list_del_entry_valid +ll_rw_block +load_nls +load_nls_default +__lock_buffer +__lock_page +lru_cache_add +make_bad_inode +mark_buffer_dirty +mark_buffer_write_io_error +__mark_inode_dirty +mark_page_accessed +memcpy +memmove +memset +mktime64 +mnt_drop_write_file +mnt_want_write_file +module_layout +mount_bdev +mpage_readahead +mpage_readpage +__msecs_to_jiffies +__mutex_init +mutex_lock +mutex_trylock +mutex_unlock +new_inode +notify_change +pagecache_get_page +page_cache_next_miss +page_cache_prev_miss +__page_pinner_migration_failed +pagevec_lookup_range_tag +__pagevec_release +__percpu_down_read +preempt_schedule +preempt_schedule_notrace +prepare_to_wait +prepare_to_wait_event +printk +__put_page +put_pages_list +___ratelimit +_raw_read_lock +_raw_read_lock_irqsave +_raw_read_unlock +_raw_read_unlock_irqrestore +_raw_spin_lock +_raw_spin_lock_irqsave +_raw_spin_unlock +_raw_spin_unlock_irqrestore +_raw_write_lock +_raw_write_lock_irqsave +_raw_write_unlock +_raw_write_unlock_irqrestore +rcuwait_wake_up +readahead_gfp_mask +read_cache_page +redirty_page_for_writepage +__refrigerator +register_filesystem +__remove_inode_hash +sb_min_blocksize +sb_set_blocksize +schedule +schedule_timeout_interruptible +seq_printf +setattr_prepare +set_freezable +set_nlink +set_page_dirty +__set_page_dirty_buffers +__set_page_dirty_nobuffers +set_user_nice +simple_strtol +simple_strtoul +simple_strtoull +sprintf +__stack_chk_fail +__stack_chk_guard +strchr +strcmp +strlen +strncasecmp +strncmp +strsep +strstr +submit_bh +submit_bio +__sync_dirty_buffer +sync_dirty_buffer +sync_filesystem +sync_inode_metadata +system_freezing_cnt +sys_tz +tag_pages_for_writeback +__test_set_page_writeback +time64_to_tm +_trace_android_vh_record_pcpu_rwsem_starttime +truncate_inode_pages +truncate_inode_pages_final +truncate_setsize +try_to_writeback_inodes_sb +unload_nls +unlock_buffer +unlock_new_inode +unlock_page 
+unregister_filesystem +up_read +up_write +vfree +vfs_fsync_range +__vmalloc +vsnprintf +vzalloc +__wait_on_buffer +wait_on_page_bit +wake_bit_function +__wake_up +wake_up_process +__warn_printk +write_inode_now +xa_load diff --git a/android/abi_gki_aarch64_type_visibility b/android/abi_gki_aarch64_type_visibility new file mode 100644 index 000000000000..705bf5574222 --- /dev/null +++ b/android/abi_gki_aarch64_type_visibility @@ -0,0 +1,6 @@ +[abi_symbol_list] + +# for type visibility + GKI_struct_selinux_state + GKI_struct_gic_chip_data + GKI_struct_swap_slots_cache diff --git a/android/abi_gki_aarch64_vivo b/android/abi_gki_aarch64_vivo index c3bc87e17c9f..28c4b05aa43f 100644 --- a/android/abi_gki_aarch64_vivo +++ b/android/abi_gki_aarch64_vivo @@ -277,6 +277,7 @@ del_gendisk del_timer del_timer_sync + dentry_path_raw desc_to_gpio destroy_workqueue dev_coredumpv diff --git a/android/abi_gki_aarch64_xiaomi b/android/abi_gki_aarch64_xiaomi index a162683958cd..ebbbb0e81420 100644 --- a/android/abi_gki_aarch64_xiaomi +++ b/android/abi_gki_aarch64_xiaomi @@ -200,3 +200,11 @@ wakeup_sources_read_unlock wakeup_sources_walk_start wakeup_sources_walk_next + +#required by mi_mempool.ko module + __traceiter_android_vh_mmput + __tracepoint_android_vh_mmput + __traceiter_android_vh_alloc_pages_reclaim_bypass + __tracepoint_android_vh_alloc_pages_reclaim_bypass + __traceiter_android_vh_alloc_pages_failure_bypass + __tracepoint_android_vh_alloc_pages_failure_bypass diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 807d7b9a1860..0ce1eee0924b 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -62,6 +62,7 @@ register struct thread_info *__current_thread_info __asm__("$8"); #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SYSCALL_AUDIT 4 /* syscall audit active */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */ #define TIF_MEMDIE 13 /* is terminating due to OOM killer */ #define TIF_POLLING_NRFLAG 14 /* idle is polling for TIF_NEED_RESCHED */ @@ -71,11 +72,12 @@ register struct thread_info *__current_thread_info __asm__("$8"); #define _TIF_NEED_RESCHED (1<pcb.ksp = (unsigned long) childstack; childti->pcb.flags = 1; /* set FEN, clear everything else */ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(childstack, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs)); diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c index 1b1d5963ac55..48ffbfbd0624 100644 --- a/arch/alpha/kernel/rtc.c +++ b/arch/alpha/kernel/rtc.c @@ -80,7 +80,12 @@ init_rtc_epoch(void) static int alpha_rtc_read_time(struct device *dev, struct rtc_time *tm) { - mc146818_get_time(tm); + int ret = mc146818_get_time(tm); + + if (ret < 0) { + dev_err_ratelimited(dev, "unable to read current time\n"); + return ret; + } /* Adjust for non-default epochs. 
It's easier to depend on the generic __get_rtc_time and adjust the epoch here than create diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index 3739efce1ec0..948b89789da8 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -527,7 +527,7 @@ do_work_pending(struct pt_regs *regs, unsigned long thread_flags, schedule(); } else { local_irq_enable(); - if (thread_flags & _TIF_SIGPENDING) { + if (thread_flags & (_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)) { do_signal(regs, r0, r19); r0 = 0; } else { diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c index 438b10c44d73..2b7a314b8452 100644 --- a/arch/alpha/kernel/srmcons.c +++ b/arch/alpha/kernel/srmcons.c @@ -59,7 +59,7 @@ srmcons_do_receive_chars(struct tty_port *port) } while((result.bits.status & 1) && (++loops < 10)); if (count) - tty_schedule_flip(port); + tty_flip_buffer_push(port); return count; } diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h index 8f777d6441a5..80347382a380 100644 --- a/arch/arc/include/asm/io.h +++ b/arch/arc/include/asm/io.h @@ -32,7 +32,7 @@ static inline void ioport_unmap(void __iomem *addr) { } -extern void iounmap(const void __iomem *addr); +extern void iounmap(const volatile void __iomem *addr); /* * io{read,write}{16,32}be() macros diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h index f9eef0e8f0b7..c0942c24d401 100644 --- a/arch/arc/include/asm/thread_info.h +++ b/arch/arc/include/asm/thread_info.h @@ -79,6 +79,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void) #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_SYSCALL_TRACE 15 /* syscall trace active */ /* true if poll_idle() is polling TIF_NEED_RESCHED */ @@ -89,11 +90,12 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void) #define _TIF_SIGPENDING (1<flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(c_regs, 0, sizeof(struct pt_regs)); c_callee->r13 = kthread_arg; diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 9d5996e014c0..4868bdebf586 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -405,7 +405,7 @@ void do_signal(struct pt_regs *regs) restart_scall = in_syscall(regs) && syscall_restartable(regs); - if (get_signal(&ksig)) { + if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) { if (restart_scall) { arc_restart_syscall(&ksig.ka, regs); syscall_wont_restart(regs); /* No more restarts */ diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c index 95c649fbc95a..d3b1ea16e9cd 100644 --- a/arch/arc/mm/ioremap.c +++ b/arch/arc/mm/ioremap.c @@ -93,7 +93,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size, EXPORT_SYMBOL(ioremap_prot); -void iounmap(const void __iomem *addr) +void iounmap(const volatile void __iomem *addr) { /* weird double cast to handle phys_addr_t > 32 bits */ if (arc_uncached_addr_space((phys_addr_t)(u32)addr)) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index d5bca27cf927..f6ce22c95cd2 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -25,7 +25,7 @@ config ARM select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAS_GCOV_PROFILE_ALL - select ARCH_KEEP_MEMBLOCK if HAVE_ARCH_PFN_VALID || KEXEC + select 
ARCH_KEEP_MEMBLOCK select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_NO_SG_CHAIN if !ARM_HAS_SG_CHAIN select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX @@ -522,7 +522,6 @@ config ARCH_S3C24XX config ARCH_OMAP1 bool "TI OMAP1" depends on MMU - select ARCH_HAS_HOLES_MEMORYMODEL select ARCH_OMAP select CLKDEV_LOOKUP select CLKSRC_MMIO @@ -1482,9 +1481,6 @@ config OABI_COMPAT UNPREDICTABLE (in fact it can be predicted that it won't work at all). If in doubt say N. -config ARCH_HAS_HOLES_MEMORYMODEL - bool - config ARCH_SELECT_MEMORY_MODEL bool @@ -1496,7 +1492,7 @@ config ARCH_SPARSEMEM_ENABLE select SPARSEMEM_STATIC if SPARSEMEM config HAVE_ARCH_PFN_VALID - def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM + def_bool y config HIGHMEM bool "High Memory Support" @@ -1796,7 +1792,6 @@ config CMDLINE choice prompt "Kernel command line type" if CMDLINE != "" default CMDLINE_FROM_BOOTLOADER - depends on ATAGS config CMDLINE_FROM_BOOTLOADER bool "Use bootloader kernel arguments if available" diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index b8c3bd8bdf46..898abab0e57f 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -128,6 +128,7 @@ dtb-$(CONFIG_ARCH_BCM_5301X) += \ bcm47094-luxul-xwr-3150-v1.dtb \ bcm47094-netgear-r8500.dtb \ bcm47094-phicomm-k3.dtb \ + bcm53015-meraki-mr26.dtb \ bcm53016-meraki-mr32.dtb \ bcm94708.dtb \ bcm94709.dtb \ diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi index 6c547c83e5dd..fc465f0d7e18 100644 --- a/arch/arm/boot/dts/am335x-pcm-953.dtsi +++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi @@ -12,22 +12,20 @@ compatible = "phytec,am335x-pcm-953", "phytec,am335x-phycore-som", "ti,am33xx"; /* Power */ - regulators { - vcc3v3: fixedregulator@1 { - compatible = "regulator-fixed"; - regulator-name = "vcc3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - regulator-boot-on; - }; + vcc3v3: fixedregulator1 { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + }; - vcc1v8: fixedregulator@2 { - compatible = "regulator-fixed"; - regulator-name = "vcc1v8"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-boot-on; - }; + vcc1v8: fixedregulator2 { + compatible = "regulator-fixed"; + regulator-name = "vcc1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; }; /* User IO */ diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index 29fafb67cfaa..0d36e9dd14a4 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi @@ -1352,8 +1352,7 @@ mmc1: mmc@0 { compatible = "ti,am335-sdhci"; ti,needs-special-reset; - dmas = <&edma_xbar 24 0 0 - &edma_xbar 25 0 0>; + dmas = <&edma 24 0>, <&edma 25 0>; dma-names = "tx", "rx"; interrupts = <64>; reg = <0x0 0x1000>; diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts index 0d2fac98ce7d..c8b80f156ec9 100644 --- a/arch/arm/boot/dts/am3517-evm.dts +++ b/arch/arm/boot/dts/am3517-evm.dts @@ -161,6 +161,8 @@ /* HS USB Host PHY on PORT 1 */ hsusb1_phy: hsusb1_phy { + pinctrl-names = "default"; + pinctrl-0 = <&hsusb1_rst_pins>; compatible = "usb-nop-xceiv"; reset-gpios = <&gpio2 25 GPIO_ACTIVE_LOW>; /* gpio_57 */ #phy-cells = <0>; @@ -168,7 +170,9 @@ }; &davinci_emac { - status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&ethernet_pins>; +
status = "okay"; }; &davinci_mdio { @@ -193,6 +197,8 @@ }; &i2c2 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_pins>; clock-frequency = <400000>; /* User DIP swithes [1:8] / User LEDS [1:2] */ tca6416: gpio@21 { @@ -205,6 +211,8 @@ }; &i2c3 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c3_pins>; clock-frequency = <400000>; }; @@ -223,6 +231,8 @@ }; &usbhshost { + pinctrl-names = "default"; + pinctrl-0 = <&hsusb1_pins>; port1-mode = "ehci-phy"; }; @@ -231,8 +241,35 @@ }; &omap3_pmx_core { - pinctrl-names = "default"; - pinctrl-0 = <&hsusb1_rst_pins>; + + ethernet_pins: pinmux_ethernet_pins { + pinctrl-single,pins = < + OMAP3_CORE1_IOPAD(0x21fe, PIN_INPUT | MUX_MODE0) /* rmii_mdio_data */ + OMAP3_CORE1_IOPAD(0x2200, MUX_MODE0) /* rmii_mdio_clk */ + OMAP3_CORE1_IOPAD(0x2202, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_rxd0 */ + OMAP3_CORE1_IOPAD(0x2204, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_rxd1 */ + OMAP3_CORE1_IOPAD(0x2206, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_crs_dv */ + OMAP3_CORE1_IOPAD(0x2208, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_rxer */ + OMAP3_CORE1_IOPAD(0x220a, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_txd0 */ + OMAP3_CORE1_IOPAD(0x220c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_txd1 */ + OMAP3_CORE1_IOPAD(0x220e, PIN_OUTPUT_PULLDOWN |MUX_MODE0) /* rmii_txen */ + OMAP3_CORE1_IOPAD(0x2210, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_50mhz_clk */ + >; + }; + + i2c2_pins: pinmux_i2c2_pins { + pinctrl-single,pins = < + OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c2_scl */ + OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c2_sda */ + >; + }; + + i2c3_pins: pinmux_i2c3_pins { + pinctrl-single,pins = < + OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_scl */ + OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_sda */ + >; + }; leds_pins: pinmux_leds_pins { pinctrl-single,pins = < @@ -300,8 +337,6 @@ }; &omap3_pmx_core2 { - pinctrl-names = "default"; - pinctrl-0 = <&hsusb1_pins>; hsusb1_pins: pinmux_hsusb1_pins { pinctrl-single,pins = < diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi index 8b669e2eafec..f7b680f6c48a 100644 --- a/arch/arm/boot/dts/am3517-som.dtsi +++ b/arch/arm/boot/dts/am3517-som.dtsi @@ -69,6 +69,8 @@ }; &i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; clock-frequency = <400000>; s35390a: s35390a@30 { @@ -179,6 +181,13 @@ &omap3_pmx_core { + i2c1_pins: pinmux_i2c1_pins { + pinctrl-single,pins = < + OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_scl */ + OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_sda */ + >; + }; + wl12xx_buffer_pins: pinmux_wl12xx_buffer_pins { pinctrl-single,pins = < OMAP3_CORE1_IOPAD(0x2156, PIN_OUTPUT | MUX_MODE4) /* mmc1_dat7.gpio_129 */ diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts index fde4c302f08e..92e08486ec81 100644 --- a/arch/arm/boot/dts/armada-385-turris-omnia.dts +++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts @@ -307,7 +307,7 @@ marvell,function = "spi0"; }; - spi0cs1_pins: spi0cs1-pins { + spi0cs2_pins: spi0cs2-pins { marvell,pins = "mpp26"; marvell,function = "spi0"; }; @@ -342,7 +342,7 @@ }; }; - /* MISO, MOSI, SCLK and CS1 are routed to pin header CN11 */ + /* MISO, MOSI, SCLK and CS2 are routed to pin header CN11 */ }; &uart0 { diff --git a/arch/arm/boot/dts/aspeed-ast2500-evb.dts b/arch/arm/boot/dts/aspeed-ast2500-evb.dts index 8bec21ed0de5..7a874debb7d5 100644 --- a/arch/arm/boot/dts/aspeed-ast2500-evb.dts +++ 
b/arch/arm/boot/dts/aspeed-ast2500-evb.dts @@ -5,7 +5,7 @@ / { model = "AST2500 EVB"; - compatible = "aspeed,ast2500"; + compatible = "aspeed,ast2500-evb", "aspeed,ast2500"; aliases { serial4 = &uart5; diff --git a/arch/arm/boot/dts/aspeed-ast2600-evb.dts b/arch/arm/boot/dts/aspeed-ast2600-evb.dts index 8d0f4656aa05..892814c02aa9 100644 --- a/arch/arm/boot/dts/aspeed-ast2600-evb.dts +++ b/arch/arm/boot/dts/aspeed-ast2600-evb.dts @@ -7,7 +7,7 @@ / { model = "AST2600 EVB"; - compatible = "aspeed,ast2600"; + compatible = "aspeed,ast2600-evb-a1", "aspeed,ast2600"; aliases { serial4 = &uart5; diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi index a362714ae9fc..1ef89dd55d92 100644 --- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi +++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi @@ -117,11 +117,6 @@ groups = "FWSPID"; }; - pinctrl_fwqspid_default: fwqspid_default { - function = "FWSPID"; - groups = "FWQSPID"; - }; - pinctrl_fwspiwp_default: fwspiwp_default { function = "FWSPIWP"; groups = "FWSPIWP"; @@ -653,12 +648,12 @@ }; pinctrl_qspi1_default: qspi1_default { - function = "QSPI1"; + function = "SPI1"; groups = "QSPI1"; }; pinctrl_qspi2_default: qspi2_default { - function = "QSPI2"; + function = "SPI2"; groups = "QSPI2"; }; diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts index b1068cca4228..fd8dc1183b3e 100644 --- a/arch/arm/boot/dts/at91-sam9x60ek.dts +++ b/arch/arm/boot/dts/at91-sam9x60ek.dts @@ -233,10 +233,9 @@ status = "okay"; eeprom@53 { - compatible = "atmel,24c32"; + compatible = "atmel,24c02"; reg = <0x53>; pagesize = <16>; - size = <128>; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi index a06700e53e4c..9c8b3eb49ea3 100644 --- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi +++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi @@ -62,8 +62,8 @@ regulators { vdd_3v3: VDD_IO { regulator-name = "VDD_IO"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3700000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; regulator-always-on; @@ -81,8 +81,8 @@ vddio_ddr: VDD_DDR { regulator-name = "VDD_DDR"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <1850000>; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; regulator-always-on; @@ -104,8 +104,8 @@ vdd_core: VDD_CORE { regulator-name = "VDD_CORE"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <1850000>; + regulator-min-microvolt = <1250000>; + regulator-max-microvolt = <1250000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; regulator-always-on; @@ -146,8 +146,8 @@ LDO1 { regulator-name = "LDO1"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3700000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-always-on; regulator-state-standby { @@ -161,9 +161,8 @@ LDO2 { regulator-name = "LDO2"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3700000>; - regulator-always-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; regulator-state-standby { regulator-on-in-suspend; diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts index 308d472bd104..00b9e88ff545 100644 --- 
a/arch/arm/boot/dts/at91-sama5d2_icp.dts +++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts @@ -195,8 +195,8 @@ regulators { vdd_io_reg: VDD_IO { regulator-name = "VDD_IO"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3700000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; regulator-always-on; @@ -214,8 +214,8 @@ VDD_DDR { regulator-name = "VDD_DDR"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <1850000>; + regulator-min-microvolt = <1350000>; + regulator-max-microvolt = <1350000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; regulator-always-on; @@ -233,8 +233,8 @@ VDD_CORE { regulator-name = "VDD_CORE"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <1850000>; + regulator-min-microvolt = <1250000>; + regulator-max-microvolt = <1250000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; regulator-always-on; @@ -256,7 +256,6 @@ regulator-max-microvolt = <1850000>; regulator-initial-mode = <2>; regulator-allowed-modes = <2>, <4>; - regulator-always-on; regulator-state-standby { regulator-on-in-suspend; @@ -271,8 +270,8 @@ LDO1 { regulator-name = "LDO1"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3700000>; + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; regulator-always-on; regulator-state-standby { @@ -286,8 +285,8 @@ LDO2 { regulator-name = "LDO2"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <3700000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-always-on; regulator-state-standby { @@ -317,21 +316,21 @@ status = "okay"; eeprom@50 { - compatible = "atmel,24c32"; + compatible = "atmel,24c02"; reg = <0x50>; pagesize = <16>; status = "okay"; }; eeprom@52 { - compatible = "atmel,24c32"; + compatible = "atmel,24c02"; reg = <0x52>; pagesize = <16>; status = "disabled"; }; eeprom@53 { - compatible = "atmel,24c32"; + compatible = "atmel,24c02"; reg = <0x53>; pagesize = <16>; status = "disabled"; diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts index e42dae06b582..73cb157c4ef5 100644 --- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts @@ -91,7 +91,7 @@ spi1: spi@fc018000 { pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_spi0_cs>; + pinctrl-0 = <&pinctrl_spi1_cs>; cs-gpios = <&pioB 21 0>; status = "okay"; }; @@ -149,7 +149,7 @@ atmel,pins = ; }; - pinctrl_spi0_cs: spi0_cs_default { + pinctrl_spi1_cs: spi1_cs_default { atmel,pins = ; }; diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi index d1181ead18e5..21344fbc89e5 100644 --- a/arch/arm/boot/dts/at91rm9200.dtsi +++ b/arch/arm/boot/dts/at91rm9200.dtsi @@ -660,7 +660,7 @@ compatible = "atmel,at91rm9200-udc"; reg = <0xfffb0000 0x4000>; interrupts = <11 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 2>; + clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 1>; clock-names = "pclk", "hclk"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi index 87bb39060e8b..4783e657b4cb 100644 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi @@ -39,6 +39,13 @@ }; + usb1 { + pinctrl_usb1_vbus_gpio: usb1_vbus_gpio { + atmel,pins = + ; /* PC5 GPIO */ + 
}; + }; + mmc0_slot1 { pinctrl_board_mmc0_slot1: mmc0_slot1-board { atmel,pins = @@ -84,6 +91,8 @@ }; usb1: gadget@fffa4000 { + pinctrl-0 = <&pinctrl_usb1_vbus_gpio>; + pinctrl-names = "default"; atmel,vbus-gpio = <&pioC 5 GPIO_ACTIVE_HIGH>; status = "okay"; }; @@ -219,6 +228,12 @@ wm8731: wm8731@1b { compatible = "wm8731"; reg = <0x1b>; + + /* PCK0 at 12MHz */ + clocks = <&pmc PMC_TYPE_SYSTEM 8>; + clock-names = "mclk"; + assigned-clocks = <&pmc PMC_TYPE_SYSTEM 8>; + assigned-clock-rates = <12000000>; }; }; diff --git a/arch/arm/boot/dts/bcm2835-rpi-b.dts b/arch/arm/boot/dts/bcm2835-rpi-b.dts index 1b63d6b19750..25d87212cefd 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-b.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-b.dts @@ -53,18 +53,17 @@ "GPIO18", "NC", /* GPIO19 */ "NC", /* GPIO20 */ - "GPIO21", + "CAM_GPIO0", "GPIO22", "GPIO23", "GPIO24", "GPIO25", "NC", /* GPIO26 */ - "CAM_GPIO0", - /* Binary number representing build/revision */ - "CONFIG0", - "CONFIG1", - "CONFIG2", - "CONFIG3", + "GPIO27", + "GPIO28", + "GPIO29", + "GPIO30", + "GPIO31", "NC", /* GPIO32 */ "NC", /* GPIO33 */ "NC", /* GPIO34 */ diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts index 33b2b77aa47d..00582eb2c12e 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts @@ -74,16 +74,18 @@ "GPIO27", "SDA0", "SCL0", - "NC", /* GPIO30 */ - "NC", /* GPIO31 */ - "NC", /* GPIO32 */ - "NC", /* GPIO33 */ - "NC", /* GPIO34 */ - "NC", /* GPIO35 */ - "NC", /* GPIO36 */ - "NC", /* GPIO37 */ - "NC", /* GPIO38 */ - "NC", /* GPIO39 */ + /* Used by BT module */ + "CTS0", + "RTS0", + "TXD0", + "RXD0", + /* Used by Wifi */ + "SD1_CLK", + "SD1_CMD", + "SD1_DATA0", + "SD1_DATA1", + "SD1_DATA2", + "SD1_DATA3", "CAM_GPIO1", /* GPIO40 */ "WL_ON", /* GPIO41 */ "NC", /* GPIO42 */ diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts index 61010266ca9a..90472e76a313 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts @@ -45,7 +45,7 @@ #gpio-cells = <2>; gpio-line-names = "BT_ON", "WL_ON", - "STATUS_LED_R", + "PWR_LED_R", "LAN_RUN", "", "CAM_GPIO0", diff --git a/arch/arm/boot/dts/bcm2837-rpi-cm3-io3.dts b/arch/arm/boot/dts/bcm2837-rpi-cm3-io3.dts index 588d9411ceb6..3dfce4312dfc 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-cm3-io3.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-cm3-io3.dts @@ -63,8 +63,8 @@ "GPIO43", "GPIO44", "GPIO45", - "GPIO46", - "GPIO47", + "SMPS_SCL", + "SMPS_SDA", /* Used by eMMC */ "SD_CLK_R", "SD_CMD_R", diff --git a/arch/arm/boot/dts/bcm53015-meraki-mr26.dts b/arch/arm/boot/dts/bcm53015-meraki-mr26.dts new file mode 100644 index 000000000000..14f58033efeb --- /dev/null +++ b/arch/arm/boot/dts/bcm53015-meraki-mr26.dts @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0-or-later OR MIT +/* + * Broadcom BCM470X / BCM5301X ARM platform code. 
+ * DTS for Meraki MR26 / Codename: Venom + * + * Copyright (C) 2022 Christian Lamparter + */ + +/dts-v1/; + +#include "bcm4708.dtsi" +#include "bcm5301x-nand-cs0-bch8.dtsi" +#include <dt-bindings/leds/common.h> + +/ { + compatible = "meraki,mr26", "brcm,bcm53015", "brcm,bcm4708"; + model = "Meraki MR26"; + + memory@0 { + reg = <0x00000000 0x08000000>; + device_type = "memory"; + }; + + leds { + compatible = "gpio-leds"; + + led-0 { + function = LED_FUNCTION_FAULT; + color = <LED_COLOR_ID_AMBER>; + gpios = <&chipcommon 13 GPIO_ACTIVE_HIGH>; + panic-indicator; + }; + led-1 { + function = LED_FUNCTION_INDICATOR; + color = <LED_COLOR_ID_WHITE>; + gpios = <&chipcommon 12 GPIO_ACTIVE_HIGH>; + }; + }; + + keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + + key-restart { + label = "Reset"; + linux,code = <KEY_RESTART>; + gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&uart0 { + clock-frequency = <50000000>; + /delete-property/ clocks; +}; + +&uart1 { + status = "disabled"; +}; + +&gmac0 { + status = "okay"; +}; + +&gmac1 { + status = "disabled"; +}; +&gmac2 { + status = "disabled"; +}; +&gmac3 { + status = "disabled"; +}; + +&nandcs { + nand-ecc-algo = "hw"; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <0x1>; + #size-cells = <0x1>; + + partition@0 { + label = "u-boot"; + reg = <0x0 0x200000>; + read-only; + }; + + partition@200000 { + label = "u-boot-env"; + reg = <0x200000 0x200000>; + /* empty */ + }; + + partition@400000 { + label = "u-boot-backup"; + reg = <0x400000 0x200000>; + /* empty */ + }; + + partition@600000 { + label = "u-boot-env-backup"; + reg = <0x600000 0x200000>; + /* empty */ + }; + + partition@800000 { + label = "ubi"; + reg = <0x800000 0x7780000>; + }; + }; +}; + +&srab { + status = "okay"; + + ports { + port@0 { + reg = <0>; + label = "poe"; + }; + + port@5 { + reg = <5>; + label = "cpu"; + ethernet = <&gmac0>; + + fixed-link { + speed = <1000>; + duplex-full; + }; + }; + }; +}; + +&i2c0 { + status = "okay"; + + pinctrl-names = "default"; + pinctrl-0 = <&pinmux_i2c>; + + clock-frequency = <100000>; + + ina219@40 { + compatible = "ti,ina219"; /* PoE power */ + reg = <0x40>; + shunt-resistor = <60000>; /* = 60 mOhms */ + }; + + eeprom@56 { + compatible = "atmel,24c64"; + reg = <0x56>; + pagesize = <32>; + read-only; + #address-cells = <1>; + #size-cells = <1>; + + /* it's empty */ + }; +}; + +&thermal { + status = "disabled"; + /* does not work, reads 418 degree Celsius */ +}; diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi index 06450066b178..255a13666edc 100644 --- a/arch/arm/boot/dts/exynos4412-midas.dtsi +++ b/arch/arm/boot/dts/exynos4412-midas.dtsi @@ -588,7 +588,7 @@ clocks = <&camera 1>; clock-names = "extclk"; samsung,camclk-out = <1>; - gpios = <&gpm1 6 GPIO_ACTIVE_HIGH>; + gpios = <&gpm1 6 GPIO_ACTIVE_LOW>; port { is_s5k6a3_ep: endpoint { diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts index c2e793b69e7d..e2d76ea4404e 100644 --- a/arch/arm/boot/dts/exynos4412-origen.dts +++ b/arch/arm/boot/dts/exynos4412-origen.dts @@ -95,7 +95,7 @@ }; &ehci { - samsung,vbus-gpio = <&gpx3 5 1>; + samsung,vbus-gpio = <&gpx3 5 GPIO_ACTIVE_HIGH>; status = "okay"; phys = <&exynos_usbphy 2>, <&exynos_usbphy 3>; phy-names = "hsic0", "hsic1"; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 572198b6834e..06c4e0996503 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -129,7 +129,7 @@
samsung,i2c-max-bus-freq = <20000>; eeprom@50 { - compatible = "samsung,s524ad0xd1"; + compatible = "samsung,s524ad0xd1", "atmel,24c128"; reg = <0x50>; }; @@ -289,7 +289,7 @@ samsung,i2c-max-bus-freq = <20000>; eeprom@51 { - compatible = "samsung,s524ad0xd1"; + compatible = "samsung,s524ad0xd1", "atmel,24c128"; reg = <0x51>; }; diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts index 7e2b0f198dfa..1053b7c584d8 100644 --- a/arch/arm/boot/dts/imx28-evk.dts +++ b/arch/arm/boot/dts/imx28-evk.dts @@ -129,7 +129,7 @@ pinctrl-0 = <&spi2_pins_a>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "sst,sst25vf016b", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts index f3bddc5ada4b..13acdc7916b9 100644 --- a/arch/arm/boot/dts/imx28-m28evk.dts +++ b/arch/arm/boot/dts/imx28-m28evk.dts @@ -33,7 +33,7 @@ pinctrl-0 = <&spi2_pins_a>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "m25p80", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts index 43be7a6a769b..90928db0df70 100644 --- a/arch/arm/boot/dts/imx28-sps1.dts +++ b/arch/arm/boot/dts/imx28-sps1.dts @@ -51,7 +51,7 @@ pinctrl-0 = <&spi2_pins_a>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "everspin,mr25h256", "mr25h256"; diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts index 6d9a5ede94aa..006fbd7f5432 100644 --- a/arch/arm/boot/dts/imx53-ppd.dts +++ b/arch/arm/boot/dts/imx53-ppd.dts @@ -592,7 +592,7 @@ touchscreen@4b { compatible = "atmel,maxtouch"; - reset-gpio = <&gpio5 19 GPIO_ACTIVE_HIGH>; + reset-gpio = <&gpio5 19 GPIO_ACTIVE_LOW>; reg = <0x4b>; interrupt-parent = <&gpio5>; interrupts = <4 IRQ_TYPE_LEVEL_LOW>; diff --git a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts index 65359aece950..7da74e6f46d9 100644 --- a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts +++ b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts @@ -143,7 +143,7 @@ reg = <0x4a>; interrupt-parent = <&gpio1>; interrupts = <9 IRQ_TYPE_EDGE_FALLING>; /* SODIMM 28 */ - reset-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; /* SODIMM 30 */ + reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>; /* SODIMM 30 */ status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6dl-eckelmann-ci4x10.dts b/arch/arm/boot/dts/imx6dl-eckelmann-ci4x10.dts index b4a9523e325b..864dc5018451 100644 --- a/arch/arm/boot/dts/imx6dl-eckelmann-ci4x10.dts +++ b/arch/arm/boot/dts/imx6dl-eckelmann-ci4x10.dts @@ -297,7 +297,11 @@ phy-mode = "rmii"; phy-reset-gpios = <&gpio1 18 GPIO_ACTIVE_LOW>; phy-handle = <&phy>; - clocks = <&clks IMX6QDL_CLK_ENET>, <&clks IMX6QDL_CLK_ENET>, <&rmii_clk>; + clocks = <&clks IMX6QDL_CLK_ENET>, + <&clks IMX6QDL_CLK_ENET>, + <&rmii_clk>, + <&clks IMX6QDL_CLK_ENET_REF>; + clock-names = "ipg", "ahb", "ptp", "enet_out"; status = "okay"; mdio { diff --git a/arch/arm/boot/dts/imx6dl-rex-basic.dts b/arch/arm/boot/dts/imx6dl-rex-basic.dts index 0f1616bfa9a8..b72f8ea1e6f6 100644 --- a/arch/arm/boot/dts/imx6dl-rex-basic.dts +++ b/arch/arm/boot/dts/imx6dl-rex-basic.dts @@ -19,7 +19,7 @@ }; &ecspi3 { - flash: m25p80@0 { + flash: flash@0 { compatible = "sst,sst25vf016b", "jedec,spi-nor"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi index fdd81fdc3f35..cd3183c36488 
100644 --- a/arch/arm/boot/dts/imx6dl.dtsi +++ b/arch/arm/boot/dts/imx6dl.dtsi @@ -84,6 +84,9 @@ ocram: sram@900000 { compatible = "mmio-sram"; reg = <0x00900000 0x20000>; + ranges = <0 0x00900000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6QDL_CLK_OCRAM>; }; diff --git a/arch/arm/boot/dts/imx6q-apalis-eval.dts b/arch/arm/boot/dts/imx6q-apalis-eval.dts index fab83abb6466..a0683b4aeca1 100644 --- a/arch/arm/boot/dts/imx6q-apalis-eval.dts +++ b/arch/arm/boot/dts/imx6q-apalis-eval.dts @@ -140,7 +140,7 @@ reg = <0x4a>; interrupt-parent = <&gpio6>; interrupts = <10 IRQ_TYPE_EDGE_FALLING>; - reset-gpios = <&gpio6 9 GPIO_ACTIVE_HIGH>; /* SODIMM 13 */ + reset-gpios = <&gpio6 9 GPIO_ACTIVE_LOW>; /* SODIMM 13 */ status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts b/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts index 1614b1ae501d..86e84781cf5d 100644 --- a/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts +++ b/arch/arm/boot/dts/imx6q-apalis-ixora-v1.1.dts @@ -145,7 +145,7 @@ reg = <0x4a>; interrupt-parent = <&gpio6>; interrupts = <10 IRQ_TYPE_EDGE_FALLING>; - reset-gpios = <&gpio6 9 GPIO_ACTIVE_HIGH>; /* SODIMM 13 */ + reset-gpios = <&gpio6 9 GPIO_ACTIVE_LOW>; /* SODIMM 13 */ status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora.dts b/arch/arm/boot/dts/imx6q-apalis-ixora.dts index fa9f98dd15ac..62e72773e53b 100644 --- a/arch/arm/boot/dts/imx6q-apalis-ixora.dts +++ b/arch/arm/boot/dts/imx6q-apalis-ixora.dts @@ -144,7 +144,7 @@ reg = <0x4a>; interrupt-parent = <&gpio6>; interrupts = <10 IRQ_TYPE_EDGE_FALLING>; - reset-gpios = <&gpio6 9 GPIO_ACTIVE_HIGH>; /* SODIMM 13 */ + reset-gpios = <&gpio6 9 GPIO_ACTIVE_LOW>; /* SODIMM 13 */ status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6q-ba16.dtsi b/arch/arm/boot/dts/imx6q-ba16.dtsi index e4578ed3371e..133991ca8c63 100644 --- a/arch/arm/boot/dts/imx6q-ba16.dtsi +++ b/arch/arm/boot/dts/imx6q-ba16.dtsi @@ -139,7 +139,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: n25q032@0 { + flash: flash@0 { compatible = "jedec,spi-nor"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi index 2a98cc657595..66be04299cbf 100644 --- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi +++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi @@ -160,7 +160,7 @@ pinctrl-0 = <&pinctrl_ecspi5>; status = "okay"; - m25_eeprom: m25p80@0 { + m25_eeprom: flash@0 { compatible = "atmel,at25"; spi-max-frequency = <10000000>; size = <0x8000>; diff --git a/arch/arm/boot/dts/imx6q-cm-fx6.dts b/arch/arm/boot/dts/imx6q-cm-fx6.dts index bfb530f29d9d..1ad41c944b4b 100644 --- a/arch/arm/boot/dts/imx6q-cm-fx6.dts +++ b/arch/arm/boot/dts/imx6q-cm-fx6.dts @@ -260,7 +260,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - m25p80@0 { + flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "st,m25p", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts index fa2307d8ce86..4dee1b22d5c1 100644 --- a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts +++ b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts @@ -102,7 +102,7 @@ cs-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "m25p80", "jedec,spi-nor"; spi-max-frequency = <40000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6q-dms-ba16.dts b/arch/arm/boot/dts/imx6q-dms-ba16.dts index 48fb47e715f6..137db38f0d27 100644 --- a/arch/arm/boot/dts/imx6q-dms-ba16.dts +++ 
b/arch/arm/boot/dts/imx6q-dms-ba16.dts @@ -47,7 +47,7 @@ pinctrl-0 = <&pinctrl_ecspi5>; status = "okay"; - m25_eeprom: m25p80@0 { + m25_eeprom: flash@0 { compatible = "atmel,at25256B", "atmel,at25"; spi-max-frequency = <20000000>; size = <0x8000>; diff --git a/arch/arm/boot/dts/imx6q-gw5400-a.dts b/arch/arm/boot/dts/imx6q-gw5400-a.dts index 4cde45d5c90c..e894faba571f 100644 --- a/arch/arm/boot/dts/imx6q-gw5400-a.dts +++ b/arch/arm/boot/dts/imx6q-gw5400-a.dts @@ -137,7 +137,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "sst,w25q256", "jedec,spi-nor"; spi-max-frequency = <30000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6q-marsboard.dts b/arch/arm/boot/dts/imx6q-marsboard.dts index 05ee28388229..cc1801002394 100644 --- a/arch/arm/boot/dts/imx6q-marsboard.dts +++ b/arch/arm/boot/dts/imx6q-marsboard.dts @@ -100,7 +100,7 @@ cs-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>; status = "okay"; - m25p80@0 { + flash@0 { compatible = "microchip,sst25vf016b"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6q-prti6q.dts b/arch/arm/boot/dts/imx6q-prti6q.dts index b4605edfd2ab..d8fa83effd63 100644 --- a/arch/arm/boot/dts/imx6q-prti6q.dts +++ b/arch/arm/boot/dts/imx6q-prti6q.dts @@ -364,8 +364,8 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_wifi>; interrupts-extended = <&gpio1 30 IRQ_TYPE_LEVEL_HIGH>; - ref-clock-frequency = "38400000"; - tcxo-clock-frequency = "19200000"; + ref-clock-frequency = <38400000>; + tcxo-clock-frequency = <19200000>; }; }; diff --git a/arch/arm/boot/dts/imx6q-rex-pro.dts b/arch/arm/boot/dts/imx6q-rex-pro.dts index 1767e1a3cd53..271f4b2d9b9f 100644 --- a/arch/arm/boot/dts/imx6q-rex-pro.dts +++ b/arch/arm/boot/dts/imx6q-rex-pro.dts @@ -19,7 +19,7 @@ }; &ecspi3 { - flash: m25p80@0 { + flash: flash@0 { compatible = "sst,sst25vf032b", "jedec,spi-nor"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi index 5277e3903291..afec1677e6ba 100644 --- a/arch/arm/boot/dts/imx6q.dtsi +++ b/arch/arm/boot/dts/imx6q.dtsi @@ -163,6 +163,9 @@ ocram: sram@900000 { compatible = "mmio-sram"; reg = <0x00900000 0x40000>; + ranges = <0 0x00900000 0x40000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6QDL_CLK_OCRAM>; }; diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi index 30fa349f9d05..a696873dc1ab 100644 --- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi +++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi @@ -286,6 +286,8 @@ codec: sgtl5000@a { compatible = "fsl,sgtl5000"; reg = <0x0a>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sgtl5000>; clocks = <&clks IMX6QDL_CLK_CKO>; VDDA-supply = <®_module_3v3_audio>; VDDIO-supply = <®_module_3v3>; @@ -516,8 +518,6 @@ MX6QDL_PAD_DISP0_DAT21__AUD4_TXD 0x130b0 MX6QDL_PAD_DISP0_DAT22__AUD4_TXFS 0x130b0 MX6QDL_PAD_DISP0_DAT23__AUD4_RXD 0x130b0 - /* SGTL5000 sys_mclk */ - MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0 >; }; @@ -810,6 +810,12 @@ >; }; + pinctrl_sgtl5000: sgtl5000grp { + fsl,pins = < + MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0 + >; + }; + pinctrl_spdif: spdifgrp { fsl,pins = < MX6QDL_PAD_GPIO_16__SPDIF_IN 0x1b0b0 diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi index e21f6ac864e5..baa197c90060 100644 --- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi +++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi @@ -96,7 +96,7 @@ pinctrl-0 = <&pinctrl_ecspi4>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { 
#address-cells = <1>; #size-cells = <1>; compatible = "micron,n25q128a11", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi index ead7ba27e105..ff8cb47fb9fd 100644 --- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi @@ -131,7 +131,7 @@ pinctrl-0 = <&pinctrl_ecspi4>; status = "okay"; - flash: m25p80@1 { + flash: flash@1 { #address-cells = <1>; #size-cells = <1>; compatible = "micron,n25q128a11", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/imx6qdl-colibri.dtsi index 4e2a309c93fa..1e86b3814708 100644 --- a/arch/arm/boot/dts/imx6qdl-colibri.dtsi +++ b/arch/arm/boot/dts/imx6qdl-colibri.dtsi @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ OR MIT /* - * Copyright 2014-2020 Toradex + * Copyright 2014-2022 Toradex * Copyright 2012 Freescale Semiconductor, Inc. * Copyright 2011 Linaro Ltd. */ @@ -132,7 +132,7 @@ clock-frequency = <100000>; pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c2>; - pinctrl-0 = <&pinctrl_i2c2_gpio>; + pinctrl-1 = <&pinctrl_i2c2_gpio>; scl-gpios = <&gpio2 30 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; sda-gpios = <&gpio3 16 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; status = "okay"; @@ -488,7 +488,7 @@ >; }; - pinctrl_i2c2_gpio: i2c2grp { + pinctrl_i2c2_gpio: i2c2gpiogrp { fsl,pins = < MX6QDL_PAD_EIM_EB2__GPIO2_IO30 0x4001b8b1 MX6QDL_PAD_EIM_D16__GPIO3_IO16 0x4001b8b1 diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi index 648f5fcb72e6..2c1d6f28e695 100644 --- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi +++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi @@ -35,7 +35,7 @@ pinctrl-0 = <&pinctrl_ecspi3>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "sst,sst25vf040b", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi index ed4e22259959..852601d5ab6b 100644 --- a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi @@ -31,7 +31,7 @@ user-pb { label = "user_pb"; - gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>; + gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>; linux,code = ; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi index 4cd7d290f5b2..7a2628fdd142 100644 --- a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi @@ -28,7 +28,7 @@ user-pb { label = "user_pb"; - gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>; + gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>; linux,code = ; }; diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi index 92f9977d1482..37d94aa45a8b 100644 --- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi +++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi @@ -51,16 +51,6 @@ vin-supply = <®_3p3v_s5>; }; - reg_3p3v_s0: regulator-3p3v-s0 { - compatible = "regulator-fixed"; - regulator-name = "V_3V3_S0"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - regulator-always-on; - regulator-boot-on; - vin-supply = <®_3p3v_s5>; - }; - reg_3p3v_s5: regulator-3p3v-s5 { compatible = "regulator-fixed"; regulator-name = "V_3V3_S5"; @@ -258,8 +248,8 @@ status = "okay"; /* default boot source: workaround #1 for errata ERR006282 */ - smarc_flash: spi-flash@0 { - compatible = "winbond,w25q16dw", "jedec,spi-nor"; + smarc_flash: flash@0 { + compatible = "jedec,spi-nor"; reg = <0>; 
spi-max-frequency = <20000000>; }; diff --git a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi index d526f01a2c52..b7e74d859a96 100644 --- a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi @@ -179,7 +179,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "microchip,sst25vf016b"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi index a0917823c244..a88323ac6c69 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi @@ -321,7 +321,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "microchip,sst25vf016b"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi index 92d09a3ebe0e..ee7e2371f94b 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi @@ -252,7 +252,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "microchip,sst25vf016b"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi index 1243677b5f97..5adeb7aed220 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi @@ -237,7 +237,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "sst,sst25vf016b", "jedec,spi-nor"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi index afe477f32984..17535bf12516 100644 --- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi @@ -272,7 +272,7 @@ pinctrl-0 = <&pinctrl_ecspi1 &pinctrl_ecspi1_cs>; status = "disabled"; /* pin conflict with WEIM NOR */ - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "st,m25p32", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi index fdc3aa9d544d..0aa1a0a28de0 100644 --- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi @@ -313,7 +313,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "sst,sst25vf016b", "jedec,spi-nor"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index f824c9abd11a..758c62fb9cac 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi @@ -194,7 +194,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "st,m25p32", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi index e6aa0c33754d..966038ecc5bf 100644 --- a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi +++ b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi @@ -226,7 +226,7 @@ reg = <0x28>; #gpio-cells = <2>; gpio-controller; - ngpio = <32>; + ngpios = <62>; }; sgtl5000: codec@a { diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 7a8837cbe21b..7858ae5d39df 100644 --- 
a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi @@ -756,7 +756,7 @@ regulator-name = "vddpu"; regulator-min-microvolt = <725000>; regulator-max-microvolt = <1450000>; - regulator-enable-ramp-delay = <150>; + regulator-enable-ramp-delay = <380>; anatop-reg-offset = <0x140>; anatop-vol-bit-shift = <9>; anatop-vol-bit-width = <5>; diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi index b310f13a53f2..4d23c92aa8a6 100644 --- a/arch/arm/boot/dts/imx6qp.dtsi +++ b/arch/arm/boot/dts/imx6qp.dtsi @@ -9,12 +9,18 @@ ocram2: sram@940000 { compatible = "mmio-sram"; reg = <0x00940000 0x20000>; + ranges = <0 0x00940000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6QDL_CLK_OCRAM>; }; ocram3: sram@960000 { compatible = "mmio-sram"; reg = <0x00960000 0x20000>; + ranges = <0 0x00960000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6QDL_CLK_OCRAM>; }; diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index 25f6f2fb1555..f16c830f1e91 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts @@ -137,7 +137,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "st,m25p32", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi index 91a8c54d5e11..c184a6d5bc42 100644 --- a/arch/arm/boot/dts/imx6sl.dtsi +++ b/arch/arm/boot/dts/imx6sl.dtsi @@ -114,6 +114,9 @@ ocram: sram@900000 { compatible = "mmio-sram"; reg = <0x00900000 0x20000>; + ranges = <0 0x00900000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6SL_CLK_OCRAM>; }; diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi index 0b622201a1f3..bf5b262b91f9 100644 --- a/arch/arm/boot/dts/imx6sll.dtsi +++ b/arch/arm/boot/dts/imx6sll.dtsi @@ -115,6 +115,9 @@ ocram: sram@900000 { compatible = "mmio-sram"; reg = <0x00900000 0x20000>; + ranges = <0 0x00900000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; }; intc: interrupt-controller@a01000 { diff --git a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts index 66af78e83b70..a2c79bcf9a11 100644 --- a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts +++ b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts @@ -107,7 +107,7 @@ pinctrl-0 = <&pinctrl_ecspi1>; status = "okay"; - flash: m25p80@0 { + flash: flash@0 { compatible = "microchip,sst25vf016b"; spi-max-frequency = <20000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6sx-sdb-reva.dts b/arch/arm/boot/dts/imx6sx-sdb-reva.dts index dce5dcf96c25..7dda42553f4b 100644 --- a/arch/arm/boot/dts/imx6sx-sdb-reva.dts +++ b/arch/arm/boot/dts/imx6sx-sdb-reva.dts @@ -123,7 +123,7 @@ pinctrl-0 = <&pinctrl_qspi2>; status = "okay"; - flash0: s25fl128s@0 { + flash0: flash@0 { reg = <0>; #address-cells = <1>; #size-cells = <1>; @@ -133,7 +133,7 @@ spi-tx-bus-width = <4>; }; - flash1: s25fl128s@2 { + flash1: flash@2 { reg = <2>; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts index 5a63ca615722..1b808563a536 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dts +++ b/arch/arm/boot/dts/imx6sx-sdb.dts @@ -108,7 +108,7 @@ pinctrl-0 = <&pinctrl_qspi2>; status = "okay"; - flash0: n25q256a@0 { + flash0: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "micron,n25q256a", "jedec,spi-nor"; @@ -118,7 +118,7 @@ reg = <0>; }; - flash1: n25q256a@2 { + flash1: 
flash@2 { #address-cells = <1>; #size-cells = <1>; compatible = "micron,n25q256a", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index dfdca1804f9f..c399919943c3 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -161,12 +161,18 @@ ocram_s: sram@8f8000 { compatible = "mmio-sram"; reg = <0x008f8000 0x4000>; + ranges = <0 0x008f8000 0x4000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6SX_CLK_OCRAM_S>; }; ocram: sram@900000 { compatible = "mmio-sram"; reg = <0x00900000 0x20000>; + ranges = <0 0x00900000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; clocks = <&clks IMX6SX_CLK_OCRAM>; }; diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi index 64c2d1e9f7fc..71d3c7e05e08 100644 --- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi +++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi @@ -239,7 +239,7 @@ pinctrl-0 = <&pinctrl_qspi>; status = "okay"; - flash0: n25q256a@0 { + flash0: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "micron,n25q256a", "jedec,spi-nor"; diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6310-som.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6310-som.dtsi index 47d3ce5d255f..acd936540d89 100644 --- a/arch/arm/boot/dts/imx6ul-kontron-n6310-som.dtsi +++ b/arch/arm/boot/dts/imx6ul-kontron-n6310-som.dtsi @@ -19,7 +19,7 @@ }; &qspi { - spi-flash@0 { + flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "spi-nand"; diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6311-som.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6311-som.dtsi index a095a7654ac6..29ed38dce580 100644 --- a/arch/arm/boot/dts/imx6ul-kontron-n6311-som.dtsi +++ b/arch/arm/boot/dts/imx6ul-kontron-n6311-som.dtsi @@ -18,7 +18,7 @@ }; &qspi { - spi-flash@0 { + flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "spi-nand"; diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi index 2a449a3c1ae2..09a83dbdf651 100644 --- a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi +++ b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi @@ -19,7 +19,7 @@ pinctrl-0 = <&pinctrl_ecspi2>; status = "okay"; - spi-flash@0 { + flash@0 { compatible = "mxicy,mx25v8035f", "jedec,spi-nor"; spi-max-frequency = <50000000>; reg = <0>; diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi index d7d9f3e46b92..c40684ad11b8 100644 --- a/arch/arm/boot/dts/imx6ul.dtsi +++ b/arch/arm/boot/dts/imx6ul.dtsi @@ -62,20 +62,18 @@ clock-frequency = <696000000>; clock-latency = <61036>; /* two CLK32 periods */ #cooling-cells = <2>; - operating-points = < + operating-points = /* kHz uV */ - 696000 1275000 - 528000 1175000 - 396000 1025000 - 198000 950000 - >; - fsl,soc-operating-points = < + <696000 1275000>, + <528000 1175000>, + <396000 1025000>, + <198000 950000>; + fsl,soc-operating-points = /* KHz uV */ - 696000 1275000 - 528000 1175000 - 396000 1175000 - 198000 1175000 - >; + <696000 1275000>, + <528000 1175000>, + <396000 1175000>, + <198000 1175000>; clocks = <&clks IMX6UL_CLK_ARM>, <&clks IMX6UL_CLK_PLL2_BUS>, <&clks IMX6UL_CLK_PLL2_PFD2>, @@ -147,6 +145,9 @@ ocram: sram@900000 { compatible = "mmio-sram"; reg = <0x00900000 0x20000>; + ranges = <0 0x00900000 0x20000>; + #address-cells = <1>; + #size-cells = <1>; }; intc: interrupt-controller@a01000 { @@ -540,7 +541,7 @@ }; kpp: keypad@20b8000 { - compatible = "fsl,imx6ul-kpp", "fsl,imx6q-kpp", "fsl,imx21-kpp"; + compatible = "fsl,imx6ul-kpp", 
"fsl,imx21-kpp"; reg = <0x020b8000 0x4000>; interrupts = ; clocks = <&clks IMX6UL_CLK_KPP>; @@ -994,7 +995,7 @@ }; csi: csi@21c4000 { - compatible = "fsl,imx6ul-csi", "fsl,imx7-csi"; + compatible = "fsl,imx6ul-csi"; reg = <0x021c4000 0x4000>; interrupts = ; clocks = <&clks IMX6UL_CLK_CSI>; @@ -1003,7 +1004,7 @@ }; lcdif: lcdif@21c8000 { - compatible = "fsl,imx6ul-lcdif", "fsl,imx28-lcdif"; + compatible = "fsl,imx6ul-lcdif", "fsl,imx6sx-lcdif"; reg = <0x021c8000 0x4000>; interrupts = ; clocks = <&clks IMX6UL_CLK_LCDIF_PIX>, @@ -1024,7 +1025,7 @@ qspi: spi@21e0000 { #address-cells = <1>; #size-cells = <0>; - compatible = "fsl,imx6ul-qspi", "fsl,imx6sx-qspi"; + compatible = "fsl,imx6ul-qspi"; reg = <0x021e0000 0x4000>, <0x60000000 0x10000000>; reg-names = "QuadSPI", "QuadSPI-memory"; interrupts = ; diff --git a/arch/arm/boot/dts/imx6ull-colibri.dtsi b/arch/arm/boot/dts/imx6ull-colibri.dtsi index 4436556624d6..548cfcc7a01d 100644 --- a/arch/arm/boot/dts/imx6ull-colibri.dtsi +++ b/arch/arm/boot/dts/imx6ull-colibri.dtsi @@ -37,7 +37,7 @@ reg_sd1_vmmc: regulator-sd1-vmmc { compatible = "regulator-gpio"; - gpio = <&gpio5 9 GPIO_ACTIVE_HIGH>; + gpios = <&gpio5 9 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_snvs_reg_sd>; regulator-always-on; diff --git a/arch/arm/boot/dts/imx6ull-kontron-n6411-som.dtsi b/arch/arm/boot/dts/imx6ull-kontron-n6411-som.dtsi index b7e984284e1a..d000606c0704 100644 --- a/arch/arm/boot/dts/imx6ull-kontron-n6411-som.dtsi +++ b/arch/arm/boot/dts/imx6ull-kontron-n6411-som.dtsi @@ -18,7 +18,7 @@ }; &qspi { - spi-flash@0 { + flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "spi-nand"; diff --git a/arch/arm/boot/dts/imx7-colibri-aster.dtsi b/arch/arm/boot/dts/imx7-colibri-aster.dtsi index 9fa701bec2ec..139188eb9f40 100644 --- a/arch/arm/boot/dts/imx7-colibri-aster.dtsi +++ b/arch/arm/boot/dts/imx7-colibri-aster.dtsi @@ -99,7 +99,7 @@ reg = <0x4a>; interrupt-parent = <&gpio2>; interrupts = <15 IRQ_TYPE_EDGE_FALLING>; /* SODIMM 107 */ - reset-gpios = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* SODIMM 106 */ + reset-gpios = <&gpio2 28 GPIO_ACTIVE_LOW>; /* SODIMM 106 */ }; /* M41T0M6 real time clock on carrier board */ diff --git a/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi b/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi index 97601375f264..3caf450735d7 100644 --- a/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi +++ b/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi @@ -124,7 +124,7 @@ reg = <0x4a>; interrupt-parent = <&gpio1>; interrupts = <9 IRQ_TYPE_EDGE_FALLING>; /* SODIMM 28 */ - reset-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>; /* SODIMM 30 */ + reset-gpios = <&gpio1 10 GPIO_ACTIVE_LOW>; /* SODIMM 30 */ status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi b/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi index af39e5370fa1..045e4413d339 100644 --- a/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi +++ b/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi @@ -13,6 +13,10 @@ }; }; +&cpu1 { + cpu-supply = <®_DCDC2>; +}; + &gpio6 { gpio-line-names = "", "", diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts index 6823b9f1a2a3..6d562ebe9029 100644 --- a/arch/arm/boot/dts/imx7d-sdb.dts +++ b/arch/arm/boot/dts/imx7d-sdb.dts @@ -199,12 +199,7 @@ interrupt-parent = <&gpio2>; interrupts = <29 0>; pendown-gpio = <&gpio2 29 GPIO_ACTIVE_HIGH>; - ti,x-min = /bits/ 16 <0>; - ti,x-max = /bits/ 16 <0>; - ti,y-min = /bits/ 16 <0>; - ti,y-max = /bits/ 16 <0>; - ti,pressure-max = /bits/ 16 <0>; - ti,x-plate-ohms = /bits/ 16 <400>; + 
touchscreen-max-pressure = <255>; wakeup-source; }; }; diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi index 84d9cc13afb9..9e1b0af0aa43 100644 --- a/arch/arm/boot/dts/imx7s.dtsi +++ b/arch/arm/boot/dts/imx7s.dtsi @@ -102,6 +102,7 @@ compatible = "usb-nop-xceiv"; clocks = <&clks IMX7D_USB_HSIC_ROOT_CLK>; clock-names = "main_clk"; + power-domains = <&pgc_hsic_phy>; #phy-cells = <0>; }; @@ -1104,7 +1105,6 @@ compatible = "fsl,imx7d-usb", "fsl,imx27-usb"; reg = <0x30b30000 0x200>; interrupts = ; - power-domains = <&pgc_hsic_phy>; clocks = <&clks IMX7D_USB_CTRL_CLK>; fsl,usbphy = <&usbphynop3>; fsl,usbmisc = <&usbmisc3 0>; diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts index 67d1f9b24a52..8600c0548525 100644 --- a/arch/arm/boot/dts/integratorap.dts +++ b/arch/arm/boot/dts/integratorap.dts @@ -153,6 +153,7 @@ pci: pciv3@62000000 { compatible = "arm,integrator-ap-pci", "v3,v360epc-pci"; + device_type = "pci"; #interrupt-cells = <1>; #size-cells = <2>; #address-cells = <3>; diff --git a/arch/arm/boot/dts/kirkwood-lsxl.dtsi b/arch/arm/boot/dts/kirkwood-lsxl.dtsi index 7b151acb9984..88b70ba1c8fe 100644 --- a/arch/arm/boot/dts/kirkwood-lsxl.dtsi +++ b/arch/arm/boot/dts/kirkwood-lsxl.dtsi @@ -10,6 +10,11 @@ ocp@f1000000 { pinctrl: pin-controller@10000 { + /* Non-default UART pins */ + pmx_uart0: pmx-uart0 { + marvell,pins = "mpp4", "mpp5"; + }; + pmx_power_hdd: pmx-power-hdd { marvell,pins = "mpp10"; marvell,function = "gpo"; @@ -213,22 +218,11 @@ &mdio { status = "okay"; - ethphy0: ethernet-phy@0 { - reg = <0>; - }; - ethphy1: ethernet-phy@8 { reg = <8>; }; }; -&eth0 { - status = "okay"; - ethernet0-port@0 { - phy-handle = <&ethphy0>; - }; -}; - &eth1 { status = "okay"; ethernet1-port@0 { diff --git a/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts index 2a0a98fe67f0..3240c67e0c39 100644 --- a/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts @@ -11,3 +11,18 @@ model = "LogicPD Zoom OMAP35xx SOM-LV Development Kit"; compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3430", "ti,omap3"; }; + +&omap3_pmx_core2 { + pinctrl-names = "default"; + pinctrl-0 = <&hsusb2_2_pins>; + hsusb2_2_pins: pinmux_hsusb2_2_pins { + pinctrl-single,pins = < + OMAP3430_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */ + OMAP3430_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */ + OMAP3430_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */ + OMAP3430_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */ + OMAP3430_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */ + OMAP3430_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */ + >; + }; +}; diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts index a604d92221a4..c757f0d7781c 100644 --- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts @@ -11,3 +11,18 @@ model = "LogicPD Zoom DM3730 SOM-LV Development Kit"; compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3630", "ti,omap3"; }; + +&omap3_pmx_core2 { + pinctrl-names = "default"; + pinctrl-0 = <&hsusb2_2_pins>; + hsusb2_2_pins: pinmux_hsusb2_2_pins { + pinctrl-single,pins = < + OMAP3630_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */ + OMAP3630_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /*
etk_d11.hsusb2_stp */ + OMAP3630_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */ + OMAP3630_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */ + OMAP3630_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */ + OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */ + >; + }; +}; diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi index b56524cc7fe2..55b619c99e24 100644 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi @@ -265,21 +265,6 @@ }; }; -&omap3_pmx_core2 { - pinctrl-names = "default"; - pinctrl-0 = <&hsusb2_2_pins>; - hsusb2_2_pins: pinmux_hsusb2_2_pins { - pinctrl-single,pins = < - OMAP3630_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */ - OMAP3630_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */ - OMAP3630_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */ - OMAP3630_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */ - OMAP3630_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */ - OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */ - >; - }; -}; - &uart2 { interrupts-extended = <&intc 73 &omap3_pmx_core OMAP3_UART2_RX>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi index d5ded4f794df..5f8f77cfbe59 100644 --- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi +++ b/arch/arm/boot/dts/motorola-mapphone-common.dtsi @@ -430,7 +430,7 @@ pinctrl-names = "default"; pinctrl-0 = <&touchscreen_pins>; - reset-gpios = <&gpio6 13 GPIO_ACTIVE_HIGH>; /* gpio173 */ + reset-gpios = <&gpio6 13 GPIO_ACTIVE_LOW>; /* gpio173 */ /* gpio_183 with sys_nirq2 pad as wakeup */ interrupts-extended = <&gpio6 23 IRQ_TYPE_LEVEL_LOW>, diff --git a/arch/arm/boot/dts/moxart-uc7112lx.dts b/arch/arm/boot/dts/moxart-uc7112lx.dts index eb5291b0ee3a..e07b807b4cec 100644 --- a/arch/arm/boot/dts/moxart-uc7112lx.dts +++ b/arch/arm/boot/dts/moxart-uc7112lx.dts @@ -79,7 +79,7 @@ clocks = <&ref12>; }; -&sdhci { +&mmc { status = "okay"; }; diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi index f5f070a87482..764832ddfa78 100644 --- a/arch/arm/boot/dts/moxart.dtsi +++ b/arch/arm/boot/dts/moxart.dtsi @@ -93,8 +93,8 @@ clock-names = "PCLK"; }; - sdhci: sdhci@98e00000 { - compatible = "moxa,moxart-sdhci"; + mmc: mmc@98e00000 { + compatible = "moxa,moxart-mmc"; reg = <0x98e00000 0x5C>; interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk_apb>; diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi index 80c9e5e34136..cc8a378dd076 100644 --- a/arch/arm/boot/dts/omap3-gta04.dtsi +++ b/arch/arm/boot/dts/omap3-gta04.dtsi @@ -31,6 +31,8 @@ aliases { display0 = &lcd; display1 = &tv0; + /delete-property/ mmc2; + /delete-property/ mmc3; }; ldo_3v3: fixedregulator { diff --git a/arch/arm/boot/dts/ox820.dtsi b/arch/arm/boot/dts/ox820.dtsi index 90846a7655b4..dde4364892bf 100644 --- a/arch/arm/boot/dts/ox820.dtsi +++ b/arch/arm/boot/dts/ox820.dtsi @@ -287,7 +287,7 @@ clocks = <&armclk>; }; - gic: gic@1000 { + gic: interrupt-controller@1000 { compatible = "arm,arm11mp-gic"; interrupt-controller; #interrupt-cells = <3>; diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi index dda2ceec6591..ad9b52d53ef9 100644 --- 
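Several hunks in this series flip a reset-gpios polarity from GPIO_ACTIVE_HIGH to GPIO_ACTIVE_LOW (imx7-colibri, motorola-mapphone above, s5pv210-aries and tegra20-acer-a500 further down). The reason polarity belongs in the device tree is that gpiod consumers deal only in logical asserted/deasserted levels; once the DT matches the wiring, the driver never needs to know which physical level means "in reset". A minimal sketch of that division of labour, with a hypothetical driver function (not from these patches):

/*
 * Illustrative only: gpiod_set_value_cansleep() takes a logical level.
 * With GPIO_ACTIVE_LOW in the device tree, writing 1 (asserted) drives
 * the physical line low, which is what these touch controllers expect.
 */
#include <linux/gpio/consumer.h>
#include <linux/delay.h>

static void example_reset_pulse(struct gpio_desc *reset_gpio)
{
	gpiod_set_value_cansleep(reset_gpio, 1);	/* assert reset */
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(reset_gpio, 0);	/* release reset */
}
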
a/arch/arm/boot/dts/qcom-mdm9615.dtsi +++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi @@ -324,6 +324,7 @@ pmicgpio: gpio@150 { compatible = "qcom,pm8018-gpio", "qcom,ssbi-gpio"; + reg = <0x150>; interrupt-controller; #interrupt-cells = <2>; gpio-controller; diff --git a/arch/arm/boot/dts/qcom-pm8841.dtsi b/arch/arm/boot/dts/qcom-pm8841.dtsi index 2fd59c440903..c73e5b149ac5 100644 --- a/arch/arm/boot/dts/qcom-pm8841.dtsi +++ b/arch/arm/boot/dts/qcom-pm8841.dtsi @@ -25,6 +25,7 @@ compatible = "qcom,spmi-temp-alarm"; reg = <0x2400>; interrupts = <4 0x24 0 IRQ_TYPE_EDGE_RISING>; + #thermal-sensor-cells = <0>; }; }; diff --git a/arch/arm/boot/dts/rk3036-evb.dts b/arch/arm/boot/dts/rk3036-evb.dts index 2a7e6624efb9..ea23ba98625e 100644 --- a/arch/arm/boot/dts/rk3036-evb.dts +++ b/arch/arm/boot/dts/rk3036-evb.dts @@ -31,7 +31,7 @@ &i2c1 { status = "okay"; - hym8563: hym8563@51 { + hym8563: rtc@51 { compatible = "haoyu,hym8563"; reg = <0x51>; #clock-cells = <0>; diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts index b0fef82c0a71..39b913f8d701 100644 --- a/arch/arm/boot/dts/rk3188-radxarock.dts +++ b/arch/arm/boot/dts/rk3188-radxarock.dts @@ -67,7 +67,7 @@ #sound-dai-cells = <0>; }; - ir_recv: gpio-ir-receiver { + ir_recv: ir-receiver { compatible = "gpio-ir-receiver"; gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi index 5e3c1038efa8..83c2159a5e88 100644 --- a/arch/arm/boot/dts/rk3188.dtsi +++ b/arch/arm/boot/dts/rk3188.dtsi @@ -413,7 +413,7 @@ rockchip,pins = <2 RK_PD3 1 &pcfg_pull_none>; }; - lcdc1_rgb24: ldcd1-rgb24 { + lcdc1_rgb24: lcdc1-rgb24 { rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>, <2 RK_PA1 1 &pcfg_pull_none>, <2 RK_PA2 1 &pcfg_pull_none>, @@ -641,7 +641,6 @@ &global_timer { interrupts = ; - status = "disabled"; }; &local_timer { diff --git a/arch/arm/boot/dts/rk3288-evb-act8846.dts b/arch/arm/boot/dts/rk3288-evb-act8846.dts index 3e9393f1c12a..1a9e81236fd9 100644 --- a/arch/arm/boot/dts/rk3288-evb-act8846.dts +++ b/arch/arm/boot/dts/rk3288-evb-act8846.dts @@ -70,7 +70,7 @@ vin-supply = <&vcc_sys>; }; - hym8563: hym8563@51 { + hym8563: rtc@51 { compatible = "haoyu,hym8563"; reg = <0x51>; diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi index 7fb582302b32..c560afe3af78 100644 --- a/arch/arm/boot/dts/rk3288-firefly.dtsi +++ b/arch/arm/boot/dts/rk3288-firefly.dtsi @@ -233,7 +233,7 @@ vin-supply = <&vcc_sys>; }; - hym8563: hym8563@51 { + hym8563: rtc@51 { compatible = "haoyu,hym8563"; reg = <0x51>; #clock-cells = <0>; diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts index cf54d5ffff2f..fe265a834e8e 100644 --- a/arch/arm/boot/dts/rk3288-miqi.dts +++ b/arch/arm/boot/dts/rk3288-miqi.dts @@ -157,7 +157,7 @@ vin-supply = <&vcc_sys>; }; - hym8563: hym8563@51 { + hym8563: rtc@51 { compatible = "haoyu,hym8563"; reg = <0x51>; #clock-cells = <0>; diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts index bd6177c3879a..4d7218521244 100644 --- a/arch/arm/boot/dts/rk3288-rock2-square.dts +++ b/arch/arm/boot/dts/rk3288-rock2-square.dts @@ -166,7 +166,7 @@ }; &i2c0 { - hym8563: hym8563@51 { + hym8563: rtc@51 { compatible = "haoyu,hym8563"; reg = <0x51>; #clock-cells = <0>; diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi index e41d0f56b8c5..637ce78f09cd 100644 --- a/arch/arm/boot/dts/rk3xxx.dtsi +++ 
b/arch/arm/boot/dts/rk3xxx.dtsi @@ -111,6 +111,13 @@ reg = <0x1013c200 0x20>; interrupts = ; clocks = <&cru CORE_PERI>; + status = "disabled"; + /* The clock source and the sched_clock provided by the arm_global_timer + * on Rockchip rk3066a/rk3188 are quite unstable because their rates + * depend on the CPU frequency. + * Keep the arm_global_timer disabled in order to have the + * DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default. + */ }; local_timer: local-timer@1013c600 { diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi index bd4450dbdcb6..984bc8dc5e4b 100644 --- a/arch/arm/boot/dts/s5pv210-aries.dtsi +++ b/arch/arm/boot/dts/s5pv210-aries.dtsi @@ -564,7 +564,6 @@ reset-gpios = <&mp05 5 GPIO_ACTIVE_LOW>; vdd3-supply = <&ldo7_reg>; vci-supply = <&ldo17_reg>; - spi-cs-high; spi-max-frequency = <1200000>; pinctrl-names = "default"; @@ -632,12 +631,12 @@ interrupts = <5 IRQ_TYPE_EDGE_FALLING>; pinctrl-names = "default"; pinctrl-0 = <&ts_irq>; - reset-gpios = <&gpj1 3 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpj1 3 GPIO_ACTIVE_LOW>; }; }; &i2s0 { - dmas = <&pdma0 9>, <&pdma0 10>, <&pdma0 11>; + dmas = <&pdma0 10>, <&pdma0 9>, <&pdma0 11>; status = "okay"; }; @@ -896,7 +895,7 @@ device-wakeup-gpios = <&gpg3 4 GPIO_ACTIVE_HIGH>; interrupt-parent = <&gph2>; interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "host-wake"; + interrupt-names = "host-wakeup"; }; }; diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi index 2871351ab907..eb7e3660ada7 100644 --- a/arch/arm/boot/dts/s5pv210.dtsi +++ b/arch/arm/boot/dts/s5pv210.dtsi @@ -240,8 +240,8 @@ reg = <0xeee30000 0x1000>; interrupt-parent = <&vic2>; interrupts = <16>; - dma-names = "rx", "tx", "tx-sec"; - dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>; + dma-names = "tx", "rx", "tx-sec"; + dmas = <&pdma1 10>, <&pdma1 9>, <&pdma1 11>; clock-names = "iis", "i2s_opclk0", "i2s_opclk1"; @@ -260,8 +260,8 @@ reg = <0xe2100000 0x1000>; interrupt-parent = <&vic2>; interrupts = <17>; - dma-names = "rx", "tx"; - dmas = <&pdma1 12>, <&pdma1 13>; + dma-names = "tx", "rx"; + dmas = <&pdma1 13>, <&pdma1 12>; clock-names = "iis", "i2s_opclk0"; clocks = <&clocks CLK_I2S1>, <&clocks SCLK_AUDIO1>; pinctrl-names = "default"; @@ -275,8 +275,8 @@ reg = <0xe2a00000 0x1000>; interrupt-parent = <&vic2>; interrupts = <18>; - dma-names = "rx", "tx"; - dmas = <&pdma1 14>, <&pdma1 15>; + dma-names = "tx", "rx"; + dmas = <&pdma1 15>, <&pdma1 14>; clock-names = "iis", "i2s_opclk0"; clocks = <&clocks CLK_I2S2>, <&clocks SCLK_AUDIO2>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 12f57278ba4a..33f76d14341e 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -1125,7 +1125,7 @@ clocks = <&pmc PMC_TYPE_PERIPHERAL 55>, <&pmc PMC_TYPE_GCK 55>; clock-names = "pclk", "gclk"; assigned-clocks = <&pmc PMC_TYPE_CORE PMC_I2S1_MUX>; - assigned-parrents = <&pmc PMC_TYPE_GCK 55>; + assigned-clock-parents = <&pmc PMC_TYPE_GCK 55>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi index 7a0ef01de969..9919fc86bdc3 100644 --- a/arch/arm/boot/dts/stm32mp151.dtsi +++ b/arch/arm/boot/dts/stm32mp151.dtsi @@ -543,7 +543,7 @@ compatible = "st,stm32-cec"; reg = <0x40016000 0x400>; interrupts = ; - clocks = <&rcc CEC_K>, <&clk_lse>; + clocks = <&rcc CEC_K>, <&rcc CEC>; clock-names = "cec", "hdmi-cec"; status = "disabled"; }; diff --git 
a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi index 944d38b85eef..f3e0c790a4b1 100644 --- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi +++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi @@ -141,6 +141,7 @@ compatible = "snps,dwmac-mdio"; reset-gpios = <&gpioz 2 GPIO_ACTIVE_LOW>; reset-delay-us = <1000>; + reset-post-delay-us = <1000>; phy0: ethernet-phy@7 { reg = <7>; diff --git a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts index f19ed981da9d..3706216ffb40 100644 --- a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts +++ b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts @@ -169,7 +169,7 @@ flash@0 { #address-cells = <1>; #size-cells = <1>; - compatible = "mxicy,mx25l1606e", "winbond,w25q128"; + compatible = "mxicy,mx25l1606e", "jedec,spi-nor"; reg = <0>; spi-max-frequency = <40000000>; }; diff --git a/arch/arm/boot/dts/suniv-f1c100s.dtsi b/arch/arm/boot/dts/suniv-f1c100s.dtsi index 6100d3b75f61..def830101448 100644 --- a/arch/arm/boot/dts/suniv-f1c100s.dtsi +++ b/arch/arm/boot/dts/suniv-f1c100s.dtsi @@ -104,8 +104,10 @@ wdt: watchdog@1c20ca0 { compatible = "allwinner,suniv-f1c100s-wdt", - "allwinner,sun4i-a10-wdt"; + "allwinner,sun6i-a31-wdt"; reg = <0x01c20ca0 0x20>; + interrupts = <16>; + clocks = <&osc32k>; }; uart0: serial@1c25000 { diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts index 5dbfb83c1b06..ce87e1ec10dc 100644 --- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts +++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts @@ -446,7 +446,7 @@ interrupt-parent = <&gpio>; interrupts = ; - reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_LOW>; vdda-supply = <&vdd_3v3_sys>; vdd-supply = <&vdd_3v3_sys>; diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi index e81e5937a60a..03301ddb3403 100644 --- a/arch/arm/boot/dts/uniphier-pxs2.dtsi +++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi @@ -597,8 +597,8 @@ compatible = "socionext,uniphier-dwc3", "snps,dwc3"; status = "disabled"; reg = <0x65a00000 0xcd00>; - interrupt-names = "host", "peripheral"; - interrupts = <0 134 4>, <0 135 4>; + interrupt-names = "dwc_usb3"; + interrupts = <0 134 4>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usb0>, <&pinctrl_usb2>; clock-names = "ref", "bus_early", "suspend"; @@ -693,8 +693,8 @@ compatible = "socionext,uniphier-dwc3", "snps,dwc3"; status = "disabled"; reg = <0x65c00000 0xcd00>; - interrupt-names = "host", "peripheral"; - interrupts = <0 137 4>, <0 138 4>; + interrupt-names = "dwc_usb3"; + interrupts = <0 137 4>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usb1>, <&pinctrl_usb3>; clock-names = "ref", "bus_early", "suspend"; diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 8f26c454ea12..1615597a0438 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += blake2s-arm.o +obj-$(if $(CONFIG_CRYPTO_BLAKE2S_ARM),y) += libblake2s-arm.o obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o @@ -31,7 +32,8 @@ sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o 
sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y) sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y) -blake2s-arm-y := blake2s-core.o blake2s-glue.o +blake2s-arm-y := blake2s-shash.o +libblake2s-arm-y:= blake2s-core.o blake2s-glue.o blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S index 86345751bbf3..df40e46601f1 100644 --- a/arch/arm/crypto/blake2s-core.S +++ b/arch/arm/crypto/blake2s-core.S @@ -167,8 +167,8 @@ .endm // -// void blake2s_compress_arch(struct blake2s_state *state, -// const u8 *block, size_t nblocks, u32 inc); +// void blake2s_compress(struct blake2s_state *state, +// const u8 *block, size_t nblocks, u32 inc); // // Only the first three fields of struct blake2s_state are used: // u32 h[8]; (inout) @@ -176,7 +176,7 @@ // u32 f[2]; (in) // .align 5 -ENTRY(blake2s_compress_arch) +ENTRY(blake2s_compress) push {r0-r2,r4-r11,lr} // keep this an even number .Lnext_block: @@ -303,4 +303,4 @@ ENTRY(blake2s_compress_arch) str r3, [r12], #4 bne 1b b .Lcopy_block_done -ENDPROC(blake2s_compress_arch) +ENDPROC(blake2s_compress) diff --git a/arch/arm/crypto/blake2s-glue.c b/arch/arm/crypto/blake2s-glue.c index f2cc1e5fc9ec..0238a70d9581 100644 --- a/arch/arm/crypto/blake2s-glue.c +++ b/arch/arm/crypto/blake2s-glue.c @@ -1,78 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* - * BLAKE2s digest algorithm, ARM scalar implementation - * - * Copyright 2020 Google LLC - */ #include -#include - #include /* defined in blake2s-core.S */ -EXPORT_SYMBOL(blake2s_compress_arch); - -static int crypto_blake2s_update_arm(struct shash_desc *desc, - const u8 *in, unsigned int inlen) -{ - return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch); -} - -static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out) -{ - return crypto_blake2s_final(desc, out, blake2s_compress_arch); -} - -#define BLAKE2S_ALG(name, driver_name, digest_size) \ - { \ - .base.cra_name = name, \ - .base.cra_driver_name = driver_name, \ - .base.cra_priority = 200, \ - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ - .base.cra_module = THIS_MODULE, \ - .digestsize = digest_size, \ - .setkey = crypto_blake2s_setkey, \ - .init = crypto_blake2s_init, \ - .update = crypto_blake2s_update_arm, \ - .final = crypto_blake2s_final_arm, \ - .descsize = sizeof(struct blake2s_state), \ - } - -static struct shash_alg blake2s_arm_algs[] = { - BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE), - BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE), - BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE), - BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE), -}; - -static int __init blake2s_arm_mod_init(void) -{ - return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? 
- crypto_register_shashes(blake2s_arm_algs, - ARRAY_SIZE(blake2s_arm_algs)) : 0; -} - -static void __exit blake2s_arm_mod_exit(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_HASH)) - crypto_unregister_shashes(blake2s_arm_algs, - ARRAY_SIZE(blake2s_arm_algs)); -} - -module_init(blake2s_arm_mod_init); -module_exit(blake2s_arm_mod_exit); - -MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Eric Biggers "); -MODULE_ALIAS_CRYPTO("blake2s-128"); -MODULE_ALIAS_CRYPTO("blake2s-128-arm"); -MODULE_ALIAS_CRYPTO("blake2s-160"); -MODULE_ALIAS_CRYPTO("blake2s-160-arm"); -MODULE_ALIAS_CRYPTO("blake2s-224"); -MODULE_ALIAS_CRYPTO("blake2s-224-arm"); -MODULE_ALIAS_CRYPTO("blake2s-256"); -MODULE_ALIAS_CRYPTO("blake2s-256-arm"); +EXPORT_SYMBOL(blake2s_compress); diff --git a/arch/arm/crypto/blake2s-shash.c b/arch/arm/crypto/blake2s-shash.c new file mode 100644 index 000000000000..763c73beea2d --- /dev/null +++ b/arch/arm/crypto/blake2s-shash.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * BLAKE2s digest algorithm, ARM scalar implementation + * + * Copyright 2020 Google LLC + */ + +#include +#include + +#include + +static int crypto_blake2s_update_arm(struct shash_desc *desc, + const u8 *in, unsigned int inlen) +{ + return crypto_blake2s_update(desc, in, inlen, false); +} + +static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out) +{ + return crypto_blake2s_final(desc, out, false); +} + +#define BLAKE2S_ALG(name, driver_name, digest_size) \ + { \ + .base.cra_name = name, \ + .base.cra_driver_name = driver_name, \ + .base.cra_priority = 200, \ + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ + .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ + .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ + .base.cra_module = THIS_MODULE, \ + .digestsize = digest_size, \ + .setkey = crypto_blake2s_setkey, \ + .init = crypto_blake2s_init, \ + .update = crypto_blake2s_update_arm, \ + .final = crypto_blake2s_final_arm, \ + .descsize = sizeof(struct blake2s_state), \ + } + +static struct shash_alg blake2s_arm_algs[] = { + BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE), + BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE), + BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE), + BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE), +}; + +static int __init blake2s_arm_mod_init(void) +{ + return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? 
+ crypto_register_shashes(blake2s_arm_algs, + ARRAY_SIZE(blake2s_arm_algs)) : 0; +} + +static void __exit blake2s_arm_mod_exit(void) +{ + if (IS_REACHABLE(CONFIG_CRYPTO_HASH)) + crypto_unregister_shashes(blake2s_arm_algs, + ARRAY_SIZE(blake2s_arm_algs)); +} + +module_init(blake2s_arm_mod_init); +module_exit(blake2s_arm_mod_exit); + +MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biggers "); +MODULE_ALIAS_CRYPTO("blake2s-128"); +MODULE_ALIAS_CRYPTO("blake2s-128-arm"); +MODULE_ALIAS_CRYPTO("blake2s-160"); +MODULE_ALIAS_CRYPTO("blake2s-160-arm"); +MODULE_ALIAS_CRYPTO("blake2s-224"); +MODULE_ALIAS_CRYPTO("blake2s-224-arm"); +MODULE_ALIAS_CRYPTO("blake2s-256"); +MODULE_ALIAS_CRYPTO("blake2s-256-arm"); diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index a81dda65c576..45180a2cc47c 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -10,7 +10,7 @@ #else #define MAX_DMA_ADDRESS ({ \ extern phys_addr_t arm_dma_zone_size; \ - arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \ + arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \ (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; }) #endif diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index ab2b654084fa..b13e8a6c14b0 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -442,6 +442,9 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); extern int valid_phys_addr_range(phys_addr_t addr, size_t size); extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); extern int devmem_is_allowed(unsigned long pfn); +extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, + unsigned long flags); +#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap #endif /* diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index 92282558caf7..2b8970d8e5a2 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -27,6 +27,7 @@ enum { MT_HIGH_VECTORS, MT_MEMORY_RWX, MT_MEMORY_RW, + MT_MEMORY_RO, MT_ROM, MT_MEMORY_RWX_NONCACHED, MT_MEMORY_RW_DTCM, diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index fe87397c3d8c..bdbc1e590891 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -17,7 +17,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs); #define perf_arch_fetch_caller_regs(regs, __ip) { \ (regs)->ARM_pc = (__ip); \ - (regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \ + frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \ (regs)->ARM_sp = current_stack_pointer; \ (regs)->ARM_cpsr = SVC_MODE; \ } diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index d16aba48fa0a..090011394477 100644 --- a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -44,12 +44,6 @@ typedef pte_t *pte_addr_t; -/* - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. - */ -#define ZERO_PAGE(vaddr) (virt_to_page(0)) - /* * Mark the prot value as uncacheable and unbufferable. 
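The arch/arm/include/asm/dma.h hunk above swaps 0x10000000 (256MiB) for 0x100000000ULL (4GiB). The guard appears intended to check that PAGE_OFFSET + arm_dma_zone_size stays below the top of the 32-bit address space, so the bound should be 4GiB minus PAGE_OFFSET; with the old 256MiB constant the subtraction wrapped and produced a meaningless threshold. A worked sketch of the arithmetic, assuming the common 3G/1G split (PAGE_OFFSET = 0xC0000000):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t page_offset = 0xC0000000u;

	/* Old bound: 0x10000000 - PAGE_OFFSET wraps modulo 2^32 */
	uint32_t old_bound = 0x10000000u - page_offset;	/* 0x50000000 */
	/* Fixed bound: the real size of the window below 4GiB */
	uint32_t new_bound = (uint32_t)(0x100000000ull - page_offset);	/* 0x40000000 */

	printf("old=%#x new=%#x\n", old_bound, new_bound);
	return 0;
}
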
*/ diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index c02f24400369..d38d503493cb 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -10,6 +10,15 @@ #include #include +#ifndef __ASSEMBLY__ +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +extern struct page *empty_zero_page; +#define ZERO_PAGE(vaddr) (empty_zero_page) +#endif + #ifndef CONFIG_MMU #include @@ -156,13 +165,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, #define __S111 __PAGE_SHARED_EXEC #ifndef __ASSEMBLY__ -/* - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. - */ -extern struct page *empty_zero_page; -#define ZERO_PAGE(vaddr) (empty_zero_page) - extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 91d6b7856be4..73c83f4d33b3 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -164,5 +164,31 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \ }) + +/* + * Update ITSTATE after normal execution of an IT block instruction. + * + * The 8 IT state bits are split into two parts in CPSR: + * ITSTATE<1:0> are in CPSR<26:25> + * ITSTATE<7:2> are in CPSR<15:10> + */ +static inline unsigned long it_advance(unsigned long cpsr) +{ + if ((cpsr & 0x06000400) == 0) { + /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */ + cpsr &= ~PSR_IT_MASK; + } else { + /* We need to shift left ITSTATE<4:0> */ + const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */ + unsigned long it = cpsr & mask; + it <<= 1; + it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */ + it &= mask; + cpsr &= ~mask; + cpsr |= it; + } + return cpsr; +} + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 536b6b979f63..eb7ce2747eb0 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -126,6 +126,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, * thread information flags: * TIF_USEDFPU - FPU was used by this task this quantum (SMP) * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED + * + * Any bit in the range of 0..15 will cause do_work_pending() to be invoked. 
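The it_advance() helper just added to ptrace.h can be exercised on its own: the 8 ITSTATE bits live split across CPSR[26:25] (ITSTATE<1:0>) and CPSR[15:10] (ITSTATE<7:2>), and each retired instruction in an IT block shifts ITSTATE<4:0> left by one until ITSTATE<2:0> hits zero. A standalone userspace sketch, with the same constants as the kernel and a sample CPSR modelling an "ITT EQ" two-instruction block (ITSTATE = 0x04):

#include <stdio.h>

#define PSR_IT_MASK	0x0600fc00UL

static unsigned long it_advance(unsigned long cpsr)
{
	if ((cpsr & 0x06000400) == 0) {
		cpsr &= ~PSR_IT_MASK;		/* ITSTATE<2:0> == 0: block done */
	} else {
		const unsigned long mask = 0x06001c00;	/* ITSTATE<4:0> */
		unsigned long it = cpsr & mask;

		it <<= 1;
		it |= it >> (27 - 10);		/* carry ITSTATE<2> into place */
		it &= mask;
		cpsr = (cpsr & ~mask) | it;
	}
	return cpsr;
}

int main(void)
{
	unsigned long cpsr = 0x1ul << 10;	/* ITSTATE<2> set: one more conditional instr follows */

	cpsr = it_advance(cpsr);	/* first instr retired */
	printf("after 1st instr: %#lx\n", cpsr & PSR_IT_MASK);	/* prints 0x800 */
	cpsr = it_advance(cpsr);	/* last instr retired: IT state clears */
	printf("after 2nd instr: %#lx\n", cpsr & PSR_IT_MASK);	/* prints 0 */
	return 0;
}
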
*/ #define TIF_SIGPENDING 0 /* signal pending */ #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ @@ -135,6 +137,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ #define TIF_SECCOMP 7 /* seccomp syscall filtering active */ +#define TIF_NOTIFY_SIGNAL 8 /* signal notifications exist */ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ @@ -148,6 +151,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) /* Checks for any syscall work in entry-common.S */ @@ -158,7 +162,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, * Change these and you break ASM code in entry-common.S */ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE) + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_NOTIFY_SIGNAL) #endif /* __KERNEL__ */ #endif /* __ASM_ARM_THREAD_INFO_H */ diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h index 7c3b3671d6c2..6d1337c169cd 100644 --- a/arch/arm/include/asm/timex.h +++ b/arch/arm/include/asm/timex.h @@ -11,5 +11,6 @@ typedef unsigned long cycles_t; #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; }) +#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback()) #endif diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index c3ebe3584103..030351d169aa 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -1043,7 +1043,7 @@ vector_bhb_loop8_\name: @ bhb workaround mov r0, #8 -3: b . + 4 +3: W(b) . 
+ 4 subs r0, r0, #1 bne 3b dsb diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index bd619da73c84..9b3c737575e9 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -53,7 +53,7 @@ __ret_fast_syscall: cmp r2, #TASK_SIZE blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK + movs r1, r1, lsl #16 bne fast_work_pending @@ -90,7 +90,7 @@ __ret_fast_syscall: cmp r2, #TASK_SIZE blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK + movs r1, r1, lsl #16 beq no_work_pending UNWIND(.fnend ) ENDPROC(ret_fast_syscall) @@ -131,7 +131,7 @@ ENTRY(ret_to_user_from_irq) cmp r2, #TASK_SIZE blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] - tst r1, #_TIF_WORK_MASK + movs r1, r1, lsl #16 bne slow_work_pending no_work_pending: asm_trace_hardirqs_on save = 0 diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S index de1f20624be1..d0e898608d30 100644 --- a/arch/arm/kernel/entry-v7m.S +++ b/arch/arm/kernel/entry-v7m.S @@ -59,7 +59,7 @@ __irq_entry: get_thread_info tsk ldr r2, [tsk, #TI_FLAGS] - tst r2, #_TIF_WORK_MASK + movs r2, r2, lsl #16 beq 2f @ no work pending mov r0, #V7M_SCB_ICSR_PENDSVSET str r0, [r1, V7M_SCB_ICSR] @ raise PendSV diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 75cfb105a8db..47a30ff88839 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -245,7 +245,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, thread->cpu_domain = get_domain(); #endif - if (likely(!(p->flags & PF_KTHREAD))) { + if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) { *childregs = *current_pt_regs(); childregs->ARM_r0 = 0; if (stack_start) diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 2f81d3af5f9a..a3a38d0a4c85 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -655,7 +655,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) if (unlikely(!user_mode(regs))) return 0; local_irq_enable(); - if (thread_flags & _TIF_SIGPENDING) { + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { int restart = do_signal(regs, syscall); if (unlikely(restart)) { /* diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index db798eac7431..824774999825 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -53,17 +53,17 @@ int notrace unwind_frame(struct stackframe *frame) return -EINVAL; frame->sp = frame->fp; - frame->fp = *(unsigned long *)(fp); - frame->pc = *(unsigned long *)(fp + 4); + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4)); #else /* check current frame pointer is within bounds */ if (fp < low + 12 || fp > high - 4) return -EINVAL; /* restore the registers from the stack frame */ - frame->fp = *(unsigned long *)(fp - 12); - frame->sp = *(unsigned long *)(fp - 8); - frame->pc = *(unsigned long *)(fp - 4); + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12)); + frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8)); + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4)); #endif return 0; diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S index b5e8b9ae4c7d..7fd3600db8ef 100644 --- a/arch/arm/lib/findbit.S +++ b/arch/arm/lib/findbit.S @@ -40,8 +40,8 @@ ENDPROC(_find_first_zero_bit_le) * Prototype: int 
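The entry-common.S and entry-v7m.S hunks above replace the `tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK` tests with `movs r1, r1, lsl #16`. This relies on the new thread_info.h rule that every flag which must trigger do_work_pending() lives in bits 0..15: shifting the flags word left by 16 discards bits 16..31 and sets the Z flag exactly when no work bit is set, and it no longer matters whether the combined mask (now including _TIF_NOTIFY_SIGNAL) is encodable as an ARM immediate. A C restatement of the test, using bit numbers from the hunk above:

#include <assert.h>
#include <stdint.h>

static int work_pending(uint32_t ti_flags)
{
	/* Same effect as: movs r1, r1, lsl #16 ; bne slow_work_pending */
	return (uint32_t)(ti_flags << 16) != 0;
}

int main(void)
{
	assert(work_pending(1u << 0));		/* TIF_SIGPENDING */
	assert(work_pending(1u << 8));		/* TIF_NOTIFY_SIGNAL */
	assert(!work_pending(1u << 17));	/* TIF_USING_IWMMXT: not a work bit */
	return 0;
}
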
find_next_zero_bit(void *addr, unsigned int maxbit, int offset) */ ENTRY(_find_next_zero_bit_le) - teq r1, #0 - beq 3b + cmp r2, r1 + bhs 3b ands ip, r2, #7 beq 1b @ If new byte, goto old routine ARM( ldrb r3, [r0, r2, lsr #3] ) @@ -81,8 +81,8 @@ ENDPROC(_find_first_bit_le) * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset) */ ENTRY(_find_next_bit_le) - teq r1, #0 - beq 3b + cmp r2, r1 + bhs 3b ands ip, r2, #7 beq 1b @ If new byte, goto old routine ARM( ldrb r3, [r0, r2, lsr #3] ) @@ -115,8 +115,8 @@ ENTRY(_find_first_zero_bit_be) ENDPROC(_find_first_zero_bit_be) ENTRY(_find_next_zero_bit_be) - teq r1, #0 - beq 3b + cmp r2, r1 + bhs 3b ands ip, r2, #7 beq 1b @ If new byte, goto old routine eor r3, r2, #0x18 @ big endian byte ordering @@ -149,8 +149,8 @@ ENTRY(_find_first_bit_be) ENDPROC(_find_first_bit_be) ENTRY(_find_next_bit_be) - teq r1, #0 - beq 3b + cmp r2, r1 + bhs 3b ands ip, r2, #7 beq 1b @ If new byte, goto old routine eor r3, r2, #0x18 @ big endian byte ordering diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c index b99dd8e1c93f..7ba6cf826162 100644 --- a/arch/arm/lib/xor-neon.c +++ b/arch/arm/lib/xor-neon.c @@ -26,8 +26,9 @@ MODULE_LICENSE("GPL"); * While older versions of GCC do not generate incorrect code, they fail to * recognize the parallel nature of these functions, and emit plain ARM code, * which is known to be slower than the optimized ARM code in asm-arm/xor.h. + * + * #warning This code requires at least version 4.6 of GCC */ -#warning This code requires at least version 4.6 of GCC #endif #pragma GCC diagnostic ignored "-Wunused-variable" diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 3f015cb6ec2b..f2ce2d094925 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -104,7 +104,7 @@ static const struct wakeup_source_info ws_info[] = { static const struct of_device_id sama5d2_ws_ids[] = { { .compatible = "atmel,sama5d2-gem", .data = &ws_info[0] }, - { .compatible = "atmel,at91rm9200-rtc", .data = &ws_info[1] }, + { .compatible = "atmel,sama5d2-rtc", .data = &ws_info[1] }, { .compatible = "atmel,sama5d3-udc", .data = &ws_info[2] }, { .compatible = "atmel,at91rm9200-ohci", .data = &ws_info[2] }, { .compatible = "usb-ohci", .data = &ws_info[2] }, @@ -115,12 +115,12 @@ static const struct of_device_id sama5d2_ws_ids[] = { }; static const struct of_device_id sam9x60_ws_ids[] = { - { .compatible = "atmel,at91sam9x5-rtc", .data = &ws_info[1] }, + { .compatible = "microchip,sam9x60-rtc", .data = &ws_info[1] }, { .compatible = "atmel,at91rm9200-ohci", .data = &ws_info[2] }, { .compatible = "usb-ohci", .data = &ws_info[2] }, { .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] }, { .compatible = "usb-ehci", .data = &ws_info[2] }, - { .compatible = "atmel,at91sam9260-rtt", .data = &ws_info[4] }, + { .compatible = "microchip,sam9x60-rtt", .data = &ws_info[4] }, { .compatible = "cdns,sam9x60-macb", .data = &ws_info[5] }, { /* sentinel */ } }; diff --git a/arch/arm/mach-axxia/platsmp.c b/arch/arm/mach-axxia/platsmp.c index 512943eae30a..2e203626eda5 100644 --- a/arch/arm/mach-axxia/platsmp.c +++ b/arch/arm/mach-axxia/platsmp.c @@ -39,6 +39,7 @@ static int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle) return -ENOENT; syscon = of_iomap(syscon_np, 0); + of_node_put(syscon_np); if (!syscon) return -ENOMEM; diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index ae790908fc74..9b594ae98153 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ 
-211,7 +211,6 @@ config ARCH_BRCMSTB select BCM7038_L1_IRQ select BRCMSTB_L2_IRQ select BCM7120_L2_IRQ - select ARCH_HAS_HOLES_MEMORYMODEL select ZONE_DMA if ARM_LPAE select SOC_BRCMSTB select SOC_BUS diff --git a/arch/arm/mach-bcm/bcm_kona_smc.c b/arch/arm/mach-bcm/bcm_kona_smc.c index 43a16f922b53..513efea655ba 100644 --- a/arch/arm/mach-bcm/bcm_kona_smc.c +++ b/arch/arm/mach-bcm/bcm_kona_smc.c @@ -54,6 +54,7 @@ int __init bcm_kona_smc_init(void) return -ENODEV; prop_val = of_get_address(node, 0, &prop_size, NULL); + of_node_put(node); if (!prop_val) return -EINVAL; diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index e4f4b20b83a2..3fc4ec830e3a 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c @@ -372,6 +372,7 @@ static void __init cns3xxx_init(void) /* De-Asscer SATA Reset */ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SATA)); } + of_node_put(dn); dn = of_find_compatible_node(NULL, NULL, "cavium,cns3420-sdhci"); if (of_device_is_available(dn)) { @@ -385,6 +386,7 @@ static void __init cns3xxx_init(void) cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(SDIO)); cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SDIO)); } + of_node_put(dn); pm_power_off = cns3xxx_power_off; diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index f56ff8c24043..de11030748d0 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -5,7 +5,6 @@ menuconfig ARCH_DAVINCI depends on ARCH_MULTI_V5 select DAVINCI_TIMER select ZONE_DMA - select ARCH_HAS_HOLES_MEMORYMODEL select PM_GENERIC_DOMAINS if PM select PM_GENERIC_DOMAINS_OF if PM && OF select REGMAP_MMIO diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index 428012687a80..7f7f6bae21c2 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -1101,11 +1101,13 @@ static int __init da850_evm_config_emac(void) int ret; u32 val; struct davinci_soc_info *soc_info = &davinci_soc_info; - u8 rmii_en = soc_info->emac_pdata->rmii_en; + u8 rmii_en; if (!machine_is_davinci_da850_evm()) return 0; + rmii_en = soc_info->emac_pdata->rmii_en; + cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG); val = __raw_readl(cfg_chip3_base); diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index d2d249706ebb..b5df98ee5d17 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -8,7 +8,6 @@ menuconfig ARCH_EXYNOS bool "Samsung Exynos" depends on ARCH_MULTI_V7 - select ARCH_HAS_HOLES_MEMORYMODEL select ARCH_SUPPORTS_BIG_ENDIAN select ARM_AMBA select ARM_GIC @@ -20,7 +19,6 @@ menuconfig ARCH_EXYNOS select EXYNOS_PMU select EXYNOS_SROM select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS - select GPIOLIB select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5 select HAVE_ARM_SCU if SMP select HAVE_S3C2410_I2C if I2C diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c index 83d1d1327f96..1276585f72c5 100644 --- a/arch/arm/mach-exynos/exynos.c +++ b/arch/arm/mach-exynos/exynos.c @@ -149,6 +149,7 @@ static void exynos_map_pmu(void) np = of_find_matching_node(NULL, exynos_dt_pmu_match); if (np) pmu_base_addr = of_iomap(np, 0); + of_node_put(np); } static void __init exynos_init_irq(void) diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig index 1bc68913d62c..9de38ce8124f 100644 --- a/arch/arm/mach-highbank/Kconfig +++ b/arch/arm/mach-highbank/Kconfig @@ -2,7 +2,6 @@ config ARCH_HIGHBANK bool "Calxeda ECX-1000/2000 (Highbank/Midway)" depends 
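The board-da850-evm.c hunk above is a dereference-before-check fix: the old code read soc_info->emac_pdata->rmii_en in the declaration's initializer, before machine_is_davinci_da850_evm() had established that the pointer chain is valid on the running board. The general shape of the fix, sketched with hypothetical names rather than the davinci structures:

struct pdata { int feature_en; };
struct soc_info { struct pdata *pdata; };

static int example_configure(struct soc_info *soc, int right_board)
{
	int feature;

	if (!right_board)
		return 0;	/* bail out before touching soc->pdata */

	feature = soc->pdata->feature_en;	/* only dereferenced when known valid */
	return feature;
}
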
on ARCH_MULTI_V7 - select ARCH_HAS_HOLES_MEMORYMODEL select ARCH_SUPPORTS_BIG_ENDIAN select ARM_AMBA select ARM_ERRATA_764369 if SMP diff --git a/arch/arm/mach-hisi/platsmp.c b/arch/arm/mach-hisi/platsmp.c index da7a09c1dae5..1cd1d9b0aabf 100644 --- a/arch/arm/mach-hisi/platsmp.c +++ b/arch/arm/mach-hisi/platsmp.c @@ -67,14 +67,17 @@ static void __init hi3xxx_smp_prepare_cpus(unsigned int max_cpus) } ctrl_base = of_iomap(np, 0); if (!ctrl_base) { + of_node_put(np); pr_err("failed to map address\n"); return; } if (of_property_read_u32(np, "smp-offset", &offset) < 0) { + of_node_put(np); pr_err("failed to find smp-offset property\n"); return; } ctrl_base += offset; + of_node_put(np); } } @@ -160,6 +163,7 @@ static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle) if (WARN_ON(!node)) return -1; ctrl_base = of_iomap(node, 0); + of_node_put(node); /* set the secondary core boot from DDR */ remap_reg_value = readl_relaxed(ctrl_base + REG_SC_CTRL); diff --git a/arch/arm/mach-mediatek/Kconfig b/arch/arm/mach-mediatek/Kconfig index 9e0f592d87d8..35a3430c7942 100644 --- a/arch/arm/mach-mediatek/Kconfig +++ b/arch/arm/mach-mediatek/Kconfig @@ -30,6 +30,7 @@ config MACH_MT7623 config MACH_MT7629 bool "MediaTek MT7629 SoCs support" default ARCH_MEDIATEK + select HAVE_ARM_ARCH_TIMER config MACH_MT8127 bool "MediaTek MT8127 SoCs support" diff --git a/arch/arm/mach-meson/platsmp.c b/arch/arm/mach-meson/platsmp.c index 4b8ad728bb42..32ac60b89fdc 100644 --- a/arch/arm/mach-meson/platsmp.c +++ b/arch/arm/mach-meson/platsmp.c @@ -71,6 +71,7 @@ static void __init meson_smp_prepare_cpus(const char *scu_compatible, } sram_base = of_iomap(node, 0); + of_node_put(node); if (!sram_base) { pr_err("Couldn't map SRAM registers\n"); return; @@ -91,6 +92,7 @@ static void __init meson_smp_prepare_cpus(const char *scu_compatible, } scu_base = of_iomap(node, 0); + of_node_put(node); if (!scu_base) { pr_err("Couldn't map SCU registers\n"); return; diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c index c109f47e9cbc..a687e83ad604 100644 --- a/arch/arm/mach-mxs/mach-mxs.c +++ b/arch/arm/mach-mxs/mach-mxs.c @@ -387,8 +387,10 @@ static void __init mxs_machine_init(void) root = of_find_node_by_path("/"); ret = of_property_read_string(root, "model", &soc_dev_attr->machine); - if (ret) + if (ret) { + kfree(soc_dev_attr); return; + } soc_dev_attr->family = "Freescale MXS Family"; soc_dev_attr->soc_id = mxs_get_soc_id(); diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c index bd5be82101f3..d89bda12bf3c 100644 --- a/arch/arm/mach-omap1/clock.c +++ b/arch/arm/mach-omap1/clock.c @@ -41,7 +41,7 @@ static DEFINE_SPINLOCK(clockfw_lock); unsigned long omap1_uart_recalc(struct clk *clk) { unsigned int val = __raw_readl(clk->enable_reg); - return val & clk->enable_bit ? 48000000 : 12000000; + return val & 1 << clk->enable_bit ? 
48000000 : 12000000; } unsigned long omap1_sossi_recalc(struct clk *clk) diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 3f62a0c9450d..164985505f9e 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -93,7 +93,6 @@ config SOC_DRA7XX config ARCH_OMAP2PLUS bool select ARCH_HAS_BANDGAP - select ARCH_HAS_HOLES_MEMORYMODEL select ARCH_HAS_RESET_CONTROLLER select ARCH_OMAP select CLKSRC_MMIO diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 6098666e928d..f24d4e56ddfc 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -211,6 +211,7 @@ static int __init omapdss_init_fbdev(void) node = of_find_node_by_name(NULL, "omap4_padconf_global"); if (node) omap4_dsi_mux_syscon = syscon_node_to_regmap(node); + of_node_put(node); return 0; } @@ -259,11 +260,13 @@ static int __init omapdss_init_of(void) if (!pdev) { pr_err("Unable to find DSS platform device\n"); + of_node_put(node); return -ENODEV; } r = of_platform_populate(node, NULL, NULL, &pdev->dev); put_device(&pdev->dev); + of_node_put(node); if (r) { pr_err("Unable to populate DSS submodule devices\n"); return r; diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 5c3845730dbf..0b80f8bcd304 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -314,10 +314,12 @@ void __init omap_gic_of_init(void) np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic"); gic_dist_base_addr = of_iomap(np, 0); + of_node_put(np); WARN_ON(!gic_dist_base_addr); np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-twd-timer"); twd_base = of_iomap(np, 0); + of_node_put(np); WARN_ON(!twd_base); skip_errata_init: diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c index 1b442b128569..63e73e9b82bc 100644 --- a/arch/arm/mach-omap2/prm3xxx.c +++ b/arch/arm/mach-omap2/prm3xxx.c @@ -708,6 +708,7 @@ static int omap3xxx_prm_late_init(void) } irq_num = of_irq_get(np, 0); + of_node_put(np); if (irq_num == -EPROBE_DEFER) return irq_num; diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c index 2e35354b61f5..167e871f059e 100644 --- a/arch/arm/mach-pxa/cm-x300.c +++ b/arch/arm/mach-pxa/cm-x300.c @@ -354,13 +354,13 @@ static struct platform_device cm_x300_spi_gpio = { static struct gpiod_lookup_table cm_x300_spi_gpiod_table = { .dev_id = "spi_gpio", .table = { - GPIO_LOOKUP("gpio-pxa", GPIO_LCD_SCL, + GPIO_LOOKUP("pca9555.1", GPIO_LCD_SCL - GPIO_LCD_BASE, "sck", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DIN, + GPIO_LOOKUP("pca9555.1", GPIO_LCD_DIN - GPIO_LCD_BASE, "mosi", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DOUT, + GPIO_LOOKUP("pca9555.1", GPIO_LCD_DOUT - GPIO_LCD_BASE, "miso", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("gpio-pxa", GPIO_LCD_CS, + GPIO_LOOKUP("pca9555.1", GPIO_LCD_CS - GPIO_LCD_BASE, "cs", GPIO_ACTIVE_HIGH), { }, }, diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index cd9fa465b9b2..9aee8e0f2bb1 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -681,7 +681,7 @@ static struct platform_device bq24022 = { static struct gpiod_lookup_table bq24022_gpiod_table = { .dev_id = "gpio-regulator", .table = { - GPIO_LOOKUP("gpio-pxa", EGPIO_MAGICIAN_BQ24022_ISET2, + GPIO_LOOKUP("htc-egpio-0", EGPIO_MAGICIAN_BQ24022_ISET2 - MAGICIAN_EGPIO_BASE, NULL, GPIO_ACTIVE_HIGH), GPIO_LOOKUP("gpio-pxa", GPIO30_MAGICIAN_BQ24022_nCHARGE_EN, "enable", GPIO_ACTIVE_LOW), diff --git 
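The omap1 clock.c hunk just above is a classic bit-number-versus-bit-mask fix: clk->enable_bit stores a bit position, so `val & clk->enable_bit` tests the wrong register bits (it only behaves for tiny bit numbers by accident), while the corrected `val & 1 << clk->enable_bit` builds the mask first (the shift binds tighter than the AND). A two-line demonstration with an illustrative bit position:

#include <assert.h>

int main(void)
{
	unsigned int val = 1u << 29;	/* enable bit 29 set in the register */
	unsigned int enable_bit = 29;	/* a bit *number*, not a mask */

	assert((val & enable_bit) == 0);		/* old test: false negative */
	assert((val & (1u << enable_bit)) != 0);	/* fixed test */
	return 0;
}
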
a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 431709725d02..ded5e343e198 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -296,9 +296,9 @@ static struct gpiod_lookup_table tosa_mci_gpio_table = { .table = { GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_nSD_DETECT, "cd", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_SD_WP, + GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_SD_WP - TOSA_SCOOP_GPIO_BASE, "wp", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_PWR_ON, + GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_PWR_ON - TOSA_SCOOP_GPIO_BASE, "power", GPIO_ACTIVE_HIGH), { }, }, diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig index 95d4e8284866..d644b45bc29d 100644 --- a/arch/arm/mach-s5pv210/Kconfig +++ b/arch/arm/mach-s5pv210/Kconfig @@ -8,7 +8,6 @@ config ARCH_S5PV210 bool "Samsung S5PV210/S5PC110" depends on ARCH_MULTI_V7 - select ARCH_HAS_HOLES_MEMORYMODEL select ARM_VIC select CLKSRC_SAMSUNG_PWM select COMMON_CLK_SAMSUNG diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c index 09ef73b99dd8..ba44cec5e59a 100644 --- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c +++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c @@ -125,6 +125,7 @@ remove: list_for_each_entry_safe(pos, tmp, &quirk_list, list) { list_del(&pos->list); + of_node_put(pos->np); kfree(pos); } @@ -174,11 +175,12 @@ static int __init rcar_gen2_regulator_quirk(void) memcpy(&quirk->i2c_msg, id->data, sizeof(quirk->i2c_msg)); quirk->id = id; - quirk->np = np; + quirk->np = of_node_get(np); quirk->i2c_msg.addr = addr; ret = of_irq_parse_one(np, 0, argsa); if (ret) { /* Skip invalid entry and continue */ + of_node_put(np); kfree(quirk); continue; } @@ -225,6 +227,7 @@ err_free: err_mem: list_for_each_entry_safe(pos, tmp, &quirk_list, list) { list_del(&pos->list); + of_node_put(pos->np); kfree(pos); } diff --git a/arch/arm/mach-tango/Kconfig b/arch/arm/mach-tango/Kconfig index 25b2fd434861..a9eeda36aeb1 100644 --- a/arch/arm/mach-tango/Kconfig +++ b/arch/arm/mach-tango/Kconfig @@ -3,7 +3,6 @@ config ARCH_TANGO bool "Sigma Designs Tango4 (SMP87xx)" depends on ARCH_MULTI_V7 # Cortex-A9 MPCore r3p0, PL310 r3p2 - select ARCH_HAS_HOLES_MEMORYMODEL select ARM_ERRATA_754322 select ARM_ERRATA_764369 if SMP select ARM_ERRATA_775420 diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c index a0554d7d04f7..e1adc098f89a 100644 --- a/arch/arm/mach-vexpress/dcscb.c +++ b/arch/arm/mach-vexpress/dcscb.c @@ -144,6 +144,7 @@ static int __init dcscb_init(void) if (!node) return -ENODEV; dcscb_base = of_iomap(node, 0); + of_node_put(node); if (!dcscb_base) return -EADDRNOTAVAIL; cfg = readl_relaxed(dcscb_base + DCS_CFG_R); diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c index 1da11bdb1dfb..1c6500c4e6a1 100644 --- a/arch/arm/mach-vexpress/spc.c +++ b/arch/arm/mach-vexpress/spc.c @@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void) } cluster = topology_physical_package_id(cpu_dev->id); - if (init_opp_table[cluster]) + if (cluster < 0 || init_opp_table[cluster]) continue; if (ve_init_opp_table(cpu_dev)) diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index e1ca6a5732d2..15e8a321a713 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c @@ -77,6 +77,7 @@ static int __init zynq_get_revision(void) } zynq_devcfg_base = of_iomap(np, 0); + of_node_put(np); if (!zynq_devcfg_base) { pr_err("%s: Unable to map I/O memory\n", __func__); return -1; 
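A large share of the mach-* changes above (axxia, bcm_kona, cns3xxx, exynos, hisi, meson, omap2, shmobile, vexpress, zynq) are the same reference-count repair: of_find_compatible_node() and friends return a device_node with its refcount elevated, and every path out of the function, including error paths, must drop it with of_node_put() once the node (or a mapping taken from it) is no longer needed. The canonical shape, sketched with a hypothetical compatible string:

#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *example_map_ctrl(void)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, "vendor,example-ctrl");
	if (!np)
		return NULL;

	base = of_iomap(np, 0);	/* the mapping does not pin the node */
	of_node_put(np);	/* balance of_find_compatible_node() */

	return base;		/* may be NULL if the mapping failed */
}
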
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index ea81e89e7740..bcefe3f51744 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -935,6 +935,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (type == TYPE_LDST) do_alignment_finish_ldst(addr, instr, regs, offset); + if (thumb_mode(regs)) + regs->ARM_cpsr = it_advance(regs->ARM_cpsr); + return 0; bad_or_fault: diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index c18d23a5e5f1..9b9023a92d46 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -342,7 +342,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) addr = start + i * PMD_SIZE; domain = get_domain_name(pmd); if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd)) - note_page(st, addr, 3, pmd_val(*pmd), domain); + note_page(st, addr, 4, pmd_val(*pmd), domain); else walk_pte(st, pmd, addr, domain); diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 80fb5a4a5c05..2660bdfcad4d 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -479,3 +479,11 @@ void __init early_ioremap_init(void) { early_ioremap_setup(); } + +bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, + unsigned long flags) +{ + unsigned long pfn = PHYS_PFN(offset); + + return memblock_is_map_memory(pfn); +} diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 3e3001998460..0d0c3bf23914 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -296,6 +296,17 @@ static struct mem_type mem_types[] __ro_after_init = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, + [MT_MEMORY_RO] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_XN | L_PTE_RDONLY, + .prot_l1 = PMD_TYPE_TABLE, +#ifdef CONFIG_ARM_LPAE + .prot_sect = PMD_TYPE_SECT | L_PMD_SECT_RDONLY | PMD_SECT_AP2, +#else + .prot_sect = PMD_TYPE_SECT, +#endif + .domain = DOMAIN_KERNEL, + }, [MT_ROM] = { .prot_sect = PMD_TYPE_SECT, .domain = DOMAIN_KERNEL, @@ -490,6 +501,7 @@ static void __init build_mem_type_table(void) /* Also setup NX memory mapping */ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN; + mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN; } if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { /* @@ -569,6 +581,7 @@ static void __init build_mem_type_table(void) mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; #endif /* @@ -588,6 +601,8 @@ static void __init build_mem_type_table(void) mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S; mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S; mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; @@ -648,6 +663,8 @@ static void __init build_mem_type_table(void) mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; + mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd; + mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot; mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; 
mem_types[MT_ROM].prot_sect |= cp->pmd; @@ -1342,7 +1359,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK); map.virtual = FDT_FIXED_BASE; map.length = FDT_FIXED_SIZE; - map.type = MT_ROM; + map.type = MT_MEMORY_RO; create_mapping(&map); } diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 8b3d7191e2b8..959f05701738 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -26,6 +26,13 @@ unsigned long vectors_base; +/* + * empty_zero_page is a special page that is used for + * zero-initialized data and COW. + */ +struct page *empty_zero_page; +EXPORT_SYMBOL(empty_zero_page); + #ifdef CONFIG_ARM_MPU struct mpu_rgn_info mpu_rgn_info; #endif @@ -148,9 +155,21 @@ void __init adjust_lowmem_bounds(void) */ void __init paging_init(const struct machine_desc *mdesc) { + void *zero_page; + early_trap_init((void *)vectors_base); mpu_setup(); + + /* allocate the zero page. */ + zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!zero_page) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); + bootmem_init(); + + empty_zero_page = virt_to_page(zero_page); + flush_dcache_page(empty_zero_page); } /* diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c index 06dbfb968182..8bc7a2d6d6c7 100644 --- a/arch/arm/mm/proc-v7-bugs.c +++ b/arch/arm/mm/proc-v7-bugs.c @@ -108,8 +108,7 @@ static unsigned int spectre_v2_install_workaround(unsigned int method) #else static unsigned int spectre_v2_install_workaround(unsigned int method) { - pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n", - smp_processor_id()); + pr_info_once("Spectre V2: workarounds disabled by configuration\n"); return SPECTRE_VULNERABLE; } @@ -209,10 +208,10 @@ static int spectre_bhb_install_workaround(int method) return SPECTRE_VULNERABLE; spectre_bhb_method = method; - } - pr_info("CPU%u: Spectre BHB: using %s workaround\n", - smp_processor_id(), spectre_bhb_method_name(method)); + pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n", + smp_processor_id(), spectre_bhb_method_name(method)); + } return SPECTRE_MITIGATED; } @@ -288,6 +287,7 @@ void cpu_v7_ca15_ibe(void) { if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0))) cpu_v7_spectre_v2_init(); + cpu_v7_spectre_bhb_init(); } void cpu_v7_bugs_init(void) diff --git a/arch/arm/probes/decode.h b/arch/arm/probes/decode.h index 973173598992..facc889d05ee 100644 --- a/arch/arm/probes/decode.h +++ b/arch/arm/probes/decode.h @@ -14,6 +14,7 @@ #include #include #include +#include #include void __init arm_probes_decode_init(void); @@ -35,31 +36,6 @@ void __init find_str_pc_offset(void); #endif -/* - * Update ITSTATE after normal execution of an IT block instruction. 
- * - * The 8 IT state bits are split into two parts in CPSR: - * ITSTATE<1:0> are in CPSR<26:25> - * ITSTATE<7:2> are in CPSR<15:10> - */ -static inline unsigned long it_advance(unsigned long cpsr) - { - if ((cpsr & 0x06000400) == 0) { - /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */ - cpsr &= ~PSR_IT_MASK; - } else { - /* We need to shift left ITSTATE<4:0> */ - const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */ - unsigned long it = cpsr & mask; - it <<= 1; - it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */ - it &= mask; - cpsr &= ~mask; - cpsr |= it; - } - return cpsr; -} - static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs) { long cpsr = regs->ARM_cpsr; diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c index acb464547a54..4a1991a103ea 100644 --- a/arch/arm/xen/p2m.c +++ b/arch/arm/xen/p2m.c @@ -62,11 +62,12 @@ out: unsigned long __pfn_to_mfn(unsigned long pfn) { - struct rb_node *n = phys_to_mach.rb_node; + struct rb_node *n; struct xen_p2m_entry *entry; unsigned long irqflags; read_lock_irqsave(&p2m_lock, irqflags); + n = phys_to_mach.rb_node; while (n) { entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); if (entry->pfn <= pfn && @@ -153,10 +154,11 @@ bool __set_phys_to_machine_multi(unsigned long pfn, int rc; unsigned long irqflags; struct xen_p2m_entry *p2m_entry; - struct rb_node *n = phys_to_mach.rb_node; + struct rb_node *n; if (mfn == INVALID_P2M_ENTRY) { write_lock_irqsave(&p2m_lock, irqflags); + n = phys_to_mach.rb_node; while (n) { p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); if (p2m_entry->pfn <= pfn && diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index c94121e06222..88ac9f0b1462 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -493,6 +493,22 @@ config ARM64_ERRATUM_834220 If unsure, say Y. +config ARM64_ERRATUM_1742098 + bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence" + depends on COMPAT + default y + help + This option removes the AES hwcap for aarch32 user-space to + workaround erratum 1742098 on Cortex-A57 and Cortex-A72. + + Affected parts may corrupt the AES state if an interrupt is + taken between a pair of AES instructions. These instructions + are only present if the cryptography extensions are present. + All software should have a fallback implementation for CPUs + that don't implement the cryptography extensions. + + If unsure, say Y. + config ARM64_ERRATUM_845719 bool "Cortex-A53: 845719: a load might read incorrect data" depends on COMPAT @@ -671,6 +687,7 @@ config ARM64_ERRATUM_1508412 config ARM64_ERRATUM_2051678 bool "Cortex-A510: 2051678: disable Hardware Update of the page table's dirty bit" + default y help This options adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678. Affected Coretex-A510 might not respect the ordering rules for @@ -712,6 +729,47 @@ config ARM64_ERRATUM_2067961 If unsure, say Y. +config ARM64_ERRATUM_2454944 + bool "Cortex-A510: 2454944: Unmodified cache line might be written back to memory" + select ARCH_HAS_TEARDOWN_DMA_OPS + select RODATA_FULL_DEFAULT_ENABLED + help + This option adds the workaround for ARM Cortex-A510 erratum 2454944. + + Affected Cortex-A510 core might write unmodified cache lines back to + memory, which breaks the assumptions upon which software coherency + management for non-coherent DMA relies. 
+config ARM64_ERRATUM_2457168 + bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly" + depends on ARM64_AMU_EXTN + default y + help + This option adds the workaround for ARM Cortex-A510 erratum 2457168. + + The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate + as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments + incorrectly, giving a significantly higher output value. + + Work around this problem by keeping the reference values of the affected counters + at 0, thus signaling an error case. This effect is the same as firmware disabling + the affected counters, in which case 0 will be returned when reading the disabled + counters. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y @@ -1697,7 +1755,8 @@ config ARM64_BTI_KERNEL depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697 depends on !CC_IS_GCC || GCC_VERSION >= 100100 - depends on !(CC_IS_CLANG && GCOV_KERNEL) + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671 + depends on !CC_IS_GCC # https://bugs.llvm.org/show_bug.cgi?id=46258 depends on !CFI_CLANG || CLANG_VERSION >= 120000 depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 052f4b9f8243..df0d0db5b1ee 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -152,6 +152,7 @@ config ARCH_MEDIATEK config ARCH_MESON bool "Amlogic Platforms" + select COMMON_CLK help This enables support for the arm64 based Amlogic SoCs such as the s905, S905X/D, S912, A113X/D or S905X/D2 @@ -241,6 +242,7 @@ config ARCH_STRATIX10 config ARCH_SYNQUACER bool "Socionext SynQuacer SoC Family" + select IRQ_FASTEOI_HIERARCHY_HANDLERS config ARCH_TEGRA bool "NVIDIA Tegra SoC Family" diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts index 70e31743f0ba..3c08497568ea 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts @@ -40,7 +40,7 @@ leds { compatible = "gpio-leds"; - status { + led-0 { label = "orangepi:green:status"; gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */ }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi index d61f43052a34..8e9ad1e51d66 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi @@ -11,26 +11,6 @@ compatible = "operating-points-v2"; opp-shared; - opp-100000000 { - opp-hz = /bits/ 64 <100000000>; - opp-microvolt = <731000>; - }; - - opp-250000000 { - opp-hz = /bits/ 64 <250000000>; - opp-microvolt = <731000>; - }; - - opp-500000000 { - opp-hz = /bits/ 64 <500000000>; - opp-microvolt = <731000>; - };
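The erratum 2457168 entry above leans on a convention worth making explicit: a reference value of zero marks a counter as broken or disabled, so the frequency-invariance code quietly falls back instead of consuming bogus readings. A minimal sketch of that convention follows; every name in it is hypothetical rather than taken from the kernel:

#include <stdbool.h>
#include <stdint.h>

/* Deliberately left at 0 on CPUs where AMEVCNTR01 is unreliable. */
static uint64_t const_cycles_ref;
static uint64_t core_cycles_ref;

static bool freq_counters_valid(void)
{
	/* 0 is never a valid snapshot, so it doubles as a "disabled" flag */
	return const_cycles_ref != 0 && core_cycles_ref != 0;
}

static bool amu_freq_scale(uint64_t const_delta, uint64_t core_delta,
			   uint64_t *scale_out)
{
	if (!freq_counters_valid() || const_delta == 0)
		return false;	/* caller falls back to cpufreq-based scaling */

	/* core cycles per constant-rate cycle, in 10-bit fixed point */
	*scale_out = (core_delta << 10) / const_delta;
	return true;
}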
- - opp-667000000 { - opp-hz = /bits/ 64 <667000000>; - opp-microvolt = <731000>; - }; - opp-1000000000 { opp-hz = /bits/ 64 <1000000000>; opp-microvolt = <761000>; @@ -71,26 +51,6 @@ compatible = "operating-points-v2"; opp-shared; - opp-100000000 { - opp-hz = /bits/ 64 <100000000>; - opp-microvolt = <731000>; - }; - - opp-250000000 { - opp-hz = /bits/ 64 <250000000>; - opp-microvolt = <731000>; - }; - - opp-500000000 { - opp-hz = /bits/ 64 <500000000>; - opp-microvolt = <731000>; - }; - - opp-667000000 { - opp-hz = /bits/ 64 <667000000>; - opp-microvolt = <731000>; - }; - opp-1000000000 { opp-hz = /bits/ 64 <1000000000>; opp-microvolt = <731000>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi index 1e5d0ee5d541..44c23c984034 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi @@ -11,26 +11,6 @@ compatible = "operating-points-v2"; opp-shared; - opp-100000000 { - opp-hz = /bits/ 64 <100000000>; - opp-microvolt = <731000>; - }; - - opp-250000000 { - opp-hz = /bits/ 64 <250000000>; - opp-microvolt = <731000>; - }; - - opp-500000000 { - opp-hz = /bits/ 64 <500000000>; - opp-microvolt = <731000>; - }; - - opp-667000000 { - opp-hz = /bits/ 64 <667000000>; - opp-microvolt = <731000>; - }; - opp-1000000000 { opp-hz = /bits/ 64 <1000000000>; opp-microvolt = <731000>; @@ -76,26 +56,6 @@ compatible = "operating-points-v2"; opp-shared; - opp-100000000 { - opp-hz = /bits/ 64 <100000000>; - opp-microvolt = <751000>; - }; - - opp-250000000 { - opp-hz = /bits/ 64 <250000000>; - opp-microvolt = <751000>; - }; - - opp-500000000 { - opp-hz = /bits/ 64 <500000000>; - opp-microvolt = <751000>; - }; - - opp-667000000 { - opp-hz = /bits/ 64 <667000000>; - opp-microvolt = <751000>; - }; - opp-1000000000 { opp-hz = /bits/ 64 <1000000000>; opp-microvolt = <771000>; diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi index c309517abae3..defe0b8d4d27 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi @@ -95,26 +95,6 @@ compatible = "operating-points-v2"; opp-shared; - opp-100000000 { - opp-hz = /bits/ 64 <100000000>; - opp-microvolt = <730000>; - }; - - opp-250000000 { - opp-hz = /bits/ 64 <250000000>; - opp-microvolt = <730000>; - }; - - opp-500000000 { - opp-hz = /bits/ 64 <500000000>; - opp-microvolt = <730000>; - }; - - opp-667000000 { - opp-hz = /bits/ 64 <666666666>; - opp-microvolt = <750000>; - }; - opp-1000000000 { opp-hz = /bits/ 64 <1000000000>; opp-microvolt = <770000>; diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index 2c0161125ece..cb45a2f0537a 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -595,12 +595,26 @@ polling-delay = <1000>; polling-delay-passive = <100>; thermal-sensors = <&scpi_sensors0 0>; + trips { + pmic_crit0: trip0 { + temperature = <90000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; soc { polling-delay = <1000>; polling-delay-passive = <100>; thermal-sensors = <&scpi_sensors0 3>; + trips { + soc_crit0: trip0 { + temperature = <80000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; big_cluster_thermal_zone: big-cluster { diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi index d6b9dedd168f..5667009aae13 100644 --- 
a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi @@ -167,6 +167,7 @@ pinctrl-0 = <&pinctrl_uart3>; assigned-clocks = <&clk IMX8MM_CLK_UART3>; assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>; + uart-has-rtscts; status = "okay"; }; @@ -237,6 +238,8 @@ fsl,pins = < MX8MM_IOMUXC_ECSPI1_SCLK_UART3_DCE_RX 0x40 MX8MM_IOMUXC_ECSPI1_MOSI_UART3_DCE_TX 0x40 + MX8MM_IOMUXC_ECSPI1_MISO_UART3_DCE_CTS_B 0x40 + MX8MM_IOMUXC_ECSPI1_SS0_UART3_DCE_RTS_B 0x40 >; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi index 49082529764f..0fac1f3f7f47 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi @@ -89,12 +89,12 @@ pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>; ti,x-min = /bits/ 16 <125>; - touchscreen-size-x = /bits/ 16 <4008>; + touchscreen-size-x = <4008>; ti,y-min = /bits/ 16 <282>; - touchscreen-size-y = /bits/ 16 <3864>; + touchscreen-size-y = <3864>; ti,x-plate-ohms = /bits/ 16 <180>; - touchscreen-max-pressure = /bits/ 16 <255>; - touchscreen-average-samples = /bits/ 16 <10>; + touchscreen-max-pressure = <255>; + touchscreen-average-samples = <10>; ti,debounce-tol = /bits/ 16 <3>; ti,debounce-rep = /bits/ 16 <1>; ti,settle-delay-usec = /bits/ 16 <150>; diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi index f4d7bb75707d..3490619a9ba9 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@ -939,10 +939,10 @@ clocks = <&clk IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK>; }; - gpmi: nand-controller@33002000{ + gpmi: nand-controller@33002000 { compatible = "fsl,imx8mm-gpmi-nand", "fsl,imx7d-gpmi-nand"; #address-cells = <1>; - #size-cells = <1>; + #size-cells = <0>; reg = <0x33002000 0x2000>, <0x33004000 0x4000>; reg-names = "gpmi-nand", "bch"; interrupts = ; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts index 7dfee715a2c4..d8ce217c6016 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts @@ -59,6 +59,10 @@ interrupts = <3 IRQ_TYPE_LEVEL_LOW>; rohm,reset-snvs-powered; + #clock-cells = <0>; + clocks = <&osc_32k 0>; + clock-output-names = "clk-32k-out"; + regulators { buck1_reg: BUCK1 { regulator-name = "buck1"; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi index 7f356edf9f91..f6287f174355 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi @@ -70,12 +70,12 @@ pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>; ti,x-min = /bits/ 16 <125>; - touchscreen-size-x = /bits/ 16 <4008>; + touchscreen-size-x = <4008>; ti,y-min = /bits/ 16 <282>; - touchscreen-size-y = /bits/ 16 <3864>; + touchscreen-size-y = <3864>; ti,x-plate-ohms = /bits/ 16 <180>; - touchscreen-max-pressure = /bits/ 16 <255>; - touchscreen-average-samples = /bits/ 16 <10>; + touchscreen-max-pressure = <255>; + touchscreen-average-samples = <10>; ti,debounce-tol = /bits/ 16 <3>; ti,debounce-rep = /bits/ 16 <1>; ti,settle-delay-usec = /bits/ 16 <150>; diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi index aea723eb2ba3..7dba83041264 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi @@ -809,7 
+809,7 @@ gpmi: nand-controller@33002000 { compatible = "fsl,imx8mn-gpmi-nand", "fsl,imx7d-gpmi-nand"; #address-cells = <1>; - #size-cells = <1>; + #size-cells = <0>; reg = <0x33002000 0x2000>, <0x33004000 0x4000>; reg-names = "gpmi-nand", "bch"; interrupts = ; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts index c13b4a02d12f..c016f5b7d24a 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts @@ -148,27 +148,27 @@ pinctrl_gpio_led: gpioledgrp { fsl,pins = < - MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16 0x19 + MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16 0x140 >; }; pinctrl_i2c3: i2c3grp { fsl,pins = < - MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c3 - MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c3 + MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c2 + MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c2 >; }; pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp { fsl,pins = < - MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x41 + MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x40 >; }; pinctrl_uart2: uart2grp { fsl,pins = < - MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x49 - MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x49 + MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x140 + MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x140 >; }; @@ -180,7 +180,7 @@ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0 MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0 MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0 - MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1 + MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 >; }; @@ -192,7 +192,7 @@ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4 MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4 MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4 - MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1 + MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 >; }; @@ -204,7 +204,7 @@ MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6 MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6 MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6 - MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1 + MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 >; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi index e3c6d1272198..325ea100969a 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi @@ -899,6 +899,7 @@ interrupts = <20 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_gauge>; + power-supplies = <&bq25895>; maxim,over-heat-temp = <700>; maxim,over-volt = <4500>; maxim,rsns-microohm = <5000>; diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts index 9a11e5c60c26..3053f484c8cc 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts +++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts @@ -49,7 +49,7 @@ wps { label = "wps"; linux,code = ; - gpios = <&pio 102 GPIO_ACTIVE_HIGH>; + gpios = <&pio 102 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi index d71b7a1140fe..216dc30fa26c 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi @@ -75,7 +75,7 @@ /* SDMMC1 (SD/MMC) */ mmc@3400000 { - cd-gpios = <&gpio TEGRA194_MAIN_GPIO(A, 0) GPIO_ACTIVE_LOW>; + cd-gpios = <&gpio TEGRA194_MAIN_GPIO(G, 7) GPIO_ACTIVE_LOW>; }; /* SDMMC4 (eMMC) */ diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi index 89e5ee6b78cd..0b7d4f9a6f82 100644 --- 
a/arch/arm64/boot/dts/qcom/ipq8074.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi @@ -13,7 +13,7 @@ clocks { sleep_clk: sleep_clk { compatible = "fixed-clock"; - clock-frequency = <32000>; + clock-frequency = <32768>; #clock-cells = <0>; }; @@ -383,7 +383,7 @@ status = "disabled"; }; - qpic_nand: nand@79b0000 { + qpic_nand: nand-controller@79b0000 { compatible = "qcom,ipq8074-nand"; reg = <0x079b0000 0x10000>; #address-cells = <1>; diff --git a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts index cb82864a90ef..42f2b235011f 100644 --- a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts +++ b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts @@ -64,7 +64,7 @@ vdd_l17_29-supply = <&vreg_vph_pwr>; vdd_l20_21-supply = <&vreg_vph_pwr>; vdd_l25-supply = <&pm8994_s5>; - vdd_lvs1_2 = <&pm8994_s4>; + vdd_lvs1_2-supply = <&pm8994_s4>; pm8994_s1: s1 { regulator-min-microvolt = <800000>; diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts index 4f64ca3ea1ef..6ed2a9c01e8c 100644 --- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts +++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts @@ -151,7 +151,7 @@ vdd_l17_29-supply = <&vreg_vph_pwr>; vdd_l20_21-supply = <&vreg_vph_pwr>; vdd_l25-supply = <&pm8994_s5>; - vdd_lvs1_2 = <&pm8994_s4>; + vdd_lvs1_2-supply = <&pm8994_s4>; pm8994_s1: s1 { /* unused */ diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi index 45f9a44326a6..aeb5762566e9 100644 --- a/arch/arm64/boot/dts/qcom/msm8994.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi @@ -92,7 +92,7 @@ CPU6: cpu@102 { device_type = "cpu"; compatible = "arm,cortex-a57"; - reg = <0x0 0x101>; + reg = <0x0 0x102>; enable-method = "psci"; next-level-cache = <&L2_1>; }; @@ -100,7 +100,7 @@ CPU7: cpu@103 { device_type = "cpu"; compatible = "arm,cortex-a57"; - reg = <0x0 0x101>; + reg = <0x0 0x103>; enable-method = "psci"; next-level-cache = <&L2_1>; }; @@ -316,7 +316,7 @@ #dma-cells = <1>; qcom,ee = <0>; qcom,controlled-remotely; - num-channels = <18>; + num-channels = <24>; qcom,num-ees = <4>; }; @@ -412,7 +412,7 @@ #dma-cells = <1>; qcom,ee = <0>; qcom,controlled-remotely; - num-channels = <18>; + num-channels = <24>; qcom,num-ees = <4>; }; diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi index b654b802e95c..7bddc5ebc6aa 100644 --- a/arch/arm64/boot/dts/qcom/qcs404.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi @@ -548,7 +548,7 @@ compatible = "snps,dwc3"; reg = <0x07580000 0xcd00>; interrupts = ; - phys = <&usb2_phy_sec>, <&usb3_phy>; + phys = <&usb2_phy_prim>, <&usb3_phy>; phy-names = "usb2-phy", "usb3-phy"; snps,has-lpm-erratum; snps,hird-threshold = /bits/ 8 <0x10>; @@ -577,7 +577,7 @@ compatible = "snps,dwc3"; reg = <0x078c0000 0xcc00>; interrupts = ; - phys = <&usb2_phy_prim>; + phys = <&usb2_phy_sec>; phy-names = "usb2-phy"; snps,has-lpm-erratum; snps,hird-threshold = /bits/ 8 <0x10>; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lte-sku.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lte-sku.dtsi index 44956e3165a1..469aad4e5948 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lte-sku.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lte-sku.dtsi @@ -9,6 +9,10 @@ label = "proximity-wifi-lte"; }; +&mpss_mem { + reg = <0x0 0x86000000 0x0 0x8c00000>; +}; + &remoteproc_mpss { firmware-name = "qcom/sc7180-trogdor/modem/mba.mbn", "qcom/sc7180-trogdor/modem/qdsp6sw.mbn"; diff --git 
a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi index 5b2a616c6257..cb2c47f13a8a 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi @@ -39,7 +39,7 @@ }; mpss_mem: memory@86000000 { - reg = <0x0 0x86000000 0x0 0x8c00000>; + reg = <0x0 0x86000000 0x0 0x2000000>; no-map; }; diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi index bc4bb5dd8bae..53e1d43cbecf 100644 --- a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi +++ b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi @@ -145,7 +145,7 @@ }; }; - reg_audio: regulator_audio { + reg_audio: regulator-audio { compatible = "regulator-fixed"; regulator-name = "audio-1.8V"; regulator-min-microvolt = <1800000>; @@ -173,7 +173,7 @@ vin-supply = <®_lcd>; }; - reg_cam0: regulator_camera { + reg_cam0: regulator-cam0 { compatible = "regulator-fixed"; regulator-name = "reg_cam0"; regulator-min-microvolt = <1800000>; @@ -182,7 +182,7 @@ enable-active-high; }; - reg_cam1: regulator_camera { + reg_cam1: regulator-cam1 { compatible = "regulator-fixed"; regulator-name = "reg_cam1"; regulator-min-microvolt = <1800000>; diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi index e0e54342cd4c..4c7d7e8f8e28 100644 --- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi @@ -1929,7 +1929,7 @@ cpu-thermal { polling-delay-passive = <250>; polling-delay = <0>; - thermal-sensors = <&thermal 0>; + thermal-sensors = <&thermal>; sustainable-power = <717>; cooling-maps { diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi index 33d7e657bd9c..37159b9408e8 100644 --- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi @@ -2029,7 +2029,7 @@ cpu-thermal { polling-delay-passive = <250>; polling-delay = <0>; - thermal-sensors = <&thermal 0>; + thermal-sensors = <&thermal>; sustainable-power = <717>; cooling-maps { diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts index 6d0abb382ff2..d9292da324ce 100644 --- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts @@ -13,7 +13,7 @@ stdout-path = "serial2:1500000n8"; }; - ir_rx { + ir-receiver { compatible = "gpio-ir-receiver"; gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts index e6c1c94c8d69..07737b65d7a3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts @@ -87,3 +87,8 @@ }; }; }; + +&wlan_host_wake_l { + /* Kevin has an external pull up, but Bob does not. */ + rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi index 1384dabbdf40..739937f70f8d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi @@ -237,6 +237,14 @@ &edp { status = "okay"; + /* + * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only + * set this here, because rk3399-gru.dtsi ensures we can generate this + * off GPLL=600MHz, whereas some other RK3399 boards may not. 
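The assigned-clocks/assigned-clock-rates properties added just below are the declarative form of a driver pinning the clock rate itself at probe time. Roughly, and purely as an illustration (a hypothetical driver snippet, not code from this series; the "pclk" clock-names entry is made up):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int edp_pin_pclk(struct device *dev)
{
	struct clk *pclk;
	int ret;

	pclk = devm_clk_get(dev, "pclk");	/* stands in for PCLK_EDP */
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	/* the eDP PHY/clk only syncs reliably at 24 MHz on these boards */
	ret = clk_set_rate(pclk, 24000000);
	if (ret)
		dev_err(dev, "cannot pin eDP pclk to 24 MHz: %d\n", ret);

	return ret;
}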
+ */ + assigned-clocks = <&cru PCLK_EDP>; + assigned-clock-rates = <24000000>; + ports { edp_out: port@1 { reg = <1>; @@ -395,6 +403,7 @@ ap_i2c_tp: &i2c5 { }; wlan_host_wake_l: wlan-host-wake-l { + /* Kevin has an external pull up, but Bob does not */ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts index a8d363568fd6..3fc761c8d550 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts @@ -203,7 +203,7 @@ cap-sd-highspeed; cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>; disable-wp; - max-frequency = <150000000>; + max-frequency = <40000000>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; vmmc-supply = <&vcc3v3_baseboard>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 544110aaffc5..95bc7a5f61dd 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -102,7 +102,6 @@ vcc5v0_host: vcc5v0-host-regulator { compatible = "regulator-fixed"; gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>; - enable-active-low; pinctrl-names = "default"; pinctrl-0 = <&vcc5v0_host_en>; regulator-name = "vcc5v0_host"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi index f121203081b9..64df64339119 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi @@ -448,7 +448,6 @@ &i2s1 { rockchip,playback-channels = <2>; rockchip,capture-channels = <2>; - status = "okay"; }; &i2s2 { diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index f29484201228..af24b9864153 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -1633,6 +1633,7 @@ reg = <0xf780 0x24>; clocks = <&sdhci>; clock-names = "emmcclk"; + drive-impedance-ohm = <50>; #phy-cells = <0>; status = "disabled"; }; @@ -1643,7 +1644,6 @@ clock-names = "refclk"; #phy-cells = <1>; resets = <&cru SRST_PCIEPHY>; - drive-impedance-ohm = <50>; reset-names = "phy"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi index be97da132258..ba75adedbf79 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi @@ -599,8 +599,8 @@ compatible = "socionext,uniphier-dwc3", "snps,dwc3"; status = "disabled"; reg = <0x65a00000 0xcd00>; - interrupt-names = "host", "peripheral"; - interrupts = <0 134 4>, <0 135 4>; + interrupt-names = "dwc_usb3"; + interrupts = <0 134 4>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usb0>, <&pinctrl_usb2>; clock-names = "ref", "bus_early", "suspend"; @@ -701,8 +701,8 @@ compatible = "socionext,uniphier-dwc3", "snps,dwc3"; status = "disabled"; reg = <0x65c00000 0xcd00>; - interrupt-names = "host", "peripheral"; - interrupts = <0 137 4>, <0 138 4>; + interrupt-names = "dwc_usb3"; + interrupts = <0 137 4>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usb1>, <&pinctrl_usb3>; clock-names = "ref", "bus_early", "suspend"; diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index da8ab23b9ce5..d6adcfbd974d 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -51,6 +51,7 @@ 
CONFIG_ARCH_SUNXI=y CONFIG_ARCH_HISI=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_SPRD=y +CONFIG_ARM64_ERRATUM_2454944=y CONFIG_SCHED_MC=y CONFIG_NR_CPUS=32 CONFIG_PARAVIRT=y @@ -513,6 +514,7 @@ CONFIG_MMC_CRYPTO=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_LEDS_CLASS_FLASH=y +CONFIG_LEDS_CLASS_MULTICOLOR=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_TRANSIENT=y CONFIG_EDAC=y @@ -647,7 +649,6 @@ CONFIG_HARDENED_USERCOPY=y CONFIG_STATIC_USERMODEHELPER=y CONFIG_STATIC_USERMODEHELPER_PATH="" CONFIG_SECURITY_SELINUX=y -CONFIG_INIT_STACK_ALL_ZERO=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y CONFIG_CRYPTO_CHACHA20POLY1305=y CONFIG_CRYPTO_ADIANTUM=y @@ -696,4 +697,5 @@ CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_TRACE_MMIO_ACCESS=y CONFIG_TRACEFS_DISABLE_AUTOMOUNT=y CONFIG_HIST_TRIGGERS=y +CONFIG_FUNCTION_ERROR_INJECTION=y # CONFIG_RUNTIME_TESTING_MENU is not set diff --git a/arch/arm64/configs/rockchip_gki.fragment b/arch/arm64/configs/rockchip_gki.fragment index 36645c46d0b4..1eb35920205b 100644 --- a/arch/arm64/configs/rockchip_gki.fragment +++ b/arch/arm64/configs/rockchip_gki.fragment @@ -1,20 +1,24 @@ +CONFIG_AP6XXX=m CONFIG_ARCH_ROCKCHIP=y CONFIG_ARM_ROCKCHIP_BUS_DEVFREQ=m CONFIG_ARM_ROCKCHIP_CPUFREQ=m -# CONFIG_ATA_SFF is not set +CONFIG_ARM_ROCKCHIP_DMC_DEVFREQ=m CONFIG_BACKLIGHT_PWM=m CONFIG_BATTERY_CW2015=m +CONFIG_BATTERY_CW2017=m +CONFIG_BATTERY_CW221X=m CONFIG_BATTERY_RK817=m CONFIG_BATTERY_RK818=m CONFIG_BLK_DEV_NVME=m CONFIG_BMA2XX_ACC=m CONFIG_CHARGER_BQ25700=m +CONFIG_CHARGER_BQ25890=m CONFIG_CHARGER_RK817=m CONFIG_CHARGER_RK818=m +CONFIG_CHARGER_SC89890=m +CONFIG_CHARGER_SGM41542=m CONFIG_CHR_DEV_SCH=m CONFIG_CHR_DEV_SG=m -# CONFIG_CLK_RK1808 is not set -# CONFIG_CLK_RK3308 is not set CONFIG_COMMON_CLK_PWM=m CONFIG_COMMON_CLK_RK808=m CONFIG_COMMON_CLK_ROCKCHIP=m @@ -25,12 +29,10 @@ CONFIG_COMPASS_DEVICE=m CONFIG_CPUFREQ_DT=m CONFIG_CPU_FREQ_GOV_ONDEMAND=m CONFIG_CPU_FREQ_GOV_USERSPACE=m -CONFIG_CPU_PX30=y -CONFIG_CPU_RK3328=y -CONFIG_CPU_RK3368=y -CONFIG_CPU_RK3399=y -CONFIG_CPU_RK3568=y +CONFIG_CPU_RK3588=y CONFIG_CRYPTO_AES_ARM64_CE_CCM=m +CONFIG_CRYPTO_DEV_ROCKCHIP=m +CONFIG_CRYPTO_DEV_ROCKCHIP_DEV=m CONFIG_CRYPTO_GHASH_ARM64_CE=m CONFIG_CRYPTO_SHA1_ARM64_CE=m CONFIG_CRYPTO_TWOFISH=m @@ -38,13 +40,26 @@ CONFIG_DEVFREQ_EVENT_ROCKCHIP_NOCP=m CONFIG_DMABUF_HEAPS_CMA=m CONFIG_DMABUF_HEAPS_SYSTEM=m CONFIG_DRAGONRISE_FF=y +CONFIG_DRM_DISPLAY_CONNECTOR=m CONFIG_DRM_DW_HDMI_CEC=m CONFIG_DRM_DW_HDMI_I2S_AUDIO=m +CONFIG_DRM_MAXIM_MAX96745=m +CONFIG_DRM_MAXIM_MAX96752F=m +CONFIG_DRM_MAXIM_MAX96755F=m CONFIG_DRM_PANEL_SIMPLE=m +CONFIG_DRM_RK1000_TVE=m +CONFIG_DRM_RK630_TVE=m CONFIG_DRM_ROCKCHIP=m +CONFIG_DRM_ROCKCHIP_RK618=m +CONFIG_DRM_ROCKCHIP_RK628=m +CONFIG_DRM_ROHM_BU18XL82=m CONFIG_DRM_SII902X=m CONFIG_DTC_SYMBOLS=y # CONFIG_DWMAC_GENERIC is not set +# CONFIG_DWMAC_IPQ806X is not set +# CONFIG_DWMAC_QCOM_ETHQOS is not set +# CONFIG_DWMAC_SUN8I is not set +# CONFIG_DWMAC_SUNXI is not set CONFIG_DW_WATCHDOG=m CONFIG_GPIO_ROCKCHIP=m CONFIG_GREENASIA_FF=y @@ -116,13 +131,12 @@ CONFIG_I2C_CHARDEV=m CONFIG_I2C_GPIO=m CONFIG_I2C_HID=m CONFIG_I2C_RK3X=m +CONFIG_IEP=m CONFIG_IIO_BUFFER_CB=m CONFIG_INPUT_RK805_PWRKEY=m -CONFIG_ION=y -CONFIG_ION_SYSTEM_HEAP=y -CONFIG_JOLIET=y CONFIG_KEYBOARD_ADC=m CONFIG_LEDS_GPIO=m +CONFIG_LEDS_RGB13H=m CONFIG_LEDS_TRIGGER_BACKLIGHT=m CONFIG_LEDS_TRIGGER_DEFAULT_ON=m CONFIG_LEDS_TRIGGER_HEARTBEAT=m @@ -131,7 +145,17 @@ CONFIG_LSM330_ACC=m CONFIG_LS_CM3217=m CONFIG_LS_CM3218=m CONFIG_LS_STK3410=m -# CONFIG_MALI400_PROFILING is not set +CONFIG_LS_UCS14620=m 
+CONFIG_MALI_BIFROST=m +CONFIG_MALI_BIFROST_DEBUG=y +CONFIG_MALI_BIFROST_EXPERT=y +CONFIG_MALI_CSF_SUPPORT=y +CONFIG_MALI_PLATFORM_NAME="rk" +CONFIG_MALI_PWRSOFT_765=y +CONFIG_MFD_RK618=m +CONFIG_MFD_RK628=m +CONFIG_MFD_RK630_I2C=m +CONFIG_MFD_RK806_SPI=m CONFIG_MFD_RK808=m CONFIG_MMC_DW=m CONFIG_MMC_DW_ROCKCHIP=m @@ -141,48 +165,78 @@ CONFIG_MPU6500_ACC=m CONFIG_MPU6880_ACC=m CONFIG_OPTEE=m CONFIG_PANTHERLORD_FF=y +CONFIG_PCIEASPM_EXT=m +CONFIG_PCIE_DW_ROCKCHIP=m +CONFIG_PCIE_ROCKCHIP_HOST=m +CONFIG_PHY_ROCKCHIP_CSI2_DPHY=m CONFIG_PHY_ROCKCHIP_DP=m CONFIG_PHY_ROCKCHIP_EMMC=m CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY=m +CONFIG_PHY_ROCKCHIP_INNO_HDMI=m CONFIG_PHY_ROCKCHIP_INNO_USB2=m CONFIG_PHY_ROCKCHIP_INNO_USB3=m CONFIG_PHY_ROCKCHIP_NANENG_COMBO_PHY=m CONFIG_PHY_ROCKCHIP_NANENG_EDP=m CONFIG_PHY_ROCKCHIP_PCIE=m +CONFIG_PHY_ROCKCHIP_SAMSUNG_DCPHY=m +CONFIG_PHY_ROCKCHIP_SAMSUNG_HDPTX=m +CONFIG_PHY_ROCKCHIP_SAMSUNG_HDPTX_HDMI=m CONFIG_PHY_ROCKCHIP_SNPS_PCIE3=m CONFIG_PHY_ROCKCHIP_TYPEC=m CONFIG_PHY_ROCKCHIP_USB=m +CONFIG_PHY_ROCKCHIP_USBDP=m CONFIG_PINCTRL_RK805=m +CONFIG_PINCTRL_RK806=m CONFIG_PINCTRL_ROCKCHIP=m CONFIG_PL330_DMA=m CONFIG_PROXIMITY_DEVICE=m CONFIG_PS_STK3410=m +CONFIG_PS_UCS14620=m CONFIG_PWM_ROCKCHIP=m +CONFIG_PWRSEQ_SIMPLE=m CONFIG_REGULATOR_ACT8865=m CONFIG_REGULATOR_FAN53555=m CONFIG_REGULATOR_GPIO=m CONFIG_REGULATOR_LP8752=m CONFIG_REGULATOR_MP8865=m CONFIG_REGULATOR_PWM=m +CONFIG_REGULATOR_RK806=m CONFIG_REGULATOR_RK808=m +CONFIG_REGULATOR_RK860X=m CONFIG_REGULATOR_TPS65132=m +CONFIG_REGULATOR_WL2868C=m CONFIG_REGULATOR_XZ3216=m +CONFIG_RFKILL_RK=m CONFIG_RK_CONSOLE_THREAD=y -CONFIG_RK_NAND=m +CONFIG_RK_HEADSET=m CONFIG_ROCKCHIP_ANALOGIX_DP=y CONFIG_ROCKCHIP_CDN_DP=y CONFIG_ROCKCHIP_CPUINFO=m CONFIG_ROCKCHIP_DEBUG=m +CONFIG_ROCKCHIP_DW_DP=y +CONFIG_ROCKCHIP_DW_HDCP2=m CONFIG_ROCKCHIP_DW_HDMI=y CONFIG_ROCKCHIP_DW_MIPI_DSI=y CONFIG_ROCKCHIP_EFUSE=m -CONFIG_ROCKCHIP_FIQ_DEBUGGER=m CONFIG_ROCKCHIP_GRF=m CONFIG_ROCKCHIP_INNO_HDMI=y CONFIG_ROCKCHIP_IODOMAIN=m CONFIG_ROCKCHIP_IOMMU=m CONFIG_ROCKCHIP_IPA=m CONFIG_ROCKCHIP_LVDS=y +CONFIG_ROCKCHIP_MPP_AV1DEC=y +CONFIG_ROCKCHIP_MPP_IEP2=y +CONFIG_ROCKCHIP_MPP_JPGDEC=y +CONFIG_ROCKCHIP_MPP_RKVDEC=y +CONFIG_ROCKCHIP_MPP_RKVDEC2=y +CONFIG_ROCKCHIP_MPP_RKVENC=y +CONFIG_ROCKCHIP_MPP_RKVENC2=y +CONFIG_ROCKCHIP_MPP_SERVICE=m +CONFIG_ROCKCHIP_MPP_VDPU1=y +CONFIG_ROCKCHIP_MPP_VDPU2=y +CONFIG_ROCKCHIP_MPP_VEPU1=y +CONFIG_ROCKCHIP_MPP_VEPU2=y +CONFIG_ROCKCHIP_MULTI_RGA=m CONFIG_ROCKCHIP_OPP=m CONFIG_ROCKCHIP_OTP=m CONFIG_ROCKCHIP_PHY=m @@ -190,15 +244,21 @@ CONFIG_ROCKCHIP_PM_DOMAINS=m CONFIG_ROCKCHIP_PVTM=m CONFIG_ROCKCHIP_REMOTECTL=m CONFIG_ROCKCHIP_REMOTECTL_PWM=m +CONFIG_ROCKCHIP_MULTI_RGA=m CONFIG_ROCKCHIP_RGB=y +CONFIG_ROCKCHIP_RKNPU=m CONFIG_ROCKCHIP_SARADC=m CONFIG_ROCKCHIP_SIP=m +CONFIG_ROCKCHIP_SUSPEND_MODE=m CONFIG_ROCKCHIP_SYSTEM_MONITOR=m CONFIG_ROCKCHIP_THERMAL=m +CONFIG_ROCKCHIP_TIMER=m CONFIG_ROCKCHIP_VENDOR_STORAGE=m CONFIG_ROCKCHIP_VENDOR_STORAGE_UPDATE_LOADER=y +CONFIG_RTC_DRV_HYM8563=m CONFIG_RTC_DRV_RK808=m CONFIG_SENSOR_DEVICE=m +CONFIG_SERIAL_8250_DW=m CONFIG_SMARTJOYPLUS_FF=y CONFIG_SND_SIMPLE_CARD=m CONFIG_SND_SOC_BT_SCO=m @@ -209,29 +269,84 @@ CONFIG_SND_SOC_ES7210=m CONFIG_SND_SOC_ES7243E=m CONFIG_SND_SOC_ES8311=m CONFIG_SND_SOC_ES8316=m +CONFIG_SND_SOC_ES8323=m +CONFIG_SND_SOC_ES8326=m CONFIG_SND_SOC_ES8396=m CONFIG_SND_SOC_RK3328=m CONFIG_SND_SOC_RK817=m CONFIG_SND_SOC_RK_CODEC_DIGITAL=m CONFIG_SND_SOC_ROCKCHIP=m +CONFIG_SND_SOC_ROCKCHIP_HDMI=m +CONFIG_SND_SOC_ROCKCHIP_I2S=m +CONFIG_SND_SOC_ROCKCHIP_I2S_TDM=m 
+CONFIG_SND_SOC_ROCKCHIP_MULTICODECS=m CONFIG_SND_SOC_ROCKCHIP_PDM=m CONFIG_SND_SOC_ROCKCHIP_SPDIF=m +CONFIG_SND_SOC_ROCKCHIP_SPDIFRX=m CONFIG_SND_SOC_RT5640=m CONFIG_SND_SOC_SPDIF=m CONFIG_SPI_ROCKCHIP=m CONFIG_SPI_SPIDEV=m +CONFIG_STMMAC_ETH=m CONFIG_SW_SYNC=m CONFIG_SYSCON_REBOOT_MODE=m CONFIG_TEE=m CONFIG_TEST_POWER=m +CONFIG_TOUCHSCREEN_ELAN5515=m +CONFIG_TOUCHSCREEN_GSL3673=m CONFIG_TOUCHSCREEN_GSLX680_PAD=m -CONFIG_TOUCHSCREEN_GT1X=m CONFIG_TYPEC_DP_ALTMODE=m CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_HUSB311=m +CONFIG_UCS12CM0=m +CONFIG_USB_DWC2=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_OHCI_HCD=m +# CONFIG_USB_OHCI_HCD_PCI is not set +CONFIG_USB_OHCI_HCD_PLATFORM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_TRANCEVIBRATOR=m +CONFIG_VIDEO_AW36518=m +CONFIG_VIDEO_AW8601=m +CONFIG_VIDEO_CN3927V=m CONFIG_VIDEO_DW9714=m -CONFIG_VIDEO_OV2680=m +CONFIG_VIDEO_FP5510=m +CONFIG_VIDEO_GC2145=m +CONFIG_VIDEO_GC2385=m +CONFIG_VIDEO_GC4C33=m +CONFIG_VIDEO_GC8034=m +CONFIG_VIDEO_IMX415=m +CONFIG_VIDEO_LT6911UXC=m +CONFIG_VIDEO_LT7911D=m +CONFIG_VIDEO_NVP6188=m +CONFIG_VIDEO_OV02B10=m +CONFIG_VIDEO_OV13850=m +CONFIG_VIDEO_OV13855=m +CONFIG_VIDEO_OV50C40=m CONFIG_VIDEO_OV5695=m -CONFIG_ZISOFS=y +CONFIG_VIDEO_OV8858=m +CONFIG_VIDEO_RK628_BT1120=m +CONFIG_VIDEO_RK628_CSI=m +CONFIG_VIDEO_RK_IRCUT=m +CONFIG_VIDEO_ROCKCHIP_CIF=m +CONFIG_VIDEO_ROCKCHIP_HDMIRX=m +CONFIG_VIDEO_ROCKCHIP_ISP=m +CONFIG_VIDEO_S5K3L6XX=m +CONFIG_VIDEO_S5KJN1=m +CONFIG_VIDEO_SGM3784=m +CONFIG_VIDEO_THCV244=m +CONFIG_VL6180=m +CONFIG_WIFI_BUILD_MODULE=y +CONFIG_WL_ROCKCHIP=m CONFIG_ZRAM=m CONFIG_ZSMALLOC=m # CONFIG_USB_DUMMY_HCD is not set diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 454621a20eaa..c4f150460bf1 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -59,6 +59,7 @@ config CRYPTO_GHASH_ARM64_CE select CRYPTO_HASH select CRYPTO_GF128MUL select CRYPTO_LIB_AES + select CRYPTO_AEAD config CRYPTO_CRCT10DIF_ARM64_CE tristate "CRCT10DIF digest algorithm using PMULL instructions" diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c index 01e22fe40823..9f4599014854 100644 --- a/arch/arm64/crypto/poly1305-glue.c +++ b/arch/arm64/crypto/poly1305-glue.c @@ -52,7 +52,7 @@ static void neon_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, { if (unlikely(!dctx->sset)) { if (!dctx->rset) { - poly1305_init_arch(dctx, src); + poly1305_init_arm64(&dctx->h, src); src += POLY1305_BLOCK_SIZE; len -= POLY1305_BLOCK_SIZE; dctx->rset = 1; diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index a074459f8f2f..7c2181c72116 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -6,6 +6,7 @@ #define __ASM_CACHE_H #include +#include #define CTR_L1IP_SHIFT 14 #define CTR_L1IP_MASK 3 @@ -49,15 +50,21 @@ */ #define ARCH_DMA_MINALIGN (128) -#ifdef CONFIG_KASAN_SW_TAGS -#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) -#elif defined(CONFIG_KASAN_HW_TAGS) -#define ARCH_SLAB_MINALIGN MTE_GRANULE_SIZE -#endif - #ifndef __ASSEMBLY__ #include +#include + +#ifdef CONFIG_KASAN_SW_TAGS +#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) +#elif defined(CONFIG_KASAN_HW_TAGS) +static inline unsigned int arch_slab_minalign(void) +{ + return kasan_hw_tags_enabled() ? 
MTE_GRANULE_SIZE : + __alignof__(unsigned long long); +} +#define arch_slab_minalign() arch_slab_minalign() +#endif #define ICACHEF_ALIASING 0 #define ICACHEF_VPIPT 1 diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index ed39d674de5f..b75ef1d90c06 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -71,8 +71,10 @@ #define ARM64_KVM_PROTECTED_MODE 60 #define ARM64_WORKAROUND_TSB_FLUSH_FAILURE 61 #define ARM64_SPECTRE_BHB 62 +#define ARM64_WORKAROUND_2457168 63 +#define ARM64_WORKAROUND_1742098 64 -/* kabi: reserve 63 - 76 for future cpu capabilities */ +/* kabi: reserve 65 - 76 for future cpu capabilities */ #define ARM64_NCAPS 76 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 717551d45fe8..076fa73c9c75 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -662,7 +662,8 @@ static inline bool system_supports_4kb_granule(void) val = cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN4_SHIFT); - return val == ID_AA64MMFR0_TGRAN4_SUPPORTED; + return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) && + (val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX); } static inline bool system_supports_64kb_granule(void) @@ -674,7 +675,8 @@ static inline bool system_supports_64kb_granule(void) val = cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN64_SHIFT); - return val == ID_AA64MMFR0_TGRAN64_SUPPORTED; + return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) && + (val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX); } static inline bool system_supports_16kb_granule(void) @@ -686,7 +688,8 @@ static inline bool system_supports_16kb_granule(void) val = cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN16_SHIFT); - return val == ID_AA64MMFR0_TGRAN16_SUPPORTED; + return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) && + (val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX); } static inline bool system_supports_mixed_endian_el0(void) diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index bfbf0c4c7c5e..9cf5d9551e99 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -41,7 +41,7 @@ (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) #define MIDR_CPU_MODEL(imp, partnum) \ - (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \ + ((_AT(u32, imp) << MIDR_IMPLEMENTOR_SHIFT) | \ (0xf << MIDR_ARCHITECTURE_SHIFT) | \ ((partnum) << MIDR_PARTNUM_SHIFT)) @@ -60,6 +60,7 @@ #define ARM_CPU_IMP_FUJITSU 0x46 #define ARM_CPU_IMP_HISI 0x48 #define ARM_CPU_IMP_APPLE 0x61 +#define ARM_CPU_IMP_AMPERE 0xC0 #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 @@ -75,6 +76,7 @@ #define ARM_CPU_PART_CORTEX_A77 0xD0D #define ARM_CPU_PART_NEOVERSE_V1 0xD40 #define ARM_CPU_PART_CORTEX_A78 0xD41 +#define ARM_CPU_PART_CORTEX_A78AE 0xD42 #define ARM_CPU_PART_CORTEX_X1 0xD44 #define ARM_CPU_PART_CORTEX_A510 0xD46 #define ARM_CPU_PART_CORTEX_A710 0xD47 @@ -111,6 +113,8 @@ #define APPLE_CPU_PART_M1_ICESTORM 0x022 #define APPLE_CPU_PART_M1_FIRESTORM 0x023 +#define AMPERE_CPU_PART_AMPERE1 0xAC3 + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) @@ -123,6 +127,7 @@ #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77) #define MIDR_NEOVERSE_V1 
MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1) #define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78) +#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE) #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) @@ -149,6 +154,7 @@ #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) #define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM) #define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM) +#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX diff --git a/arch/arm64/include/asm/dma-mapping-noalias.h b/arch/arm64/include/asm/dma-mapping-noalias.h new file mode 100644 index 000000000000..f71300626167 --- /dev/null +++ b/arch/arm64/include/asm/dma-mapping-noalias.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022-2023 ARM Ltd. + */ +#ifndef __ASM_DMA_MAPPING_NOALIAS_H +#define __ASM_DMA_MAPPING_NOALIAS_H + +#ifdef CONFIG_ARM64_ERRATUM_2454944 +void arm64_noalias_setup_dma_ops(struct device *dev); +#else +static inline void arm64_noalias_setup_dma_ops(struct device *dev) +{ +} +#endif +#endif /* __ASM_DMA_MAPPING_NOALIAS_H */ diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 973b14415271..16892f0d05ad 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -25,6 +25,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); ({ \ efi_virtmap_load(); \ __efi_fpsimd_begin(); \ + spin_lock(&efi_rt_lock); \ }) #define arch_efi_call_virt(p, f, args...) 
\ @@ -36,10 +37,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); #define arch_efi_call_virt_teardown() \ ({ \ + spin_unlock(&efi_rt_lock); \ __efi_fpsimd_end(); \ efi_virtmap_unload(); \ }) +extern spinlock_t efi_rt_lock; efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...); #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index bda918948471..692b234255e5 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -107,7 +107,7 @@ msr_s SYS_ICC_SRE_EL2, x0 isb // Make sure SRE is now set mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back, - tbz x0, #0, 1f // and check that it sticks + tbz x0, #0, .Lskip_gicv3_\@ // and check that it sticks msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults .Lskip_gicv3_\@: .endm diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h index b15eb4a3e6b2..840a35ed92ec 100644 --- a/arch/arm64/include/asm/extable.h +++ b/arch/arm64/include/asm/extable.h @@ -22,15 +22,6 @@ struct exception_table_entry #define ARCH_HAS_RELATIVE_EXTABLE -static inline bool in_bpf_jit(struct pt_regs *regs) -{ - if (!IS_ENABLED(CONFIG_BPF_JIT)) - return false; - - return regs->pc >= BPF_JIT_REGION_START && - regs->pc < BPF_JIT_REGION_END; -} - #ifdef CONFIG_BPF_JIT int arm64_bpf_fixup_exception(const struct exception_table_entry *ex, struct pt_regs *regs); diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 95813dca24ec..b4d2e864a5ce 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -219,4 +219,8 @@ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); extern int devmem_is_allowed(unsigned long pfn); +extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, + unsigned long flags); +#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap + #endif /* __ASM_IO_H */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 4e9ee0676b53..b7fa62950b78 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -780,6 +780,10 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); #define kvm_vcpu_has_pmu(vcpu) \ (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features)) +#define kvm_supports_32bit_el0() \ + (system_supports_32bit_el0() && \ + !static_branch_unlikely(&arm64_mismatched_32bit_el0)) + int kvm_trng_call(struct kvm_vcpu *vcpu); #ifdef CONFIG_KVM extern phys_addr_t hyp_mem_base; diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index b864fdb79df0..a1f0e752d1cd 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -44,11 +44,8 @@ #define _PAGE_OFFSET(va) (-(UL(1) << (va))) #define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS)) #define KIMAGE_VADDR (MODULES_END) -#define BPF_JIT_REGION_START (KASAN_SHADOW_END) -#define BPF_JIT_REGION_SIZE (SZ_128M) -#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) -#define MODULES_VADDR (BPF_JIT_REGION_END) +#define MODULES_VADDR (KASAN_SHADOW_END) #define MODULES_VSIZE (SZ_128M) #define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M) #define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE) diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h index 0b736bff0bdf..7237af98a8ac 100644 --- a/arch/arm64/include/asm/module.lds.h +++ 
b/arch/arm64/include/asm/module.lds.h @@ -1,8 +1,8 @@ #ifdef CONFIG_ARM64_MODULE_PLTS SECTIONS { - .plt 0 (NOLOAD) : { BYTE(0) } - .init.plt 0 (NOLOAD) : { BYTE(0) } - .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) } + .plt 0 : { BYTE(0) } + .init.plt 0 : { BYTE(0) } + .text.ftrace_trampoline 0 : { BYTE(0) } #ifdef CONFIG_CRYPTO_FIPS140 /* diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h index 41c48c1ba610..6537d2deb728 100644 --- a/arch/arm64/include/asm/mte-kasan.h +++ b/arch/arm64/include/asm/mte-kasan.h @@ -5,6 +5,8 @@ #ifndef __ASM_MTE_KASAN_H #define __ASM_MTE_KASAN_H +#include +#include #include #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 8f1661603b78..b9ba19dbdb69 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -10,6 +10,7 @@ #include #include #include +#include static inline void set_my_cpu_offset(unsigned long off) { diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 7a3db964a673..35fe1ca3cc6b 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -522,13 +522,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, #define pmd_none(pmd) (!pmd_val(pmd)) -#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT)) - #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ PMD_TYPE_TABLE) #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ PMD_TYPE_SECT) -#define pmd_leaf(pmd) pmd_sect(pmd) +#define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd)) +#define pmd_bad(pmd) (!pmd_table(pmd)) #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3 static inline bool pud_sect(pud_t pud) { return false; } @@ -619,9 +618,9 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) #define pud_none(pud) (!pud_val(pud)) -#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT)) +#define pud_bad(pud) (!pud_table(pud)) #define pud_present(pud) pte_present(pud_pte(pud)) -#define pud_leaf(pud) pud_sect(pud) +#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud)) #define pud_valid(pud) pte_valid(pud_pte(pud)) static inline void set_pud(pud_t *pudp, pud_t pud) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index e85a712f1e3d..a01d40f3c01a 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -207,8 +207,9 @@ void tls_preserve_current_state(void); static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) { + s32 previous_syscall = regs->syscallno; memset(regs, 0, sizeof(*regs)); - forget_syscall(regs); + regs->syscallno = previous_syscall; regs->pc = pc; if (system_uses_irq_prio_masking()) diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h index c04d01dd4457..71060c1c23fe 100644 --- a/arch/arm64/include/asm/spectre.h +++ b/arch/arm64/include/asm/spectre.h @@ -67,7 +67,8 @@ struct bp_hardening_data { DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); -static inline void arm64_apply_bp_hardening(void) +/* Called during entry so must be __always_inline */ +static __always_inline void arm64_apply_bp_hardening(void) { struct bp_hardening_data *d; diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h index b383b4802a7b..d30217c21eff 100644 --- a/arch/arm64/include/asm/syscall_wrapper.h +++ 
b/arch/arm64/include/asm/syscall_wrapper.h @@ -8,7 +8,7 @@ #ifndef __ASM_SYSCALL_WRAPPER_H #define __ASM_SYSCALL_WRAPPER_H -struct pt_regs; +#include #define SC_ARM64_REGS_TO_ARGS(x, ...) \ __MAP(x,__SC_ARGS \ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index df5a60ce1aa9..b242c815beca 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -852,15 +852,24 @@ #define ID_AA64MMFR0_ASID_SHIFT 4 #define ID_AA64MMFR0_PARANGE_SHIFT 0 -#define ID_AA64MMFR0_TGRAN4_NI 0xf -#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0 -#define ID_AA64MMFR0_TGRAN64_NI 0xf -#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0 -#define ID_AA64MMFR0_TGRAN16_NI 0x0 -#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1 +#define ID_AA64MMFR0_TGRAN4_NI 0xf +#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_TGRAN64_NI 0xf +#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_TGRAN16_NI 0x0 +#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1 +#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf + #define ID_AA64MMFR0_PARANGE_48 0x5 #define ID_AA64MMFR0_PARANGE_52 0x6 +#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0 +#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1 +#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2 +#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7 + #ifdef CONFIG_ARM64_PA_BITS_52 #define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52 #else @@ -1026,14 +1035,17 @@ #define ID_PFR1_PROGMOD_SHIFT 0 #if defined(CONFIG_ARM64_4K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED +#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX #elif defined(CONFIG_ARM64_16K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN16_SUPPORTED +#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX #elif defined(CONFIG_ARM64_64K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED +#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN +#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX #endif #define MVFR2_FPMISC_SHIFT 4 diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 1fbab854a51b..cdcf307764aa 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -68,6 +68,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ #define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ @@ -98,10 +99,12 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_32BIT (1 << 
TIF_32BIT) #define _TIF_SVE (1 << TIF_SVE) #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ - _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT) + _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT | \ + _TIF_NOTIFY_SIGNAL) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index d32363da9017..87df1eeb3c01 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -42,7 +42,7 @@ bool alternative_is_applied(u16 cpufeature) /* * Check if the target PC is within an alternative block. */ -static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) +static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) { unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt); return !(pc >= replptr && pc <= (replptr + alt->alt_len)); @@ -50,7 +50,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1)) -static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr) +static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr) { u32 insn; @@ -95,7 +95,7 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp return insn; } -static void patch_alternative(struct alt_instr *alt, +static noinstr void patch_alternative(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst) { __le32 *replptr; diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index 0e86e8b9cedd..c5da9d1e954a 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -59,6 +59,7 @@ struct insn_emulation { static LIST_HEAD(insn_emulation); static int nr_insn_emulated __initdata; static DEFINE_RAW_SPINLOCK(insn_emulation_lock); +static DEFINE_MUTEX(insn_emulation_mutex); static void register_emulation_hooks(struct insn_emulation_ops *ops) { @@ -207,10 +208,10 @@ static int emulation_proc_handler(struct ctl_table *table, int write, loff_t *ppos) { int ret = 0; - struct insn_emulation *insn = (struct insn_emulation *) table->data; + struct insn_emulation *insn = container_of(table->data, struct insn_emulation, current_mode); enum insn_emulation_mode prev_mode = insn->current_mode; - table->data = &insn->current_mode; + mutex_lock(&insn_emulation_mutex); ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret || !write || prev_mode == insn->current_mode) @@ -223,7 +224,7 @@ static int emulation_proc_handler(struct ctl_table *table, int write, update_insn_emulation_mode(insn, INSN_UNDEF); } ret: - table->data = insn; + mutex_unlock(&insn_emulation_mutex); return ret; } @@ -247,7 +248,7 @@ static void __init register_insn_emulation_sysctl(void) sysctl->maxlen = sizeof(int); sysctl->procname = insn->ops->name; - sysctl->data = insn; + sysctl->data = &insn->current_mode; sysctl->extra1 = &insn->min; sysctl->extra2 = &insn->max; sysctl->proc_handler = emulation_proc_handler; diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c index 587543c6c51c..97c42be71338 100644 --- a/arch/arm64/kernel/cacheinfo.c +++ b/arch/arm64/kernel/cacheinfo.c @@ -45,7 +45,8 @@ 
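The armv8_deprecated.c change above stops stashing the whole struct pointer in table->data and instead stores a pointer to the current_mode member, recovering the enclosing structure with container_of(); that is what lets the handler drop the racy rewrite of table->data. The pattern in isolation, as a sketch with illustrative names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct emulation {
	const char *name;
	int current_mode;
};

/* the handler only ever sees a pointer to ->current_mode */
static void show_mode(int *mode_ptr)
{
	struct emulation *e = container_of(mode_ptr, struct emulation, current_mode);

	printf("%s: mode %d\n", e->name, *mode_ptr);
}

int main(void)
{
	struct emulation swp = { .name = "swp", .current_mode = 1 };

	show_mode(&swp.current_mode);	/* prints "swp: mode 1" */
	return 0;
}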
static void ci_leaf_init(struct cacheinfo *this_leaf, int init_cache_level(unsigned int cpu) { - unsigned int ctype, level, leaves, fw_level; + unsigned int ctype, level, leaves; + int fw_level; struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) { @@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu) else fw_level = acpi_find_last_cache_level(cpu); + if (fw_level < 0) + return fw_level; + if (level < fw_level) { /* * some external caches not specified in CLIDR_EL1 diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index e458bf3e9e8d..f0484c063dc9 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -211,6 +211,10 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = { { ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0), }, + { + /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */ + ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe), + }, #endif {}, }; @@ -354,6 +358,14 @@ static const struct midr_range tsb_flush_fail_cpus[] = { }; #endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */ +#ifdef CONFIG_ARM64_ERRATUM_1742098 +static struct midr_range broken_aarch32_aes[] = { + MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + {}, +}; +#endif + const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE { @@ -553,6 +565,23 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE, ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus), }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_2457168 + { + .desc = "ARM erratum 2457168", + .capability = ARM64_WORKAROUND_2457168, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + /* Cortex-A510 r0p0-r1p1 */ + CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1) + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_1742098 + { + .desc = "ARM erratum 1742098", + .capability = ARM64_WORKAROUND_1742098, + CAP_MIDR_RANGE_LIST(broken_aarch32_aes), + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + }, #endif { } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 40ac42cc3216..4be4d5410a7b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -79,6 +79,7 @@ #include #include #include +#include #include #include #include @@ -532,7 +533,7 @@ static const struct arm64_ftr_bits ftr_id_pfr2[] = { static const struct arm64_ftr_bits ftr_id_dfr0[] = { /* [31:28] TraceFilt */ - S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_PERFMON_SHIFT, 4, 0xf), + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_PERFMON_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0), @@ -1706,7 +1707,10 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n", smp_processor_id()); cpumask_set_cpu(smp_processor_id(), &amu_cpus); - init_cpu_freq_invariance_counters(); + + /* 0 reference values signal broken/disabled counters */ + if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168)) + init_cpu_freq_invariance_counters(); } } @@ -1894,6 +1898,14 @@ static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, in } #endif /* CONFIG_KVM */ +static void elf_hwcap_fixup(void) +{ +#ifdef 
CONFIG_ARM64_ERRATUM_1742098 + if (cpus_have_const_cap(ARM64_WORKAROUND_1742098)) + compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES; +#endif /* ARM64_ERRATUM_1742098 */ +} + /* Internal helper functions to match cpu capability type */ static bool cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) @@ -2918,8 +2930,10 @@ void __init setup_cpu_features(void) setup_system_capabilities(); setup_elf_hwcaps(arm64_elf_hwcaps); - if (system_supports_32bit_el0()) + if (system_supports_32bit_el0()) { setup_elf_hwcaps(compat_elf_hwcaps); + elf_hwcap_fixup(); + } if (system_uses_ttbr0_pan()) pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n"); diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index b512b5503f6e..d4ff9ae673fa 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -54,6 +54,9 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu) struct acpi_lpi_state *lpi; struct acpi_processor *pr = per_cpu(processors, cpu); + if (unlikely(!pr || !pr->flags.has_lpi)) + return -EINVAL; + /* * If the PSCI cpu_suspend function hook has not been initialized * idle states must not be enabled, so bail out @@ -61,9 +64,6 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu) if (!psci_ops.cpu_suspend) return -EOPNOTSUPP; - if (unlikely(!pr || !pr->flags.has_lpi)) - return -EINVAL; - count = pr->power.count - 1; if (count <= 0) return -ENODEV; diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 852abc57c2ce..d7f904cd005b 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -283,13 +283,11 @@ void register_user_break_hook(struct break_hook *hook) { register_debug_hook(&hook->node, &user_break_hook); } -EXPORT_SYMBOL_GPL(register_user_break_hook); void unregister_user_break_hook(struct break_hook *hook) { unregister_debug_hook(&hook->node); } -EXPORT_SYMBOL_GPL(unregister_user_break_hook); void register_kernel_break_hook(struct break_hook *hook) { @@ -301,7 +299,6 @@ void unregister_kernel_break_hook(struct break_hook *hook) { unregister_debug_hook(&hook->node); } -EXPORT_SYMBOL_GPL(unregister_kernel_break_hook); static int call_break_hook(struct pt_regs *regs, unsigned int esr) { diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S index 75691a2641c1..2d3c4b02393e 100644 --- a/arch/arm64/kernel/efi-rt-wrapper.S +++ b/arch/arm64/kernel/efi-rt-wrapper.S @@ -4,6 +4,7 @@ */ #include +#include SYM_FUNC_START(__efi_rt_asm_wrapper) stp x29, x30, [sp, #-32]! @@ -16,6 +17,12 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) */ stp x1, x18, [sp, #16] + ldr_l x16, efi_rt_stack_top + mov sp, x16 +#ifdef CONFIG_SHADOW_CALL_STACK + str x18, [sp, #-16]! +#endif + /* * We are lucky enough that no EFI runtime services take more than * 5 arguments, so all are passed in registers rather than via the @@ -29,6 +36,7 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) mov x4, x6 blr x8 + mov sp, x29 ldp x1, x2, [sp, #16] cmp x2, x18 ldp x29, x30, [sp], #32 @@ -42,6 +50,10 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) * called with preemption disabled and a separate shadow stack is used * for interrupts. 
*/ - mov x18, x2 +#ifdef CONFIG_SHADOW_CALL_STACK + ldr_l x18, efi_rt_stack_top + ldr x18, [x18, #-16] +#endif + b efi_handle_corrupted_x18 // tail call SYM_FUNC_END(__efi_rt_asm_wrapper) diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index fa02efb28e88..72f432d23ec5 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -12,6 +12,14 @@ #include +static bool region_is_misaligned(const efi_memory_desc_t *md) +{ + if (PAGE_SIZE == EFI_PAGE_SIZE) + return false; + return !PAGE_ALIGNED(md->phys_addr) || + !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT); +} + /* * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be * executable, everything else can be mapped with the XN bits @@ -25,14 +33,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md) if (type == EFI_MEMORY_MAPPED_IO) return PROT_DEVICE_nGnRE; - if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr), - "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?")) + if (region_is_misaligned(md)) { + static bool __initdata code_is_misaligned; + /* - * If the region is not aligned to the page size of the OS, we - * can not use strict permissions, since that would also affect - * the mapping attributes of the adjacent regions. + * Regions that are not aligned to the OS page size cannot be + * mapped with strict permissions, as those might interfere + * with the permissions that are needed by the adjacent + * region's mapping. However, if we haven't encountered any + * misaligned runtime code regions so far, we can safely use + * non-executable permissions for non-code regions. */ - return pgprot_val(PAGE_KERNEL_EXEC); + code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE); + + return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC) + : pgprot_val(PAGE_KERNEL); + } /* R-- */ if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) == @@ -62,19 +78,16 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE || md->type == EFI_RUNTIME_SERVICES_DATA); - if (!PAGE_ALIGNED(md->phys_addr) || - !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) { - /* - * If the end address of this region is not aligned to page - * size, the mapping is rounded up, and may end up sharing a - * page frame with the next UEFI memory region. If we create - * a block entry now, we may need to split it again when mapping - * the next region, and support for that is going to be removed - * from the MMU routines. So avoid block mappings altogether in - * that case. - */ + /* + * If this region is not aligned to the page size used by the OS, the + * mapping will be rounded outwards, and may end up sharing a page + * frame with an adjacent runtime memory region. Given that the page + * table descriptor covering the shared page will be rewritten when the + * adjacent region gets mapped, we must avoid block mappings here so we + * don't have to worry about splitting them when that happens. + */ + if (region_is_misaligned(md)) page_mappings_only = true; - } create_pgd_mapping(mm, md->phys_addr, md->virt_addr, md->num_pages << EFI_PAGE_SHIFT, @@ -101,6 +114,9 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm, BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE && md->type != EFI_RUNTIME_SERVICES_DATA); + if (region_is_misaligned(md)) + return 0; + /* * Calling apply_to_page_range() is only safe on regions that are * guaranteed to be mapped down to pages. 
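region_is_misaligned() above folds the two previously open-coded alignment checks into one predicate: a UEFI region may only receive strict permissions (or block mappings) when both its start address and its size are multiples of the kernel's page size. A self-contained sketch of that test, assuming the UEFI-defined 4 KiB EFI page granule; names here are local to the example:

	#include <stdbool.h>
	#include <stdint.h>

	#define EFI_PAGE_SHIFT	12	/* UEFI memory map uses 4 KiB pages */

	static bool region_misaligned(uint64_t phys, uint64_t num_pages,
				      uint64_t os_page_size)
	{
		uint64_t size = num_pages << EFI_PAGE_SHIFT;

		/* need an aligned start and an aligned size for strict perms */
		return (phys & (os_page_size - 1)) || (size & (os_page_size - 1));
	}

With 4 KiB kernel pages the predicate can never fire, since the EFI map is itself 4 KiB granular, which is why the kernel helper short-circuits when PAGE_SIZE == EFI_PAGE_SIZE; the interesting cases are 16K and 64K page kernels.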
Since we are only called @@ -127,3 +143,30 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f) pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f); return s; } + +DEFINE_SPINLOCK(efi_rt_lock); + +asmlinkage u64 *efi_rt_stack_top __ro_after_init; + +/* EFI requires 8 KiB of stack space for runtime services */ +static_assert(THREAD_SIZE >= SZ_8K); + +static int __init arm64_efi_rt_init(void) +{ + void *p; + + if (!efi_enabled(EFI_RUNTIME_SERVICES)) + return 0; + + p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL, + NUMA_NO_NODE, &&l); +l: if (!p) { + pr_warn("Failed to allocate EFI runtime stack\n"); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); + return -ENOMEM; + } + + efi_rt_stack_top = p + THREAD_SIZE; + return 0; +} +core_initcall(arm64_efi_rt_init); diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 86a5cf9bc19a..402a24f845b9 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -76,6 +76,66 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr) return NULL; } +/* + * Find the address the callsite must branch to in order to reach '*addr'. + * + * Due to the limited range of 'BL' instructions, modules may be placed too far + * away to branch directly and must use a PLT. + * + * Returns true when '*addr' contains a reachable target address, or has been + * modified to contain a PLT address. Returns false otherwise. + */ +static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, + struct module *mod, + unsigned long *addr) +{ + unsigned long pc = rec->ip; + long offset = (long)*addr - (long)pc; + struct plt_entry *plt; + + /* + * When the target is within range of the 'BL' instruction, use 'addr' + * as-is and branch to that directly. + */ + if (offset >= -SZ_128M && offset < SZ_128M) + return true; + + /* + * When the target is outside of the range of a 'BL' instruction, we + * must use a PLT to reach it. We can only place PLTs for modules, and + * only when module PLT support is built-in. + */ + if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) + return false; + + /* + * 'mod' is only set at module load time, but if we end up + * dealing with an out-of-range condition, we can assume it + * is due to a module being loaded far away from the kernel. + * + * NOTE: __module_text_address() must be called with preemption + * disabled, but we can rely on ftrace_lock to ensure that 'mod' + * retains its validity throughout the remainder of this code. + */ + if (!mod) { + preempt_disable(); + mod = __module_text_address(pc); + preempt_enable(); + } + + if (WARN_ON(!mod)) + return false; + + plt = get_ftrace_plt(mod, *addr); + if (!plt) { + pr_err("ftrace: no module PLT for %ps\n", (void *)*addr); + return false; + } + + *addr = (unsigned long)plt; + return true; +} + /* * Turn on the call to ftrace_caller() in instrumented function */ @@ -83,40 +143,9 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long pc = rec->ip; u32 old, new; - long offset = (long)pc - (long)addr; - if (offset < -SZ_128M || offset >= SZ_128M) { - struct module *mod; - struct plt_entry *plt; - - if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) - return -EINVAL; - - /* - * On kernels that support module PLTs, the offset between the - * branch instruction and its target may legally exceed the - * range of an ordinary relative 'bl' opcode. In this case, we - * need to branch via a trampoline in the module. 
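The ftrace_find_callable_addr() helper above turns on a single arithmetic fact: an arm64 'BL' instruction encodes a signed 26-bit word immediate, so it can only reach targets within +/-128 MiB of the callsite, and anything farther away (typically a module loaded far from kernel text) must detour through a PLT. A standalone sketch of the reachability test, with local names:

	#include <stdbool.h>
	#include <stdint.h>

	#define SZ_128M	(128ULL << 20)

	static bool bl_can_reach(uint64_t pc, uint64_t target)
	{
		int64_t offset = (int64_t)(target - pc);

		/* BL imm26 words => [-128M, +128M) byte range from callsite */
		return offset >= -(int64_t)SZ_128M && offset < (int64_t)SZ_128M;
	}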
- * - * NOTE: __module_text_address() must be called with preemption - * disabled, but we can rely on ftrace_lock to ensure that 'mod' - * retains its validity throughout the remainder of this code. - */ - preempt_disable(); - mod = __module_text_address(pc); - preempt_enable(); - - if (WARN_ON(!mod)) - return -EINVAL; - - plt = get_ftrace_plt(mod, addr); - if (!plt) { - pr_err("ftrace: no module PLT for %ps\n", (void *)addr); - return -EINVAL; - } - - addr = (unsigned long)plt; - } + if (!ftrace_find_callable_addr(rec, NULL, &addr)) + return -EINVAL; old = aarch64_insn_gen_nop(); new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); @@ -131,6 +160,11 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long pc = rec->ip; u32 old, new; + if (!ftrace_find_callable_addr(rec, NULL, &old_addr)) + return -EINVAL; + if (!ftrace_find_callable_addr(rec, NULL, &addr)) + return -EINVAL; + old = aarch64_insn_gen_branch_imm(pc, old_addr, AARCH64_INSN_BRANCH_LINK); new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); @@ -180,54 +214,30 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { unsigned long pc = rec->ip; - bool validate = true; u32 old = 0, new; - long offset = (long)pc - (long)addr; - - if (offset < -SZ_128M || offset >= SZ_128M) { - u32 replaced; - - if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) - return -EINVAL; - - /* - * 'mod' is only set at module load time, but if we end up - * dealing with an out-of-range condition, we can assume it - * is due to a module being loaded far away from the kernel. - */ - if (!mod) { - preempt_disable(); - mod = __module_text_address(pc); - preempt_enable(); - - if (WARN_ON(!mod)) - return -EINVAL; - } - - /* - * The instruction we are about to patch may be a branch and - * link instruction that was redirected via a PLT entry. In - * this case, the normal validation will fail, but we can at - * least check that we are dealing with a branch and link - * instruction that points into the right module. - */ - if (aarch64_insn_read((void *)pc, &replaced)) - return -EFAULT; - - if (!aarch64_insn_is_bl(replaced) || - !within_module(pc + aarch64_get_branch_offset(replaced), - mod)) - return -EINVAL; - - validate = false; - } else { - old = aarch64_insn_gen_branch_imm(pc, addr, - AARCH64_INSN_BRANCH_LINK); - } new = aarch64_insn_gen_nop(); - return ftrace_modify_code(pc, old, new, validate); + /* + * When using mcount, callsites in modules may have been initialized to + * call an arbitrary module PLT (which redirects to the _mcount stub) + * rather than the ftrace PLT we'll use at runtime (which redirects to + * the ftrace trampoline). We can ignore the old PLT when initializing + * the callsite. + * + * Note: 'mod' is only set at module load time.
+ */ + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && + IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) { + return aarch64_insn_patch_text_nosync((void *)pc, new); + } + + if (!ftrace_find_callable_addr(rec, mod, &addr)) + return -EINVAL; + + old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); + + return ftrace_modify_code(pc, old, new, true); } void arch_ftrace_update_code(int command) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 047eac4046e0..f196eab73d3b 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -671,8 +671,10 @@ SYM_FUNC_END(__secondary_too_slow) SYM_FUNC_START(__enable_mmu) mrs x2, ID_AA64MMFR0_EL1 ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4 - cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED - b.ne __no_granule_support + cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN + b.lt __no_granule_support + cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX + b.gt __no_granule_support update_early_cpu_boot_status 0, x2, x3 adrp x2, idmap_pg_dir phys_to_ttbr x1, x1 diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 6c0de2f60ea9..7d4fdf974542 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -216,8 +216,8 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg) int i, ret = 0; struct aarch64_insn_patch *pp = arg; - /* The first CPU becomes master */ - if (atomic_inc_return(&pp->cpu_count) == 1) { + /* The last CPU becomes master */ + if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) { for (i = 0; ret == 0 && i < pp->insn_cnt; i++) ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i], pp->new_insns[i]); diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index acd43c57d9a0..c6a37d93539b 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -67,6 +67,9 @@ void mte_sync_tags(pte_t *ptep, pte_t pte) if (!test_and_set_bit(PG_mte_tagged, &page->flags)) mte_sync_page_tags(page, ptep, check_swap); } + + /* ensure the tags are visible before the PTE is set */ + smp_wmb(); } int memcmp_pages(struct page *page1, struct page *page2) diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c index c07d7a034941..69ec670bcc70 100644 --- a/arch/arm64/kernel/paravirt.c +++ b/arch/arm64/kernel/paravirt.c @@ -30,7 +30,7 @@ struct paravirt_patch_template pv_ops; EXPORT_SYMBOL_GPL(pv_ops); struct pv_time_stolen_time_region { - struct pvclock_vcpu_stolen_time *kaddr; + struct pvclock_vcpu_stolen_time __rcu *kaddr; }; static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region); @@ -47,7 +47,9 @@ early_param("no-steal-acc", parse_no_stealacc); /* return stolen time in ns by asking the hypervisor */ static u64 pv_steal_clock(int cpu) { + struct pvclock_vcpu_stolen_time *kaddr = NULL; struct pv_time_stolen_time_region *reg; + u64 ret = 0; reg = per_cpu_ptr(&stolen_time_region, cpu); @@ -56,28 +58,37 @@ static u64 pv_steal_clock(int cpu) * online notification callback runs. Until the callback * has run we just return zero. 
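The pv_steal_clock() rework in the hunk above follows the classic RCU retire pattern: readers dereference the stolen-time mapping only inside an RCU read-side critical section, so the CPU-down path can swap reg->kaddr to NULL, wait out a grace period, and only then unmap the region without yanking it from under a reader. A hedged kernel-style sketch of the shape of that pattern, assuming kernel context; 'region', 'reg_ptr', and the function names are local to the sketch, while the RCU primitives are the real APIs:

	struct region { u64 stolen_time; };
	static struct region __rcu *reg_ptr;

	static u64 read_stolen(void)
	{
		struct region *r;
		u64 val = 0;

		rcu_read_lock();
		r = rcu_dereference(reg_ptr);
		if (r)
			val = READ_ONCE(r->stolen_time);
		rcu_read_unlock();	/* 'r' must not be used past here */
		return val;
	}

	static void retire_region(void)
	{
		struct region *r = rcu_replace_pointer(reg_ptr, NULL, true);

		synchronize_rcu();	/* all pre-existing readers are done */
		memunmap(r);		/* now safe to tear down the mapping */
	}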
*/ - if (!reg->kaddr) + rcu_read_lock(); + kaddr = rcu_dereference(reg->kaddr); + if (!kaddr) { + rcu_read_unlock(); return 0; + } - return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time)); + ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time)); + rcu_read_unlock(); + return ret; } static int stolen_time_cpu_down_prepare(unsigned int cpu) { + struct pvclock_vcpu_stolen_time *kaddr = NULL; struct pv_time_stolen_time_region *reg; reg = this_cpu_ptr(&stolen_time_region); if (!reg->kaddr) return 0; - memunmap(reg->kaddr); - memset(reg, 0, sizeof(*reg)); + kaddr = rcu_replace_pointer(reg->kaddr, NULL, true); + synchronize_rcu(); + memunmap(kaddr); return 0; } static int stolen_time_cpu_online(unsigned int cpu) { + struct pvclock_vcpu_stolen_time *kaddr = NULL; struct pv_time_stolen_time_region *reg; struct arm_smccc_res res; @@ -88,17 +99,19 @@ static int stolen_time_cpu_online(unsigned int cpu) if (res.a0 == SMCCC_RET_NOT_SUPPORTED) return -EINVAL; - reg->kaddr = memremap(res.a0, + kaddr = memremap(res.a0, sizeof(struct pvclock_vcpu_stolen_time), MEMREMAP_WB); + rcu_assign_pointer(reg->kaddr, kaddr); + if (!reg->kaddr) { pr_warn("Failed to map stolen time data structure\n"); return -ENOMEM; } - if (le32_to_cpu(reg->kaddr->revision) != 0 || - le32_to_cpu(reg->kaddr->attributes) != 0) { + if (le32_to_cpu(kaddr->revision) != 0 || + le32_to_cpu(kaddr->attributes) != 0) { pr_warn_once("Unexpected revision or attributes in stolen time data\n"); return -ENXIO; } diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index cdb3d4549b3a..e41cee4e1bb9 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -1116,17 +1116,32 @@ static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name, return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL); } -static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_pmuv3", - armv8_pmuv3_map_event); +#define PMUV3_INIT_SIMPLE(name) \ +static int name##_pmu_init(struct arm_pmu *cpu_pmu) \ +{ \ + return armv8_pmu_init_nogroups(cpu_pmu, #name, armv8_pmuv3_map_event);\ } -static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a34", - armv8_pmuv3_map_event); -} +PMUV3_INIT_SIMPLE(armv8_pmuv3) + +PMUV3_INIT_SIMPLE(armv8_cortex_a34) +PMUV3_INIT_SIMPLE(armv8_cortex_a55) +PMUV3_INIT_SIMPLE(armv8_cortex_a65) +PMUV3_INIT_SIMPLE(armv8_cortex_a75) +PMUV3_INIT_SIMPLE(armv8_cortex_a76) +PMUV3_INIT_SIMPLE(armv8_cortex_a77) +PMUV3_INIT_SIMPLE(armv8_cortex_a78) +PMUV3_INIT_SIMPLE(armv9_cortex_a510) +PMUV3_INIT_SIMPLE(armv9_cortex_a710) +PMUV3_INIT_SIMPLE(armv8_cortex_x1) +PMUV3_INIT_SIMPLE(armv9_cortex_x2) +PMUV3_INIT_SIMPLE(armv8_neoverse_e1) +PMUV3_INIT_SIMPLE(armv8_neoverse_n1) +PMUV3_INIT_SIMPLE(armv9_neoverse_n2) +PMUV3_INIT_SIMPLE(armv8_neoverse_v1) + +PMUV3_INIT_SIMPLE(armv8_nvidia_carmel) +PMUV3_INIT_SIMPLE(armv8_nvidia_denver) static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu) { @@ -1140,24 +1155,12 @@ static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) armv8_a53_map_event); } -static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a55", - armv8_pmuv3_map_event); -} - static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57", armv8_a57_map_event); } -static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a65", - 
armv8_pmuv3_map_event); -} - static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72", @@ -1170,36 +1173,6 @@ static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu) armv8_a73_map_event); } -static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a75", - armv8_pmuv3_map_event); -} - -static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a76", - armv8_pmuv3_map_event); -} - -static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a77", - armv8_pmuv3_map_event); -} - -static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_e1", - armv8_pmuv3_map_event); -} - -static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu) -{ - return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_n1", - armv8_pmuv3_map_event); -} - static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) { return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder", @@ -1213,22 +1186,31 @@ static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) } static const struct of_device_id armv8_pmu_of_device_ids[] = { - {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init}, - {.compatible = "arm,cortex-a34-pmu", .data = armv8_a34_pmu_init}, + {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init}, + {.compatible = "arm,cortex-a34-pmu", .data = armv8_cortex_a34_pmu_init}, {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init}, {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init}, - {.compatible = "arm,cortex-a55-pmu", .data = armv8_a55_pmu_init}, + {.compatible = "arm,cortex-a55-pmu", .data = armv8_cortex_a55_pmu_init}, {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init}, - {.compatible = "arm,cortex-a65-pmu", .data = armv8_a65_pmu_init}, + {.compatible = "arm,cortex-a65-pmu", .data = armv8_cortex_a65_pmu_init}, {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init}, {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init}, - {.compatible = "arm,cortex-a75-pmu", .data = armv8_a75_pmu_init}, - {.compatible = "arm,cortex-a76-pmu", .data = armv8_a76_pmu_init}, - {.compatible = "arm,cortex-a77-pmu", .data = armv8_a77_pmu_init}, - {.compatible = "arm,neoverse-e1-pmu", .data = armv8_e1_pmu_init}, - {.compatible = "arm,neoverse-n1-pmu", .data = armv8_n1_pmu_init}, + {.compatible = "arm,cortex-a75-pmu", .data = armv8_cortex_a75_pmu_init}, + {.compatible = "arm,cortex-a76-pmu", .data = armv8_cortex_a76_pmu_init}, + {.compatible = "arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init}, + {.compatible = "arm,cortex-a78-pmu", .data = armv8_cortex_a78_pmu_init}, + {.compatible = "arm,cortex-a510-pmu", .data = armv9_cortex_a510_pmu_init}, + {.compatible = "arm,cortex-a710-pmu", .data = armv9_cortex_a710_pmu_init}, + {.compatible = "arm,cortex-x1-pmu", .data = armv8_cortex_x1_pmu_init}, + {.compatible = "arm,cortex-x2-pmu", .data = armv9_cortex_x2_pmu_init}, + {.compatible = "arm,neoverse-e1-pmu", .data = armv8_neoverse_e1_pmu_init}, + {.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init}, + {.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init}, + {.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init}, {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init}, {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init}, + 
{.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init}, + {.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init}, {}, }; @@ -1251,7 +1233,7 @@ static int __init armv8_pmu_driver_init(void) if (acpi_disabled) return platform_driver_register(&armv8_pmu_driver); else - return arm_pmu_acpi_probe(armv8_pmuv3_init); + return arm_pmu_acpi_probe(armv8_pmuv3_pmu_init); } device_initcall(armv8_pmu_driver_init) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 48d4f785e311..c38a5ab3a6b5 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -478,7 +478,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, ptrauth_thread_init_kernel(p); - if (likely(!(p->flags & PF_KTHREAD))) { + if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) { *childregs = *current_pt_regs(); childregs->regs[0] = 0; diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index 6d45c63c6454..428cfabd11c4 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -233,17 +233,20 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn) __this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT); } -static void call_smc_arch_workaround_1(void) +/* Called during entry so must be noinstr */ +static noinstr void call_smc_arch_workaround_1(void) { arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); } -static void call_hvc_arch_workaround_1(void) +/* Called during entry so must be noinstr */ +static noinstr void call_hvc_arch_workaround_1(void) { arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); } -static void qcom_link_stack_sanitisation(void) +/* Called during entry so must be noinstr */ +static noinstr void qcom_link_stack_sanitisation(void) { u64 tmp; @@ -850,6 +853,7 @@ u8 spectre_bhb_loop_affected(int scope) if (scope == SCOPE_LOCAL_CPU) { static const struct midr_range spectre_bhb_k32_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE), MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), @@ -864,6 +868,10 @@ u8 spectre_bhb_loop_affected(int scope) MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), {}, }; + static const struct midr_range spectre_bhb_k11_list[] = { + MIDR_ALL_VERSIONS(MIDR_AMPERE1), + {}, + }; static const struct midr_range spectre_bhb_k8_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), @@ -874,6 +882,8 @@ u8 spectre_bhb_loop_affected(int scope) k = 32; else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) k = 24; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list)) + k = 11; else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list)) k = 8; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 0dab5679a97d..b6fbbd527dd7 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -938,7 +938,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, (void __user *)NULL, current); } - if (thread_flags & _TIF_SIGPENDING) + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_flags & _TIF_NOTIFY_RESUME) { diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index 265fe3eb1069..2df3cd477176 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -114,6 +114,6 @@ long compat_arm_syscall(struct pt_regs *regs, int scno) addr = instruction_pointer(regs) - 
(compat_thumb_mode(regs) ? 2 : 4); arm64_notify_die("Oops - bad compat syscall(2)", regs, - SIGILL, ILL_ILLTRP, addr, scno); + SIGILL, ILL_ILLTRP, addr, 0); return 0; } diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 543c67cae02f..f35af19b7055 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -22,46 +22,6 @@ #include #include -void store_cpu_topology(unsigned int cpuid) -{ - struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; - u64 mpidr; - - if (cpuid_topo->package_id != -1) - goto topology_populated; - - mpidr = read_cpuid_mpidr(); - - /* Uniprocessor systems can rely on default topology values */ - if (mpidr & MPIDR_UP_BITMASK) - return; - - /* - * This would be the place to create cpu topology based on MPIDR. - * - * However, it cannot be trusted to depict the actual topology; some - * pieces of the architecture enforce an artificial cap on Aff0 values - * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an - * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up - * having absolutely no relationship to the actual underlying system - * topology, and cannot be reasonably used as core / package ID. - * - * If the MT bit is set, Aff0 *could* be used to define a thread ID, but - * we still wouldn't be able to obtain a sane core ID. This means we - * need to entirely ignore MPIDR for any topology deduction. - */ - cpuid_topo->thread_id = -1; - cpuid_topo->core_id = cpuid; - cpuid_topo->package_id = cpu_to_node(cpuid); - - pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", - cpuid, cpuid_topo->package_id, cpuid_topo->core_id, - cpuid_topo->thread_id, mpidr); - -topology_populated: - update_siblings_masks(cpuid); -} - #ifdef CONFIG_ACPI static bool __init acpi_cpu_is_threaded(int cpu) { @@ -158,7 +118,7 @@ static int validate_cpu_freq_invariance_counters(int cpu) } /* Convert maximum frequency from KHz to Hz and validate */ - max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000; + max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000ULL; if (unlikely(!max_freq_hz)) { pr_debug("CPU%d: invalid maximum frequency.\n", cpu); return -EINVAL; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index acc7e7647cbb..a6ba4362e36d 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -931,7 +931,7 @@ static struct break_hook bug_break_hook = { static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr) { pr_err("%s generated an invalid instruction at %pS!\n", - in_bpf_jit(regs) ? 
"BPF JIT" : "Kernel text patching", + "Kernel text patching", (void *)instruction_pointer(regs)); /* We cannot handle this */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 67749fd3604d..78550c856c24 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -695,8 +695,7 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu) if (likely(!vcpu_mode_is_32bit(vcpu))) return false; - return !system_supports_32bit_el0() || - static_branch_unlikely(&arm64_mismatched_32bit_el0); + return !kvm_supports_32bit_el0(); } /** diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index c763808cacdf..27b783a711bb 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -225,7 +225,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK; switch (mode) { case PSR_AA32_MODE_USR: - if (!system_supports_32bit_el0()) + if (!kvm_supports_32bit_el0()) return -EINVAL; break; case PSR_AA32_MODE_FIQ: diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index cebe39f3b1b6..cb0054ea7332 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -240,6 +240,14 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index) { struct kvm_run *run = vcpu->run; + if (ARM_SERROR_PENDING(exception_index)) { + /* + * The SError is handled by handle_exit_early(). If the guest + * survives it will re-execute the original instruction. + */ + return 1; + } + exception_index = ARM_EXCEPTION_CODE(exception_index); switch (exception_index) { diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c index 73629094f903..0a99cb4e0c38 100644 --- a/arch/arm64/kvm/hyp/exception.c +++ b/arch/arm64/kvm/hyp/exception.c @@ -38,7 +38,10 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg) static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, u64 val) { - write_sysreg_el1(val, SYS_SPSR); + if (has_vhe()) + write_sysreg_el1(val, SYS_SPSR); + else + __vcpu_sys_reg(vcpu, SPSR_EL1) = val; } static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 16807a46ab9f..a22555212681 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -423,7 +423,8 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR); - if (ARM_SERROR_PENDING(*exit_code)) { + if (ARM_SERROR_PENDING(*exit_code) && + ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) { u8 esr_ec = kvm_vcpu_trap_get_class(vcpu); /* diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index e9f6ea704d07..7c8278f53da3 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -285,5 +285,5 @@ void __noreturn hyp_panic(void) asmlinkage void kvm_unexpected_el2_exception(void) { - return __kvm_unexpected_el2_exception(); + __kvm_unexpected_el2_exception(); } diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index e57e5d35a017..1e503711a670 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -231,5 +231,5 @@ void __noreturn hyp_panic(void) asmlinkage void kvm_unexpected_el2_exception(void) { - return __kvm_unexpected_el2_exception(); + __kvm_unexpected_el2_exception(); } diff --git 
a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index a802f52a2379..46ce3765450f 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -344,16 +344,18 @@ int kvm_set_ipa_limit(void) } switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) { - default: - case 1: + case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE: kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n"); return -EINVAL; - case 0: + case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT: kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n"); break; - case 2: + case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX: kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n"); break; + default: + kvm_err("Unsupported value for TGRAN_2, giving up\n"); + return -EINVAL; } kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 187576986aef..b9bb810991c5 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -618,7 +618,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) */ val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); - if (!system_supports_32bit_el0()) + if (!kvm_supports_32bit_el0()) val |= ARMV8_PMU_PMCR_LC; __vcpu_sys_reg(vcpu, r->reg) = val; } @@ -667,7 +667,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, val = __vcpu_sys_reg(vcpu, PMCR_EL0); val &= ~ARMV8_PMU_PMCR_MASK; val |= p->regval & ARMV8_PMU_PMCR_MASK; - if (!system_supports_32bit_el0()) + if (!kvm_supports_32bit_el0()) val |= ARMV8_PMU_PMCR_LC; __vcpu_sys_reg(vcpu, PMCR_EL0) = val; kvm_pmu_handle_pmcr(vcpu, val); diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 40cbaca81333..9a40021c6e71 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -2096,7 +2096,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz, memset(entry, 0, esz); - while (len > 0) { + while (true) { int next_offset; size_t byte_offset; @@ -2109,6 +2109,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz, return next_offset; byte_offset = next_offset * esz; + if (byte_offset >= len) + break; + id += next_offset; gpa += byte_offset; len -= byte_offset; diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v2.c b/arch/arm64/kvm/vgic/vgic-mmio-v2.c index a016f07adc28..b3cc51795650 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c @@ -418,11 +418,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET, vgic_mmio_read_pending, vgic_mmio_write_spending, - NULL, vgic_uaccess_write_spending, 1, + vgic_uaccess_read_pending, vgic_uaccess_write_spending, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR, vgic_mmio_read_pending, vgic_mmio_write_cpending, - NULL, vgic_uaccess_write_cpending, 1, + vgic_uaccess_read_pending, vgic_uaccess_write_cpending, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, vgic_mmio_read_active, vgic_mmio_write_sactive, diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index 9e1459534ce5..5b441777937b 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -226,8 +226,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu, return 0; } -unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, - 
gpa_t addr, unsigned int len) +static unsigned long __read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + bool is_user) { u32 intid = VGIC_ADDR_TO_INTID(addr, 1); u32 value = 0; @@ -248,7 +249,7 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, IRQCHIP_STATE_PENDING, &val); WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); - } else if (vgic_irq_is_mapped_level(irq)) { + } else if (!is_user && vgic_irq_is_mapped_level(irq)) { val = vgic_get_phys_line_level(irq); } else { val = irq_is_pending(irq); @@ -263,6 +264,18 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, return value; } +unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + return __read_pending(vcpu, addr, len, false); +} + +unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + return __read_pending(vcpu, addr, len, true); +} + static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq) { return (vgic_irq_is_sgi(irq->intid) && diff --git a/arch/arm64/kvm/vgic/vgic-mmio.h b/arch/arm64/kvm/vgic/vgic-mmio.h index fefcca2b14dc..dcea44015985 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.h +++ b/arch/arm64/kvm/vgic/vgic-mmio.h @@ -149,6 +149,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu, unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len); +unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len); + void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val); diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 9ceda0f401f1..0828b3b115a6 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_KASAN) += kasan_init.o KASAN_SANITIZE_kasan_init.o := n obj-$(CONFIG_ROCKCHIP_ARM64_ALIGN_FAULT_FIX) += fixup_fault.o +obj-$(CONFIG_ARM64_ERRATUM_2454944) += dma-mapping-noalias.o diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 2d881f34dd9d..83a0b6f9960b 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -229,7 +229,7 @@ SYM_FUNC_END_PI(__dma_flush_area) */ SYM_FUNC_START_PI(__dma_map_area) cmp w2, #DMA_FROM_DEVICE - b.eq __dma_inv_area + b.eq __dma_flush_area b __dma_clean_area SYM_FUNC_END_PI(__dma_map_area) diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index b5447e53cd73..0dea80bf6de4 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -16,8 +16,8 @@ void copy_highpage(struct page *to, struct page *from) { - struct page *kto = page_address(to); - struct page *kfrom = page_address(from); + void *kto = page_address(to); + void *kfrom = page_address(from); copy_page(kto, kfrom); diff --git a/arch/arm64/mm/dma-mapping-noalias.c b/arch/arm64/mm/dma-mapping-noalias.c new file mode 100644 index 000000000000..5a9bba17af22 --- /dev/null +++ b/arch/arm64/mm/dma-mapping-noalias.c @@ -0,0 +1,576 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Support for uncached DMA mappings. + * Part of Cortex-A510 erratum 2454944 workaround. + * + * Copyright (C) 2022-2023 ARM Ltd. + * Author: Robin Murphy + * Activating swiotlb + disabling lazy vunmap: Beata Michalska + */ +#include +#include +#include +#include +#include +#include +#include + +/* + * Bits [58:55] of the translation table descriptor are being reserved + * by the architecture for software use purposes. 
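The core trick of dma-mapping-noalias.c, spelled out in the header comment that continues below, is an in-descriptor use count: bits [58:55] of an arm64 page-table descriptor are ignored by the hardware, so a 4-bit refcount can live there, and the memory type is switched only on the 0-to-1 and 1-to-0 transitions via a relaxed cmpxchg loop. A user-space C11 model of the increment side, with local names and <stdatomic.h> standing in for the kernel's cmpxchg_relaxed():

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define SW_INC		(1ULL << 55)	/* +1 in bits [58:55] */
	#define SW_COUNT(p)	(((p) >> 55) & 0xf)

	static bool inc_nc(_Atomic uint64_t *ptep, bool *first_user)
	{
		uint64_t old = atomic_load_explicit(ptep, memory_order_relaxed);

		do {
			if (SW_COUNT(old) == 15)
				return false;	/* 4-bit counter would overflow */
		} while (!atomic_compare_exchange_weak_explicit(ptep, &old,
				old + SW_INC, memory_order_relaxed,
				memory_order_relaxed));

		/* on 0 -> 1 the caller remaps the page Normal-NC and flushes */
		*first_user = (SW_COUNT(old) == 0);
		return true;
	}

The decrement side mirrors this, restoring the cacheable attribute once the count returns to zero; pte_set_nc() and pte_clear_nc() below implement the same logic against live descriptors, with the extra twist of transiently clearing PTE_VALID around the attribute change.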
With the assumption that + * those should not be used on linear map addresses (which is not without + * any guarantee though), those bits are being leveraged to trace potential + * cacheable aliases. This is still far from being perfect, to say the least: + * ... categorically the worst, but oh well, needs must... + */ +#define REFCOUNT_INC BIT(55) +#define PTE_REFCOUNT(pte) (((pte) >> 55) & 0xf) + +static int pte_set_nc(pte_t *ptep, unsigned long addr, void *data) +{ + pteval_t old_pte, new_pte, pte; + unsigned int refcount; + + pte = pte_val(READ_ONCE(*ptep)); + do { + /* Avoid racing against the transient invalid state */ + old_pte = pte | PTE_VALID; + new_pte = old_pte + REFCOUNT_INC; + refcount = PTE_REFCOUNT(pte); + if (WARN_ON(refcount == 15)) + return -EINVAL; + if (refcount == 0) { + new_pte &= ~(PTE_ATTRINDX_MASK | PTE_VALID); + new_pte |= PTE_ATTRINDX(MT_NORMAL_NC); + } + pte = cmpxchg_relaxed(&pte_val(*ptep), old_pte, new_pte); + } while (pte != old_pte); + + *(unsigned int *)data = refcount; + if (refcount) + return 0; + + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + WRITE_ONCE(*ptep, __pte(new_pte | PTE_VALID)); + return 0; +} + +static int pte_clear_nc(pte_t *ptep, unsigned long addr, void *data) +{ + pteval_t old_pte, new_pte, pte; + unsigned int refcount; + + pte = pte_val(READ_ONCE(*ptep)); + do { + old_pte = pte | PTE_VALID; + new_pte = old_pte - REFCOUNT_INC; + refcount = PTE_REFCOUNT(pte); + if (WARN_ON(refcount == 0)) + return -EINVAL; + if (refcount == 1) { + new_pte &= ~(PTE_ATTRINDX_MASK | PTE_VALID); + new_pte |= PTE_ATTRINDX(MT_NORMAL_TAGGED); + } + pte = cmpxchg_relaxed(&pte_val(*ptep), old_pte, new_pte); + } while (pte != old_pte); + + if (refcount > 1) + return 0; + + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + WRITE_ONCE(*ptep, __pte(new_pte | PTE_VALID)); + return 0; +} + +static int set_nc(void *addr, size_t size) +{ + unsigned int count; + int ret = apply_to_existing_page_range(&init_mm, (unsigned long)addr, + size, pte_set_nc, &count); + + WARN_RATELIMIT(count == 0 && page_mapped(virt_to_page(addr)), + "changing linear mapping but cacheable aliases may still exist\n"); + dsb(ishst); + isb(); + __flush_dcache_area(addr, size); + return ret; +} + +static int clear_nc(void *addr, size_t size) +{ + int ret = apply_to_existing_page_range(&init_mm, (unsigned long)addr, + size, pte_clear_nc, NULL); + dsb(ishst); + isb(); + __inval_dcache_area(addr, size); + return ret; +} + +static phys_addr_t __arm64_noalias_map(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, + unsigned long attrs, bool bounce) +{ + bounce = bounce || (phys | size) & ~PAGE_MASK; + if (bounce) { + phys = swiotlb_tbl_map_single(dev, phys, size, PAGE_ALIGN(size), + dir, attrs); + if (phys == DMA_MAPPING_ERROR) + return DMA_MAPPING_ERROR; + } + if (set_nc(phys_to_virt(phys & PAGE_MASK), PAGE_ALIGN(size))) + goto out_unmap; + + return phys; +out_unmap: + if (bounce) + swiotlb_tbl_unmap_single(dev, phys, size, PAGE_ALIGN(size), dir, + attrs | DMA_ATTR_SKIP_CPU_SYNC); + return DMA_MAPPING_ERROR; + +} + +static void __arm64_noalias_unmap(struct device *dev, phys_addr_t phys, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + clear_nc(phys_to_virt(phys & PAGE_MASK), PAGE_ALIGN(size)); + if (is_swiotlb_buffer(phys)) + swiotlb_tbl_unmap_single(dev, phys, size, PAGE_ALIGN(size), dir, attrs); +} + +static void __arm64_noalias_sync_for_device(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir) +{ + if
(is_swiotlb_buffer(phys)) + swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE); + else + arch_sync_dma_for_device(phys, size, dir); +} + +static void __arm64_noalias_sync_for_cpu(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir) +{ + if (is_swiotlb_buffer(phys)) + swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU); + else + arch_sync_dma_for_cpu(phys, size, dir); +} + +static void *arm64_noalias_alloc(struct device *dev, size_t size, + dma_addr_t *dma_addr, gfp_t gfp, unsigned long attrs) +{ + struct page *page; + void *ret; + + if (attrs & DMA_ATTR_NO_WARN) + gfp |= __GFP_NOWARN; + + size = PAGE_ALIGN(size); + page = dma_direct_alloc_pages(dev, size, dma_addr, 0, gfp & ~__GFP_ZERO); + if (!page) + return NULL; + + ret = page_address(page); + if (set_nc(ret, size)) { + dma_direct_free_pages(dev, size, page, *dma_addr, 0); + return NULL; + } + return ret; +} + +static void arm64_noalias_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs) +{ + size = PAGE_ALIGN(size); + clear_nc(cpu_addr, size); + dma_direct_free_pages(dev, size, virt_to_page(cpu_addr), dma_addr, 0); +} + +static dma_addr_t arm64_noalias_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + phys_addr_t phys = page_to_phys(page) + offset; + bool bounce = !dma_capable(dev, phys_to_dma(dev, phys), size, true); + + if (!bounce && dir == DMA_TO_DEVICE) { + arch_sync_dma_for_device(phys, size, dir); + return phys_to_dma(dev, phys); + } + + bounce = bounce || page_mapped(page); + phys = __arm64_noalias_map(dev, phys, size, dir, attrs, bounce); + if (phys == DMA_MAPPING_ERROR) + return DMA_MAPPING_ERROR; + + return phys_to_dma(dev, phys); +} + +static void arm64_noalias_unmap_page(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + if (dir == DMA_TO_DEVICE) + return; + __arm64_noalias_unmap(dev, dma_to_phys(dev, dma_addr), size, dir, attrs); +} + +static void arm64_noalias_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + if (dir == DMA_TO_DEVICE) + return; + for_each_sg(sgl, sg, nents, i) + __arm64_noalias_unmap(dev, dma_to_phys(dev, sg->dma_address), + sg->length, dir, attrs); +} + +static int arm64_noalias_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + sg->dma_address = arm64_noalias_map_page(dev, sg_page(sg), sg->offset, + sg->length, dir, attrs); + if (sg->dma_address == DMA_MAPPING_ERROR) + goto out_unmap; + sg->dma_length = sg->length; + } + + return nents; + +out_unmap: + arm64_noalias_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + return 0; +} + +static void arm64_noalias_sync_single_for_device(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + __arm64_noalias_sync_for_device(dev, dma_to_phys(dev, addr), size, dir); +} + +static void arm64_noalias_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + __arm64_noalias_sync_for_cpu(dev, dma_to_phys(dev, addr), size, dir); +} + +static void arm64_noalias_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) +{ + struct scatterlist 
*sg; + int i; + + for_each_sg(sgl, sg, nents, i) + arm64_noalias_sync_single_for_device(dev, sg->dma_address, sg->length, dir); +} + +static void arm64_noalias_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + arm64_noalias_sync_single_for_cpu(dev, sg->dma_address, sg->length, dir); +} + +static const struct dma_map_ops arm64_noalias_ops = { + .alloc = arm64_noalias_alloc, + .free = arm64_noalias_free, + .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, + .map_page = arm64_noalias_map_page, + .unmap_page = arm64_noalias_unmap_page, + .map_sg = arm64_noalias_map_sg, + .unmap_sg = arm64_noalias_unmap_sg, + .sync_single_for_cpu = arm64_noalias_sync_single_for_cpu, + .sync_single_for_device = arm64_noalias_sync_single_for_device, + .sync_sg_for_cpu = arm64_noalias_sync_sg_for_cpu, + .sync_sg_for_device = arm64_noalias_sync_sg_for_device, + .dma_supported = dma_direct_supported, + .get_required_mask = dma_direct_get_required_mask, + .max_mapping_size = swiotlb_max_mapping_size, +}; + +#ifdef CONFIG_IOMMU_DMA +static const struct dma_map_ops *iommu_dma_ops; + +static void *arm64_iommu_alloc(struct device *dev, size_t size, + dma_addr_t *dma_addr, gfp_t gfp, unsigned long attrs) +{ + struct page **pages; + void *ret; + int i; + + size = PAGE_ALIGN(size); + if (!gfpflags_allow_blocking(gfp) || (attrs & DMA_ATTR_FORCE_CONTIGUOUS)) { + ret = dma_common_alloc_pages(dev, size, dma_addr, 0, gfp); + return ret ? page_address(ret) : NULL; + } + + ret = iommu_dma_ops->alloc(dev, size, dma_addr, gfp, attrs); + if (ret) { + pages = dma_common_find_pages(ret); + for (i = 0; i < size / PAGE_SIZE; i++) + if (set_nc(page_address(pages[i]), PAGE_SIZE)) + goto err; + } + return ret; + +err: + while (i--) + clear_nc(page_address(pages[i]), PAGE_SIZE); + iommu_dma_ops->free(dev, size, ret, *dma_addr, attrs); + return NULL; +} + +static void arm64_iommu_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs) +{ + struct page **pages = dma_common_find_pages(cpu_addr); + int i; + + size = PAGE_ALIGN(size); + if (!pages) + return dma_common_free_pages(dev, size, virt_to_page(cpu_addr), dma_addr, 0); + + for (i = 0; i < size / PAGE_SIZE; i++) + clear_nc(page_address(pages[i]), PAGE_SIZE); + iommu_dma_ops->free(dev, size, cpu_addr, dma_addr, attrs); +} + +static dma_addr_t arm64_iommu_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + phys_addr_t phys = page_to_phys(page) + offset; + dma_addr_t ret; + + if (dir == DMA_TO_DEVICE) + return iommu_dma_ops->map_page(dev, page, offset, size, dir, attrs); + + phys = __arm64_noalias_map(dev, phys, size, dir, attrs, page_mapped(page)); + if (phys == DMA_MAPPING_ERROR) + return DMA_MAPPING_ERROR; + + attrs |= DMA_ATTR_SKIP_CPU_SYNC; + ret = iommu_dma_ops->map_page(dev, phys_to_page(phys), offset_in_page(phys), + size, dir, attrs); + if (ret == DMA_MAPPING_ERROR) + __arm64_noalias_unmap(dev, phys, size, dir, attrs); + return ret; +} + +static void arm64_iommu_unmap_page(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + phys_addr_t phys; + + if (dir == DMA_TO_DEVICE) + return iommu_dma_ops->unmap_page(dev, addr, size, dir, attrs); + + phys = 
iommu_iova_to_phys(iommu_get_dma_domain(dev), addr); + iommu_dma_ops->unmap_page(dev, addr, size, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + __arm64_noalias_unmap(dev, phys, size, dir, attrs); +} + +static int arm64_iommu_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + int i, ret; + struct scatterlist *sg; + phys_addr_t *orig_phys; + + if (dir == DMA_TO_DEVICE) + return iommu_dma_ops->map_sg(dev, sgl, nents, dir, attrs); + + orig_phys = kmalloc_array(nents, sizeof(*orig_phys), GFP_ATOMIC); + if (!orig_phys) + return 0; + + for_each_sg(sgl, sg, nents, i) { + phys_addr_t phys = sg_phys(sg); + /* + * Note we do not have the page_mapped() check here, since + * bouncing plays complete havoc with dma-buf imports. Those + * may well be mapped in userspace, but we hope and pray that + * it's via dma_mmap_attrs() so any such mappings are safely + * non-cacheable. DO NOT allow a block device or other similar + * scatterlist user to get here (disable IOMMUs if necessary), + * since we can't mitigate for both conflicting use-cases. + */ + phys = __arm64_noalias_map(dev, phys, sg->length, dir, attrs, false); + if (phys == DMA_MAPPING_ERROR) + goto out_unmap; + + orig_phys[i] = sg_phys(sg); + sg_assign_page(sg, phys_to_page(phys)); + sg->offset = offset_in_page(phys); + } + ret = iommu_dma_ops->map_sg(dev, sgl, nents, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + if (ret <= 0) + goto out_unmap; + + for_each_sg(sgl, sg, nents, i) { + sg_assign_page(sg, phys_to_page(orig_phys[i])); + sg->offset = offset_in_page(orig_phys[i]); + } + + kfree(orig_phys); + return ret; + +out_unmap: + for_each_sg(sgl, sg, nents, i) { + __arm64_noalias_unmap(dev, sg_phys(sg), sg->length, dir, attrs); + sg_assign_page(sg, phys_to_page(orig_phys[i])); + sg->offset = offset_in_page(orig_phys[i]); + } + kfree(orig_phys); + return 0; +} + +static void arm64_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + struct iommu_domain *domain; + struct scatterlist *sg, *tmp; + dma_addr_t iova; + int i; + + if (dir == DMA_TO_DEVICE) + return iommu_dma_ops->unmap_sg(dev, sgl, nents, dir, attrs); + + domain = iommu_get_dma_domain(dev); + iova = sgl->dma_address; + tmp = sgl; + for_each_sg(sgl, sg, nents, i) { + phys_addr_t phys = iommu_iova_to_phys(domain, iova); + + __arm64_noalias_unmap(dev, phys, sg->length, dir, attrs); + iova += sg->length; + if (iova == tmp->dma_address + tmp->dma_length && !sg_is_last(tmp)) { + tmp = sg_next(tmp); + iova = tmp->dma_address; + } + } + iommu_dma_ops->unmap_sg(dev, sgl, nents, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); +} + +static void arm64_iommu_sync_single_for_device(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + phys_addr_t phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), addr); + + __arm64_noalias_sync_for_device(dev, phys, size, dir); +} + +static void arm64_iommu_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + phys_addr_t phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), addr); + + __arm64_noalias_sync_for_cpu(dev, phys, size, dir); +} + +static void arm64_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) +{ + struct iommu_domain *domain = iommu_get_dma_domain(dev); + struct scatterlist *sg, *tmp = sgl; + dma_addr_t iova = sgl->dma_address; + int i; + + for_each_sg(sgl, sg, nents, i) { + phys_addr_t 
phys = iommu_iova_to_phys(domain, iova); + + __arm64_noalias_sync_for_device(dev, phys, sg->length, dir); + iova += sg->length; + if (iova == tmp->dma_address + tmp->dma_length && !sg_is_last(tmp)) { + tmp = sg_next(tmp); + iova = tmp->dma_address; + } + } +} + +static void arm64_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) +{ + struct iommu_domain *domain = iommu_get_dma_domain(dev); + struct scatterlist *sg, *tmp = sgl; + dma_addr_t iova = sgl->dma_address; + int i; + + for_each_sg(sgl, sg, nents, i) { + phys_addr_t phys = iommu_iova_to_phys(domain, iova); + + __arm64_noalias_sync_for_cpu(dev, phys, sg->length, dir); + iova += sg->length; + if (iova == tmp->dma_address + tmp->dma_length && !sg_is_last(tmp)) { + tmp = sg_next(tmp); + iova = tmp->dma_address; + } + } +} + +static struct dma_map_ops arm64_iommu_ops = { + .alloc = arm64_iommu_alloc, + .free = arm64_iommu_free, + .alloc_pages = dma_common_alloc_pages, + .free_pages = dma_common_free_pages, + .map_page = arm64_iommu_map_page, + .unmap_page = arm64_iommu_unmap_page, + .map_sg = arm64_iommu_map_sg, + .unmap_sg = arm64_iommu_unmap_sg, + .sync_single_for_cpu = arm64_iommu_sync_single_for_cpu, + .sync_single_for_device = arm64_iommu_sync_single_for_device, + .sync_sg_for_cpu = arm64_iommu_sync_sg_for_cpu, + .sync_sg_for_device = arm64_iommu_sync_sg_for_device, +}; + +#endif /* CONFIG_IOMMU_DMA */ + +static inline void arm64_noalias_prepare(void) +{ + if (!is_swiotlb_active()) + swiotlb_late_init_with_default_size(swiotlb_size_or_default()); + if (lazy_vunmap_enable) { + lazy_vunmap_enable = false; + vm_unmap_aliases(); + } +} + +void arm64_noalias_setup_dma_ops(struct device *dev) +{ + if (dev_is_dma_coherent(dev)) + return; + + dev_info(dev, "applying no-alias DMA workaround\n"); + if (!dev->dma_ops) { + dev->dma_ops = &arm64_noalias_ops; + goto done; + } + + if (IS_ENABLED(CONFIG_IOMMU_DMA)) { + dev->dma_ops = &arm64_iommu_ops; + if (iommu_dma_ops) + goto done; + + iommu_dma_ops = dev->dma_ops; + arm64_iommu_ops.mmap = iommu_dma_ops->mmap; + arm64_iommu_ops.get_sgtable = iommu_dma_ops->get_sgtable; + arm64_iommu_ops.map_resource = iommu_dma_ops->map_resource; + arm64_iommu_ops.unmap_resource = iommu_dma_ops->unmap_resource; + arm64_iommu_ops.get_merge_boundary = iommu_dma_ops->get_merge_boundary; + } +done: + arm64_noalias_prepare(); +} +EXPORT_SYMBOL_GPL(arm64_noalias_setup_dma_ops); diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index b44bd952b831..2ee48958d459 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -56,6 +57,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, trace_android_rvh_iommu_setup_dma_ops(dev, dma_base, size); } + /* Allow vendor modules to opt-in for the 2454944 erratum workaround */ + trace_android_rvh_setup_dma_ops(dev); + #ifdef CONFIG_XEN if (xen_initial_domain()) dev->dma_ops = &xen_swiotlb_dma_ops; diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index aa0060178343..60a8b6a8a42b 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -9,14 +9,19 @@ int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *fixup; + unsigned long addr; - fixup = search_exception_tables(instruction_pointer(regs)); + addr = instruction_pointer(regs); + + /* Search the BPF tables first, these are formatted differently */ + fixup = search_bpf_extables(addr); + if (fixup) + 
return arm64_bpf_fixup_exception(fixup, regs); + + fixup = search_exception_tables(addr); if (!fixup) return 0; - if (in_bpf_jit(regs)) - return arm64_bpf_fixup_exception(fixup, regs); - regs->pc = (unsigned long)&fixup->fixup + fixup->fixup; return 1; } diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c index b5e83c46b23e..f173a01a0c0e 100644 --- a/arch/arm64/mm/ioremap.c +++ b/arch/arm64/mm/ioremap.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -99,3 +100,11 @@ void __init early_ioremap_init(void) { early_ioremap_setup(); } + +bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, + unsigned long flags) +{ + unsigned long pfn = PHYS_PFN(offset); + + return memblock_is_map_memory(pfn); +} diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c index 04137a8f3d2d..bf8ddeac5d8f 100644 --- a/arch/arm64/mm/ptdump.c +++ b/arch/arm64/mm/ptdump.c @@ -41,8 +41,6 @@ static struct addr_marker address_markers[] = { { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" }, { KASAN_SHADOW_END, "Kasan shadow end" }, #endif - { BPF_JIT_REGION_START, "BPF start" }, - { BPF_JIT_REGION_END, "BPF end" }, { MODULES_VADDR, "Modules start" }, { MODULES_END, "Modules end" }, { VMALLOC_START, "vmalloc() area" }, diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index b08c0b1eb43a..86a252da69ab 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1112,6 +1112,7 @@ skip_init_ctx: bpf_jit_binary_free(header); prog->bpf_func = NULL; prog->jited = 0; + prog->jited_len = 0; goto out_off; } bpf_jit_binary_lock_ro(header); @@ -1147,15 +1148,12 @@ out: u64 bpf_jit_alloc_exec_limit(void) { - return BPF_JIT_REGION_SIZE; + return VMALLOC_END - VMALLOC_START; } void *bpf_jit_alloc_exec(unsigned long size) { - return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START, - BPF_JIT_REGION_END, GFP_KERNEL, - PAGE_KERNEL, 0, NUMA_NO_NODE, - __builtin_return_address(0)); + return vmalloc(size); } void bpf_jit_free_exec(void *addr) diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h index f70382844b96..dd8913d57189 100644 --- a/arch/c6x/include/asm/thread_info.h +++ b/arch/c6x/include/asm/thread_info.h @@ -82,6 +82,7 @@ struct thread_info *current_thread_info(void) #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_MEMDIE 17 /* OOM killer killed process */ diff --git a/arch/c6x/kernel/asm-offsets.c b/arch/c6x/kernel/asm-offsets.c index 0f8fde494875..4a264ef87dcb 100644 --- a/arch/c6x/kernel/asm-offsets.c +++ b/arch/c6x/kernel/asm-offsets.c @@ -116,6 +116,7 @@ void foo(void) DEFINE(_TIF_NOTIFY_RESUME, (1< #include +#include #include #include @@ -313,7 +314,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags, int syscall) { /* deal with pending signal delivery */ - if (thread_info_flags & (1 << TIF_SIGPENDING)) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs, syscall); if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index 4800f6563abb..baf9388d5952 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h @@ -6,9 +6,9 @@ #include #include +#include #include #include -#include #include #include #include diff --git 
a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h index 68e7a1227170..21456a3737c2 100644 --- a/arch/csky/include/asm/thread_info.h +++ b/arch/csky/include/asm/thread_info.h @@ -64,6 +64,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ #define TIF_SYSCALL_TRACEPOINT 5 /* syscall tracepoint instrumentation */ #define TIF_SYSCALL_AUDIT 6 /* syscall auditing */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_POLLING_NRFLAG 16 /* poll_idle() is TIF_NEED_RESCHED */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */ @@ -75,6 +76,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_MEMDIE (1 << TIF_MEMDIE) @@ -82,7 +84,8 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE) + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_NOTIFY_SIGNAL) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c index 589f090f48b9..79272dde72db 100644 --- a/arch/csky/kernel/probes/kprobes.c +++ b/arch/csky/kernel/probes/kprobes.c @@ -28,7 +28,7 @@ static int __kprobes patch_text_cb(void *priv) struct csky_insn_patch *param = priv; unsigned int addr = (unsigned int)param->addr; - if (atomic_inc_return(&param->cpu_count) == 1) { + if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) { *(u16 *) addr = cpu_to_le16(param->opcode); dcache_wb_range(addr, addr + 2); atomic_inc(&param->cpu_count); @@ -124,6 +124,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { + if (p->ainsn.api.insn) { + free_insn_slot(p->ainsn.api.insn, 0); + p->ainsn.api.insn = NULL; + } } static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c index 69af6bc87e64..3d0ca22cd0e2 100644 --- a/arch/csky/kernel/process.c +++ b/arch/csky/kernel/process.c @@ -49,7 +49,7 @@ int copy_thread(unsigned long clone_flags, /* setup thread.sp for switch_to !!!
*/ p->thread.sp = (unsigned long)childstack; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); childstack->r15 = (unsigned long) ret_from_kernel_thread; childstack->r10 = kthread_arg; diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c index 243228b0aa07..f7c1677e5971 100644 --- a/arch/csky/kernel/signal.c +++ b/arch/csky/kernel/signal.c @@ -261,7 +261,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, uprobe_notify_resume(regs); /* Handle pending signal delivery */ - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h index 0cdaa302d3d2..a518214d4ddd 100644 --- a/arch/h8300/include/asm/thread_info.h +++ b/arch/h8300/include/asm/thread_info.h @@ -73,6 +73,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ #define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling TIF_NEED_RESCHED */ +#define TIF_NOTIFY_SIGNAL 10 /* signal notifications exist */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) @@ -83,6 +84,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) /* work to do in syscall trace */ #define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ @@ -92,7 +94,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \ - _TIF_SYSCALL_TRACEPOINT) + _TIF_SYSCALL_TRACEPOINT | _TIF_NOTIFY_SIGNAL) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index bc1364db58fe..46b1342ce515 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c @@ -112,7 +112,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); childregs->retpc = (unsigned long) ret_from_kernel_thread; childregs->er4 = topstk; /* arg */ diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c index 75d9b7e626b2..75a1c36b105a 100644 --- a/arch/h8300/kernel/signal.c +++ b/arch/h8300/kernel/signal.c @@ -279,7 +279,7 @@ static void do_signal(struct pt_regs *regs) asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) { - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index f2afabbadd43..cc2c1ae48e62 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -32,6 +32,7 @@ config HEXAGON select MODULES_USE_ELF_RELA select GENERIC_CPU_DEVICES select 
SET_FS + select ARCH_WANT_LD_ORPHAN_WARN help Qualcomm Hexagon is a processor architecture designed for high performance and low power across a wide variety of applications. diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h index 563da1986464..535976665bf0 100644 --- a/arch/hexagon/include/asm/thread_info.h +++ b/arch/hexagon/include/asm/thread_info.h @@ -95,6 +95,7 @@ register struct thread_info *__current_thread_info asm(QUOTED_THREADINFO_REG); #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SINGLESTEP 4 /* restore ss @ return to usr mode */ #define TIF_RESTORE_SIGMASK 6 /* restore sig mask in do_signal() */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 17 /* OOM killer killed process */ @@ -103,6 +104,7 @@ register struct thread_info *__current_thread_info asm(QUOTED_THREADINFO_REG); #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) /* work to do on interrupt/exception return - All but TIF_SYSCALL_TRACE */ #define _TIF_WORK_MASK (0x0000FFFF & ~_TIF_SYSCALL_TRACE) diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index 67767c5ed98c..c61165c99ae0 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c @@ -73,7 +73,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, sizeof(*ss)); ss->lr = (unsigned long)ret_from_fork; p->thread.switch_sp = ss; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); /* r24 <- fn, r25 <- arg */ ss->r24 = usp; @@ -174,7 +174,7 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags) return 1; } - if (thread_info_flags & _TIF_SIGPENDING) { + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { do_signal(regs); return 1; } diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index 2d8bcdc27d7f..05e7c9ad1a96 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h @@ -542,7 +542,7 @@ ia64_get_irr(unsigned int vector) { unsigned int reg = vector / 64; unsigned int bit = vector % 64; - u64 irr; + unsigned long irr; switch (reg) { case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break; diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index 64a1011f6812..51d20cb37706 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -103,6 +103,7 @@ struct thread_info { #define TIF_SYSCALL_TRACE 2 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */ #define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notification exist */ #define TIF_NOTIFY_RESUME 6 /* resumption notification requested */ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ @@ -115,6 +116,7 @@ struct thread_info { #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << 
TIF_NEED_RESCHED) #define _TIF_MCA_INIT (1 << TIF_MCA_INIT) @@ -124,7 +126,7 @@ struct thread_info { /* "work to do on user-return" bits */ #define TIF_ALLWORK_MASK (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\ - _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE) + _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_NOTIFY_SIGNAL) /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h index 869a3ac6bf23..7ccc077a60be 100644 --- a/arch/ia64/include/asm/timex.h +++ b/arch/ia64/include/asm/timex.h @@ -39,6 +39,7 @@ get_cycles (void) ret = ia64_getreg(_IA64_REG_AR_ITC); return ret; } +#define get_cycles get_cycles extern void ia64_cpu_local_tick (void); extern unsigned long long ia64_native_sched_clock (void); diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index c9ff8796b509..8159b7af5509 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -171,7 +171,8 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) } /* deal with pending signal delivery */ - if (test_thread_flag(TIF_SIGPENDING)) { + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) { local_irq_enable(); /* force interrupt enable */ ia64_do_signal(scr, in_syscall); } @@ -337,7 +338,7 @@ copy_thread(unsigned long clone_flags, unsigned long user_stack_base, ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { if (unlikely(!user_stack_base)) { /* fork_idle() called us */ return 0; diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index e67b22fc3c60..c1b299760bf7 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -341,7 +341,8 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) * need to push through a forced SIGSEGV. 
*/ while (1) { - get_signal(&ksig); + if (!get_signal(&ksig)) + break; /* * get_signal() may have run a debugger (via notify_parent()) diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index f34964271101..6cd002e8163d 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -106,5 +106,6 @@ int memory_add_physaddr_to_nid(u64 addr) return 0; return nid; } +EXPORT_SYMBOL(memory_add_physaddr_to_nid); #endif #endif diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus index d1e93a39cd3b..f1be832e2b74 100644 --- a/arch/m68k/Kconfig.bus +++ b/arch/m68k/Kconfig.bus @@ -63,7 +63,7 @@ source "drivers/zorro/Kconfig" endif -if COLDFIRE +if !MMU config ISA_DMA_API def_bool !M5272 diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index c17205da47fe..936cd9619bf0 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -312,7 +312,7 @@ comment "Processor Specific Options" config M68KFPU_EMU bool "Math emulation support" - depends on MMU + depends on M68KCLASSIC && FPU help At some point in the future, this will cause floating-point math instructions to be emulated by the kernel on machines that lack a diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine index 51a878803fb6..16730561d166 100644 --- a/arch/m68k/Kconfig.machine +++ b/arch/m68k/Kconfig.machine @@ -321,6 +321,7 @@ comment "Machine Options" config UBOOT bool "Support for U-Boot command line parameters" + depends on COLDFIRE help If you say Y here kernel will try to collect command line parameters from the initial u-boot stack. diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h index 87151d67d91e..bce5ca56c388 100644 --- a/arch/m68k/include/asm/pgtable_no.h +++ b/arch/m68k/include/asm/pgtable_no.h @@ -42,7 +42,8 @@ extern void paging_init(void); * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. */ -#define ZERO_PAGE(vaddr) (virt_to_page(0)) +extern void *empty_zero_page; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) /* * All 32bit addresses are effectively valid for vmalloc... 
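[The ia64 signal.c hunk above belongs to the TIF_NOTIFY_SIGNAL series that recurs across the architectures in this set: get_signal() may now return false when a task was woken only to run task_work (e.g. for io_uring), so callers must tolerate the no-signal case. The following is a rough illustrative sketch of the common per-arch pattern, not code from any of the patched files; the function and flag names merely follow the convention visible in the surrounding hunks.]

	/*
	 * Sketch only: the shape of the per-architecture plumbing for
	 * TIF_NOTIFY_SIGNAL, assuming the usual do_notify_resume() /
	 * do_signal() split seen in the hunks above and below.
	 */
	asmlinkage void do_notify_resume(struct pt_regs *regs,
					 unsigned long ti_flags)
	{
		/*
		 * TIF_NOTIFY_SIGNAL must enter the signal path even when
		 * no real signal is queued; get_signal() clears the flag
		 * and can return false, so do_signal() has to cope with
		 * finding nothing to deliver.
		 */
		if (ti_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			do_signal(regs);

		if (ti_flags & _TIF_NOTIFY_RESUME)
			tracehook_notify_resume(regs);
	}

[Alongside this, each architecture defines the TIF_NOTIFY_SIGNAL bit, adds _TIF_NOTIFY_SIGNAL to its exit-work mask so the slow path is taken, and has copy_thread() treat PF_IO_WORKER like PF_KTHREAD so io_uring worker threads start from a zeroed register frame, as the copy_thread() hunks throughout this section show.]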
diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h index 80eb2396d01e..3ba40bc1dfaa 100644 --- a/arch/m68k/include/asm/raw_io.h +++ b/arch/m68k/include/asm/raw_io.h @@ -80,14 +80,14 @@ ({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; }) #define rom_out_8(addr, b) \ - ({u8 __maybe_unused __w, __v = (b); u32 _addr = ((u32) (addr)); \ + (void)({u8 __maybe_unused __w, __v = (b); u32 _addr = ((u32) (addr)); \ __w = ((*(__force volatile u8 *) ((_addr | 0x10000) + (__v<<1)))); }) #define rom_out_be16(addr, w) \ - ({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \ + (void)({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \ __w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v & 0xFF)<<1)))); \ __w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v >> 8)<<1)))); }) #define rom_out_le16(addr, w) \ - ({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \ + (void)({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \ __w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v >> 8)<<1)))); \ __w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v & 0xFF)<<1)))); }) diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h index 3689c6718c88..15a757073fa5 100644 --- a/arch/m68k/include/asm/thread_info.h +++ b/arch/m68k/include/asm/thread_info.h @@ -60,6 +60,7 @@ static inline struct thread_info *current_thread_info(void) * bits 0-7 are tested at every exception exit * bits 8-15 are also tested at syscall exit */ +#define TIF_NOTIFY_SIGNAL 4 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ #define TIF_SIGPENDING 6 /* signal pending */ #define TIF_NEED_RESCHED 7 /* rescheduling necessary */ diff --git a/arch/m68k/include/asm/timex.h b/arch/m68k/include/asm/timex.h index 6a21d9358280..f4a7a340f4ca 100644 --- a/arch/m68k/include/asm/timex.h +++ b/arch/m68k/include/asm/timex.h @@ -35,7 +35,7 @@ static inline unsigned long random_get_entropy(void) { if (mach_random_get_entropy) return mach_random_get_entropy(); - return 0; + return random_get_entropy_fallback(); } #define random_get_entropy random_get_entropy diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 08359a6e058f..da83cc83e791 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -157,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, */ p->thread.fs = get_fs().seg; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(frame, 0, sizeof(struct fork_frame)); frame->regs.sr = PS_S; diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index fd916844a683..5d12736b4b28 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -1129,7 +1129,8 @@ static void do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs) { - if (test_thread_flag(TIF_SIGPENDING)) + if (test_thread_flag(TIF_NOTIFY_SIGNAL) || + test_thread_flag(TIF_SIGPENDING)) do_signal(regs); if (test_thread_flag(TIF_NOTIFY_RESUME)) diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index b13463d39b38..3c837abbebf0 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h @@ -162,7 +162,8 @@ extern int page_is_ram(unsigned long pfn); # define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) # else /* CONFIG_MMU */ # define ARCH_PFN_OFFSET (memory_start >> 
PAGE_SHIFT) -# define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET)) +# define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && \ + (pfn) < (max_mapnr + ARCH_PFN_OFFSET)) # endif /* CONFIG_MMU */ # endif /* __ASSEMBLY__ */ diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h index ad8e8fcb90d3..44f5ca331862 100644 --- a/arch/microblaze/include/asm/thread_info.h +++ b/arch/microblaze/include/asm/thread_info.h @@ -107,6 +107,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ /* restore singlestep on return to user mode */ #define TIF_SINGLESTEP 4 +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_MEMDIE 6 /* is terminating due to OOM killer */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ #define TIF_SECCOMP 10 /* secure computing */ @@ -119,6 +120,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index f99860771ff4..ee000ae17e39 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -59,7 +59,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct pt_regs *childregs = task_pt_regs(p); struct thread_info *ti = task_thread_info(p); - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* if we're creating a new kernel thread then just zeroing all * the registers. 
That's OK for a brand new thread.*/ memset(childregs, 0, sizeof(struct pt_regs)); diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index f11a0ccccabc..5a8d173d7b75 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c @@ -313,7 +313,8 @@ static void do_signal(struct pt_regs *regs, int in_syscall) asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall) { - if (test_thread_flag(TIF_SIGPENDING)) + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) do_signal(regs, in_syscall); if (test_thread_flag(TIF_NOTIFY_RESUME)) diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c index 3e2a8166377f..22509b5fab74 100644 --- a/arch/mips/bcm47xx/prom.c +++ b/arch/mips/bcm47xx/prom.c @@ -86,7 +86,7 @@ static __init void prom_init_mem(void) pr_debug("Assume 128MB RAM\n"); break; } - if (!memcmp(prom_init, prom_init + mem, 32)) + if (!memcmp((void *)prom_init, (void *)prom_init + mem, 32)) break; } lowmem = mem; @@ -163,7 +163,7 @@ void __init bcm47xx_prom_highmem_init(void) off = EXTVBASE + __pa(off); for (extmem = 128 << 20; extmem < 512 << 20; extmem <<= 1) { - if (!memcmp(prom_init, (void *)(off + extmem), 16)) + if (!memcmp((void *)prom_init, (void *)(off + extmem), 16)) break; } extmem -= lowmem; diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c index 19308df5f577..1b06b25aea87 100644 --- a/arch/mips/bmips/setup.c +++ b/arch/mips/bmips/setup.c @@ -167,7 +167,7 @@ void __init plat_mem_setup(void) dtb = phys_to_virt(fw_arg2); else if (fw_passed_dtb) /* UHI interface or appended dtb */ dtb = (void *)fw_passed_dtb; - else if (__dtb_start != __dtb_end) + else if (&__dtb_start != &__dtb_end) dtb = (void *)__dtb_start; else panic("no dtb found"); diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi index dfb5a7e1bb21..830e5dd3550e 100644 --- a/arch/mips/boot/dts/ingenic/jz4780.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi @@ -429,7 +429,7 @@ #address-cells = <1>; #size-cells = <1>; - eth0_addr: eth-mac-addr@0x22 { + eth0_addr: eth-mac-addr@22 { reg = <0x22 0x6>; }; }; diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 6501a842c41a..191bcaf56513 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -127,6 +127,16 @@ static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq) static int octeon_irq_force_ciu_mapping(struct irq_domain *domain, int irq, int line, int bit) { + struct device_node *of_node; + int ret; + + of_node = irq_domain_get_of_node(domain); + if (!of_node) + return -EINVAL; + ret = irq_alloc_desc_at(irq, of_node_to_nid(of_node)); + if (ret < 0) + return ret; + return irq_domain_associate(domain, irq, line << 6 | bit); } diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index a994022e32c9..ce05c0dd3acd 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -86,11 +86,12 @@ static void octeon2_usb_clocks_start(struct device *dev) "refclk-frequency", &clock_rate); if (i) { dev_err(dev, "No UCTL \"refclk-frequency\"\n"); + of_node_put(uctl_node); goto exit; } i = of_property_read_string(uctl_node, "refclk-type", &clock_type); - + of_node_put(uctl_node); if (!i && strcmp("crystal", clock_type) == 0) is_crystal_clock = true; } diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h index d0ef8b4892bb..d0494ce4b337 100644 
--- a/arch/mips/include/asm/fw/fw.h +++ b/arch/mips/include/asm/fw/fw.h @@ -26,6 +26,6 @@ extern char *fw_getcmdline(void); extern void fw_meminit(void); extern char *fw_getenv(char *name); extern unsigned long fw_getenvl(char *name); -extern void fw_init_early_console(char port); +extern void fw_init_early_console(void); #endif /* __ASM_FW_H_ */ diff --git a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h index 58f829c9b6c7..79d6fd249583 100644 --- a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h @@ -26,7 +26,6 @@ #define cpu_has_3k_cache 0 #define cpu_has_4k_cache 1 #define cpu_has_tx39_cache 0 -#define cpu_has_fpu 1 #define cpu_has_nofpuex 0 #define cpu_has_32fpr 1 #define cpu_has_counter 1 diff --git a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h index 49a93e82c252..2635b6ba1cb5 100644 --- a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h @@ -29,7 +29,6 @@ #define cpu_has_3k_cache 0 #define cpu_has_4k_cache 1 #define cpu_has_tx39_cache 0 -#define cpu_has_fpu 1 #define cpu_has_nofpuex 0 #define cpu_has_32fpr 1 #define cpu_has_counter 1 diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h index bb36a400203d..8c56b862fd9c 100644 --- a/arch/mips/include/asm/setup.h +++ b/arch/mips/include/asm/setup.h @@ -16,7 +16,7 @@ static inline void setup_8250_early_printk_port(unsigned long base, unsigned int reg_shift, unsigned int timeout) {} #endif -extern void set_handler(unsigned long offset, void *addr, unsigned long len); +void set_handler(unsigned long offset, const void *addr, unsigned long len); extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len); typedef void (*vi_handler_t)(void); diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index ee26f9a4575d..e2c352da3877 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -115,6 +115,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SECCOMP 4 /* secure computing */ #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ #define TIF_UPROBE 6 /* breakpointed or singlestepping */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ @@ -139,6 +140,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SECCOMP (1<= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0)))) + prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15)))) return 1; else return 0; @@ -76,25 +76,24 @@ static inline cycles_t get_cycles(void) else return 0; /* no usable counter */ } +#define get_cycles get_cycles /* * Like get_cycles - but where c0_count is not available we desperately * use c0_random in an attempt to get at least a little bit of entropy. - * - * R6000 and R6000A neither have a count register nor a random register. - * That leaves no entropy source in the CPU itself. 
*/ static inline unsigned long random_get_entropy(void) { - unsigned int prid = read_c0_prid(); - unsigned int imp = prid & PRID_IMP_MASK; + unsigned int c0_random; - if (can_use_mips_counter(prid)) + if (can_use_mips_counter(read_c0_prid())) return read_c0_count(); - else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A)) - return read_c0_random(); + + if (cpu_has_3kex) + c0_random = (read_c0_random() >> 8) & 0x3f; else - return 0; /* no usable register */ + c0_random = read_c0_random() & 0x3f; + return (random_get_entropy_fallback() << 6) | (0x3f - c0_random); } #define random_get_entropy random_get_entropy diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c index 662c8db9f45b..9f5b1247b4ba 100644 --- a/arch/mips/kernel/jump_label.c +++ b/arch/mips/kernel/jump_label.c @@ -56,7 +56,7 @@ void arch_jump_label_transform(struct jump_entry *e, * The branch offset must fit in the instruction's 26 * bit field. */ - WARN_ON((offset >= BIT(25)) || + WARN_ON((offset >= (long)BIT(25)) || (offset < -(long)BIT(25))); insn.j_format.opcode = bc6_op; diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c index 8d2535123f11..d005be84c482 100644 --- a/arch/mips/kernel/mips-cpc.c +++ b/arch/mips/kernel/mips-cpc.c @@ -27,6 +27,7 @@ phys_addr_t __weak mips_cpc_default_phys_base(void) cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc"); if (cpc_node) { err = of_address_to_resource(cpc_node, 0, &res); + of_node_put(cpc_node); if (!err) return res.start; } diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 4184d641f05e..33a02f3814f5 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -172,7 +172,7 @@ static void *c_start(struct seq_file *m, loff_t *pos) { unsigned long i = *pos; - return i < NR_CPUS ? (void *) (i + 1) : NULL; + return i < nr_cpu_ids ? (void *) (i + 1) : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 75ebd8d7bd5d..98ecaf6f3edb 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -135,7 +135,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, /* Put the stack after the struct pt_regs. */ childksp = (unsigned long) childregs; p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ unsigned long status = p->thread.cp0_status; memset(childregs, 0, sizeof(struct pt_regs)); diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 50d0515bea21..f1e985109da0 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -903,7 +903,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, uprobe_notify_resume(regs); /* deal with pending signal delivery */ - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index caa01457dce6..ed339d7979f3 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -141,15 +141,10 @@ static __init int cpu_has_mfc0_count_bug(void) case CPU_R4400MC: /* * The published errata for the R4400 up to 3.0 say the CPU - * has the mfc0 from count bug. + * has the mfc0 from count bug. This seems the last version + * produced. 
*/ - if ((current_cpu_data.processor_id & 0xff) <= 0x30) - return 1; - - /* - * we assume newer revisions are ok - */ - return 0; + return 1; } return 0; diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e0352958e2f7..b1fe4518bd22 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -2097,19 +2097,19 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) * If no shadow set is selected then use the default handler * that does normal register saving and standard interrupt exit */ - extern char except_vec_vi, except_vec_vi_lui; - extern char except_vec_vi_ori, except_vec_vi_end; - extern char rollback_except_vec_vi; - char *vec_start = using_rollback_handler() ? - &rollback_except_vec_vi : &except_vec_vi; + extern const u8 except_vec_vi[], except_vec_vi_lui[]; + extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; + extern const u8 rollback_except_vec_vi[]; + const u8 *vec_start = using_rollback_handler() ? + rollback_except_vec_vi : except_vec_vi; #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) - const int lui_offset = &except_vec_vi_lui - vec_start + 2; - const int ori_offset = &except_vec_vi_ori - vec_start + 2; + const int lui_offset = except_vec_vi_lui - vec_start + 2; + const int ori_offset = except_vec_vi_ori - vec_start + 2; #else - const int lui_offset = &except_vec_vi_lui - vec_start; - const int ori_offset = &except_vec_vi_ori - vec_start; + const int lui_offset = except_vec_vi_lui - vec_start; + const int ori_offset = except_vec_vi_ori - vec_start; #endif - const int handler_len = &except_vec_vi_end - vec_start; + const int handler_len = except_vec_vi_end - vec_start; if (handler_len > VECTORSPACING) { /* @@ -2317,7 +2317,7 @@ void per_cpu_trap_init(bool is_boot_cpu) } /* Install CPU exception handler */ -void set_handler(unsigned long offset, void *addr, unsigned long size) +void set_handler(unsigned long offset, const void *addr, unsigned long size) { #ifdef CONFIG_CPU_MICROMIPS memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c index 7a623684d9b5..2d5a0bcb0cec 100644 --- a/arch/mips/lantiq/clk.c +++ b/arch/mips/lantiq/clk.c @@ -50,6 +50,7 @@ struct clk *clk_get_io(void) { return &cpu_clk_generic[2]; } +EXPORT_SYMBOL_GPL(clk_get_io); struct clk *clk_get_ppe(void) { diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c index 42222f849bd2..446a2536999b 100644 --- a/arch/mips/lantiq/falcon/sysctrl.c +++ b/arch/mips/lantiq/falcon/sysctrl.c @@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module, { struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + if (!clk) + return; clk->cl.dev_id = dev; clk->cl.con_id = NULL; clk->cl.clk = clk; diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index 51a218f04fe0..3f568f5aae2d 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -79,7 +79,7 @@ void __init plat_mem_setup(void) if (fw_passed_dtb) /* UHI interface */ dtb = (void *)fw_passed_dtb; - else if (__dtb_start != __dtb_end) + else if (&__dtb_start != &__dtb_end) dtb = (void *)__dtb_start; else panic("no dtb found"); diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c index 3d5683e75cf1..200fe9ff641d 100644 --- a/arch/mips/lantiq/xway/gptu.c +++ b/arch/mips/lantiq/xway/gptu.c @@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con, { struct clk *clk = kzalloc(sizeof(struct clk), 
GFP_KERNEL); + if (!clk) + return; clk->cl.dev_id = dev_name(dev); clk->cl.con_id = con; clk->cl.clk = clk; diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 917fac1636b7..084f6caba5f2 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -315,6 +315,8 @@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate, { struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + if (!clk) + return; clk->cl.dev_id = dev; clk->cl.con_id = con; clk->cl.clk = clk; @@ -338,6 +340,8 @@ static void clkdev_add_cgu(const char *dev, const char *con, { struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + if (!clk) + return; clk->cl.dev_id = dev; clk->cl.con_id = con; clk->cl.clk = clk; @@ -356,24 +360,28 @@ static void clkdev_add_pci(void) struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL); /* main pci clock */ - clk->cl.dev_id = "17000000.pci"; - clk->cl.con_id = NULL; - clk->cl.clk = clk; - clk->rate = CLOCK_33M; - clk->rates = valid_pci_rates; - clk->enable = pci_enable; - clk->disable = pmu_disable; - clk->module = 0; - clk->bits = PMU_PCI; - clkdev_add(&clk->cl); + if (clk) { + clk->cl.dev_id = "17000000.pci"; + clk->cl.con_id = NULL; + clk->cl.clk = clk; + clk->rate = CLOCK_33M; + clk->rates = valid_pci_rates; + clk->enable = pci_enable; + clk->disable = pmu_disable; + clk->module = 0; + clk->bits = PMU_PCI; + clkdev_add(&clk->cl); + } /* use internal/external bus clock */ - clk_ext->cl.dev_id = "17000000.pci"; - clk_ext->cl.con_id = "external"; - clk_ext->cl.clk = clk_ext; - clk_ext->enable = pci_ext_enable; - clk_ext->disable = pci_ext_disable; - clkdev_add(&clk_ext->cl); + if (clk_ext) { + clk_ext->cl.dev_id = "17000000.pci"; + clk_ext->cl.con_id = "external"; + clk_ext->cl.clk = clk_ext; + clk_ext->enable = pci_ext_enable; + clk_ext->disable = pci_ext_disable; + clkdev_add(&clk_ext->cl); + } } /* xway socs can generate clocks on gpio pins */ @@ -393,9 +401,15 @@ static void clkdev_add_clkout(void) char *name; name = kzalloc(sizeof("clkout0"), GFP_KERNEL); + if (!name) + continue; sprintf(name, "clkout%d", i); clk = kzalloc(sizeof(struct clk), GFP_KERNEL); + if (!clk) { + kfree(name); + continue; + } clk->cl.dev_id = "1f103000.cgu"; clk->cl.con_id = name; clk->cl.clk = clk; diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c index 794c96c2a4cd..311dc1580bbd 100644 --- a/arch/mips/loongson32/common/platform.c +++ b/arch/mips/loongson32/common/platform.c @@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) if (plat_dat->bus_id) { __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 | GMAC1_USE_UART0, LS1X_MUX_CTRL0); - switch (plat_dat->interface) { + switch (plat_dat->phy_interface) { case PHY_INTERFACE_MODE_RGMII: val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23); break; @@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) break; default: pr_err("unsupported mii mode %d\n", - plat_dat->interface); + plat_dat->phy_interface); return -ENOTSUPP; } val &= ~GMAC1_SHUT; } else { - switch (plat_dat->interface) { + switch (plat_dat->phy_interface) { case PHY_INTERFACE_MODE_RGMII: val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01); break; @@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) break; default: pr_err("unsupported mii mode %d\n", - plat_dat->interface); + plat_dat->phy_interface); return -ENOTSUPP; } val &= ~GMAC0_SHUT; @@ -131,7 +131,7 @@ int 
ls1x_eth_mux_init(struct platform_device *pdev, void *priv) plat_dat = dev_get_platdata(&pdev->dev); val &= ~PHY_INTF_SELI; - if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) + if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) val |= 0x4 << PHY_INTF_SELI_SHIFT; __raw_writel(val, LS1X_MUX_CTRL1); @@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = { .bus_id = 0, .phy_addr = -1, #if defined(CONFIG_LOONGSON1_LS1B) - .interface = PHY_INTERFACE_MODE_MII, + .phy_interface = PHY_INTERFACE_MODE_MII, #elif defined(CONFIG_LOONGSON1_LS1C) - .interface = PHY_INTERFACE_MODE_RMII, + .phy_interface = PHY_INTERFACE_MODE_RMII, #endif .mdio_bus_data = &ls1x_mdio_bus_data, .dma_cfg = &ls1x_eth_dma_cfg, @@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = { static struct plat_stmmacenet_data ls1x_eth1_pdata = { .bus_id = 1, .phy_addr = -1, - .interface = PHY_INTERFACE_MODE_MII, + .phy_interface = PHY_INTERFACE_MODE_MII, .mdio_bus_data = &ls1x_mdio_bus_data, .dma_cfg = &ls1x_eth_dma_cfg, .has_gmac = 1, diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c index e9de6da0ce51..9dcfe9de55b0 100644 --- a/arch/mips/loongson32/ls1c/board.c +++ b/arch/mips/loongson32/ls1c/board.c @@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = { static int __init ls1c_platform_init(void) { ls1x_serial_set_uartclk(&ls1x_uart_pdev); - ls1x_rtc_set_extclk(&ls1x_rtc_pdev); return platform_add_devices(ls1c_platform_devices, ARRAY_SIZE(ls1c_platform_devices)); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index a7521b8f7658..e8e3635dda09 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -633,7 +633,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p, return; } - if (cpu_has_rixi && !!_PAGE_NO_EXEC) { + if (cpu_has_rixi && _PAGE_NO_EXEC != 0) { if (fill_includes_sw_bits) { UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); } else { @@ -2572,7 +2572,7 @@ static void check_pabits(void) unsigned long entry; unsigned pabits, fillbits; - if (!cpu_has_rixi || !_PAGE_NO_EXEC) { + if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) { /* * We'll only be making use of the fact that we can rotate bits * into the fill if the CPU supports RIXI, so don't bother diff --git a/arch/mips/pic32/pic32mzda/early_console.c b/arch/mips/pic32/pic32mzda/early_console.c index 25372e62783b..3cd1b408fa1c 100644 --- a/arch/mips/pic32/pic32mzda/early_console.c +++ b/arch/mips/pic32/pic32mzda/early_console.c @@ -27,7 +27,7 @@ #define U_BRG(x) (UART_BASE(x) + 0x40) static void __iomem *uart_base; -static char console_port = -1; +static int console_port = -1; static int __init configure_uart_pins(int port) { @@ -47,7 +47,7 @@ static int __init configure_uart_pins(int port) return 0; } -static void __init configure_uart(char port, int baud) +static void __init configure_uart(int port, int baud) { u32 pbclk; @@ -60,7 +60,7 @@ static void __init configure_uart(char port, int baud) uart_base + PIC32_SET(U_STA(port))); } -static void __init setup_early_console(char port, int baud) +static void __init setup_early_console(int port, int baud) { if (configure_uart_pins(port)) return; @@ -130,16 +130,15 @@ _out: return baud; } -void __init fw_init_early_console(char port) +void __init fw_init_early_console(void) { char *arch_cmdline = pic32_getcmdline(); - int baud = -1; + int baud, port; uart_base = ioremap(PIC32_BASE_UART, 0xc00); baud = get_baud_from_cmdline(arch_cmdline); - if (port == -1) - port = get_port_from_cmdline(arch_cmdline); + port = 
get_port_from_cmdline(arch_cmdline); if (port == -1) port = EARLY_CONSOLE_PORT; diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c index 50f376f058f4..488c0bee7ebf 100644 --- a/arch/mips/pic32/pic32mzda/init.c +++ b/arch/mips/pic32/pic32mzda/init.c @@ -28,7 +28,7 @@ static ulong get_fdtaddr(void) if (fw_passed_dtb && !fw_arg2 && !fw_arg3) return (ulong)fw_passed_dtb; - if (__dtb_start < __dtb_end) + if (&__dtb_start < &__dtb_end) ftaddr = (ulong)__dtb_start; return ftaddr; @@ -60,7 +60,7 @@ void __init plat_mem_setup(void) strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); #ifdef CONFIG_EARLY_PRINTK - fw_init_early_console(-1); + fw_init_early_console(); #endif pic32_config_init(); } diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c index bdf53807d7c2..bea857c9da8b 100644 --- a/arch/mips/ralink/ill_acc.c +++ b/arch/mips/ralink/ill_acc.c @@ -61,6 +61,7 @@ static int __init ill_acc_of_setup(void) pdev = of_find_device_by_node(np); if (!pdev) { pr_err("%pOFn: failed to lookup pdev\n", np); + of_node_put(np); return -EINVAL; } diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index a971f1aca096..3017263ac4f9 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -77,7 +77,7 @@ void __init plat_mem_setup(void) */ if (fw_passed_dtb) dtb = (void *)fw_passed_dtb; - else if (__dtb_start != __dtb_end) + else if (&__dtb_start != &__dtb_end) dtb = (void *)__dtb_start; __dt_setup_arch(dtb); diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c index 000ede156bdc..5143d1cf8984 100644 --- a/arch/mips/sgi-ip27/ip27-xtalk.c +++ b/arch/mips/sgi-ip27/ip27-xtalk.c @@ -27,15 +27,18 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid) { struct xtalk_bridge_platform_data *bd; struct sgi_w1_platform_data *wd; - struct platform_device *pdev; + struct platform_device *pdev_wd; + struct platform_device *pdev_bd; struct resource w1_res; unsigned long offset; offset = NODE_OFFSET(nasid); wd = kzalloc(sizeof(*wd), GFP_KERNEL); - if (!wd) - goto no_mem; + if (!wd) { + pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); + return; + } snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx", offset + (widget << SWIN_SIZE_BITS)); @@ -46,22 +49,35 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid) w1_res.end = w1_res.start + 3; w1_res.flags = IORESOURCE_MEM; - pdev = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO); - if (!pdev) { - kfree(wd); - goto no_mem; + pdev_wd = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO); + if (!pdev_wd) { + pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); + goto err_kfree_wd; } - platform_device_add_resources(pdev, &w1_res, 1); - platform_device_add_data(pdev, wd, sizeof(*wd)); - platform_device_add(pdev); + if (platform_device_add_resources(pdev_wd, &w1_res, 1)) { + pr_warn("xtalk:n%d/%x bridge failed to add platform resources.\n", nasid, widget); + goto err_put_pdev_wd; + } + if (platform_device_add_data(pdev_wd, wd, sizeof(*wd))) { + pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget); + goto err_put_pdev_wd; + } + if (platform_device_add(pdev_wd)) { + pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget); + goto err_put_pdev_wd; + } + /* platform_device_add_data() duplicates the data */ + kfree(wd); bd = kzalloc(sizeof(*bd), GFP_KERNEL); - if (!bd) - goto no_mem; - pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO); - if 
(!pdev) { - kfree(bd); - goto no_mem; + if (!bd) { + pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); + goto err_unregister_pdev_wd; + } + pdev_bd = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO); + if (!pdev_bd) { + pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); + goto err_kfree_bd; } @@ -82,13 +98,31 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid) bd->io.flags = IORESOURCE_IO; bd->io_offset = offset; - platform_device_add_data(pdev, bd, sizeof(*bd)); - platform_device_add(pdev); + if (platform_device_add_data(pdev_bd, bd, sizeof(*bd))) { + pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget); + goto err_put_pdev_bd; + } + if (platform_device_add(pdev_bd)) { + pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget); + goto err_put_pdev_bd; + } + /* platform_device_add_data() duplicates the data */ + kfree(bd); pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget); return; -no_mem: - pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); +err_put_pdev_bd: + platform_device_put(pdev_bd); +err_kfree_bd: + kfree(bd); +err_unregister_pdev_wd: + platform_device_unregister(pdev_wd); + return; +err_put_pdev_wd: + platform_device_put(pdev_wd); +err_kfree_wd: + kfree(wd); + return; } static int probe_one_port(nasid_t nasid, int widget, int masterwid) diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c index 7b7f25b4b057..9240bcdbe74e 100644 --- a/arch/mips/vr41xx/common/icu.c +++ b/arch/mips/vr41xx/common/icu.c @@ -640,8 +640,6 @@ static int icu_get_irq(unsigned int irq) printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2); - atomic_inc(&irq_err_count); - return -1; } diff --git a/arch/nds32/include/asm/thread_info.h b/arch/nds32/include/asm/thread_info.h index c135111ec44e..d3967ad184f0 100644 --- a/arch/nds32/include/asm/thread_info.h +++ b/arch/nds32/include/asm/thread_info.h @@ -48,6 +48,7 @@ struct thread_info { #define TIF_NEED_RESCHED 2 #define TIF_SINGLESTEP 3 #define TIF_NOTIFY_RESUME 4 /* callback before returning to user */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_SYSCALL_TRACE 8 #define TIF_POLLING_NRFLAG 17 #define TIF_MEMDIE 18 @@ -57,6 +58,7 @@ struct thread_info { #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S index 6a2966c2d8c8..b30699911b81 100644 --- a/arch/nds32/kernel/ex-exit.S +++ b/arch/nds32/kernel/ex-exit.S @@ -120,7 +120,7 @@ work_pending: andi $p1, $r1, #_TIF_NEED_RESCHED bnez $p1, work_resched - andi $p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME + andi $p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME|#_TIF_NOTIFY_SIGNAL beqz $p1, no_work_pending move $r0, $sp ! 
'regs' diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c index e01ad5d17224..c1327e552ec6 100644 --- a/arch/nds32/kernel/process.c +++ b/arch/nds32/kernel/process.c @@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); /* kernel thread fn */ p->thread.cpu_context.r6 = stack_start; diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c index 2acb94812af9..7e3ca430a223 100644 --- a/arch/nds32/kernel/signal.c +++ b/arch/nds32/kernel/signal.c @@ -376,7 +376,7 @@ static void do_signal(struct pt_regs *regs) asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int thread_flags) { - if (thread_flags & _TIF_SIGPENDING) + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_flags & _TIF_NOTIFY_RESUME) diff --git a/arch/nios2/boot/Makefile b/arch/nios2/boot/Makefile index 37dfc7e584bc..0b704c1f379f 100644 --- a/arch/nios2/boot/Makefile +++ b/arch/nios2/boot/Makefile @@ -20,7 +20,7 @@ $(obj)/vmlinux.bin: vmlinux FORCE $(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) -$(obj)/vmImage: $(obj)/vmlinux.gz +$(obj)/vmImage: $(obj)/vmlinux.gz FORCE $(call if_changed,uimage) @$(kecho) 'Kernel: $@ is ready' diff --git a/arch/nios2/include/asm/entry.h b/arch/nios2/include/asm/entry.h index cf37f55efbc2..bafb7b2ca59f 100644 --- a/arch/nios2/include/asm/entry.h +++ b/arch/nios2/include/asm/entry.h @@ -50,7 +50,8 @@ stw r13, PT_R13(sp) stw r14, PT_R14(sp) stw r15, PT_R15(sp) - stw r2, PT_ORIG_R2(sp) + movi r24, -1 + stw r24, PT_ORIG_R2(sp) stw r7, PT_ORIG_R7(sp) stw ra, PT_RA(sp) diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h index 642462144872..9da34c3022a2 100644 --- a/arch/nios2/include/asm/ptrace.h +++ b/arch/nios2/include/asm/ptrace.h @@ -74,6 +74,8 @@ extern void show_regs(struct pt_regs *); ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE)\ - 1) +#define force_successful_syscall_return() (current_pt_regs()->orig_r2 = -1) + int do_syscall_trace_enter(void); void do_syscall_trace_exit(void); #endif /* __ASSEMBLY__ */ diff --git a/arch/nios2/include/asm/thread_info.h b/arch/nios2/include/asm/thread_info.h index 7349a4fa635b..272d2c72a727 100644 --- a/arch/nios2/include/asm/thread_info.h +++ b/arch/nios2/include/asm/thread_info.h @@ -86,6 +86,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_MEMDIE 4 /* is terminating due to OOM killer */ #define TIF_SECCOMP 5 /* secure computing */ #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling @@ -97,6 +98,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) diff --git a/arch/nios2/include/asm/timex.h b/arch/nios2/include/asm/timex.h index a769f871b28d..40a1adc9bd03 100644 --- a/arch/nios2/include/asm/timex.h +++ b/arch/nios2/include/asm/timex.h 
@@ -8,5 +8,8 @@ typedef unsigned long cycles_t; extern cycles_t get_cycles(void); +#define get_cycles get_cycles + +#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback()) #endif diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S index 0794cd7803df..99f0a65e6234 100644 --- a/arch/nios2/kernel/entry.S +++ b/arch/nios2/kernel/entry.S @@ -185,6 +185,7 @@ ENTRY(handle_system_call) ldw r5, PT_R5(sp) local_restart: + stw r2, PT_ORIG_R2(sp) /* Check that the requested system call is within limits */ movui r1, __NR_syscalls bgeu r2, r1, ret_invsyscall @@ -192,7 +193,6 @@ local_restart: movhi r11, %hiadj(sys_call_table) add r1, r1, r11 ldw r1, %lo(sys_call_table)(r1) - beq r1, r0, ret_invsyscall /* Check if we are being traced */ GET_THREAD_INFO r11 @@ -213,6 +213,9 @@ local_restart: translate_rc_and_ret: movi r1, 0 bge r2, zero, 3f + ldw r1, PT_ORIG_R2(sp) + addi r1, r1, 1 + beq r1, zero, 3f sub r2, zero, r2 movi r1, 1 3: @@ -255,9 +258,9 @@ traced_system_call: ldw r6, PT_R6(sp) ldw r7, PT_R7(sp) - /* Fetch the syscall function, we don't need to check the boundaries - * since this is already done. - */ + /* Fetch the syscall function. */ + movui r1, __NR_syscalls + bgeu r2, r1, traced_invsyscall slli r1, r2, 2 movhi r11,%hiadj(sys_call_table) add r1, r1, r11 @@ -276,6 +279,9 @@ traced_system_call: translate_rc_and_ret2: movi r1, 0 bge r2, zero, 4f + ldw r1, PT_ORIG_R2(sp) + addi r1, r1, 1 + beq r1, zero, 4f sub r2, zero, r2 movi r1, 1 4: @@ -287,6 +293,11 @@ end_translate_rc_and_ret2: RESTORE_SWITCH_STACK br ret_from_exception + /* If the syscall number was invalid return ENOSYS */ +traced_invsyscall: + movi r2, -ENOSYS + br translate_rc_and_ret2 + Luser_return: GET_THREAD_INFO r11 /* get thread_info pointer */ ldw r10, TI_FLAGS(r11) /* get thread_info->flags */ @@ -336,9 +347,6 @@ external_interrupt: /* skip if no interrupt is pending */ beq r12, r0, ret_from_interrupt - movi r24, -1 - stw r24, PT_ORIG_R2(sp) - /* * Process an external hardware interrupt. */ diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c index 50b4eb19a6cc..c5f916ca6845 100644 --- a/arch/nios2/kernel/process.c +++ b/arch/nios2/kernel/process.c @@ -109,7 +109,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct switch_stack *childstack = ((struct switch_stack *)childregs) - 1; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childstack, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs)); diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c index e45491d1d3e4..68d626c4f1ba 100644 --- a/arch/nios2/kernel/signal.c +++ b/arch/nios2/kernel/signal.c @@ -242,7 +242,7 @@ static int do_signal(struct pt_regs *regs) /* * If we were from a system call, check for system call restarting... 
*/ - if (regs->orig_r2 >= 0) { + if (regs->orig_r2 >= 0 && regs->r1) { continue_addr = regs->ea; restart_addr = continue_addr - 4; retval = regs->r2; @@ -264,6 +264,7 @@ static int do_signal(struct pt_regs *regs) regs->ea = restart_addr; break; } + regs->orig_r2 = -1; } if (get_signal(&ksig)) { @@ -308,7 +309,8 @@ asmlinkage int do_notify_resume(struct pt_regs *regs) if (!user_mode(regs)) return 0; - if (test_thread_flag(TIF_SIGPENDING)) { + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) { int restart = do_signal(regs); if (unlikely(restart)) { diff --git a/arch/nios2/kernel/syscall_table.c b/arch/nios2/kernel/syscall_table.c index 6176d63023c1..c2875a6dd5a4 100644 --- a/arch/nios2/kernel/syscall_table.c +++ b/arch/nios2/kernel/syscall_table.c @@ -13,5 +13,6 @@ #define __SYSCALL(nr, call) [nr] = (call), void *sys_call_table[__NR_syscalls] = { + [0 ... __NR_syscalls-1] = sys_ni_syscall, #include <asm/unistd.h> }; diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h index 9afe68bc423b..4f9d2a261455 100644 --- a/arch/openrisc/include/asm/thread_info.h +++ b/arch/openrisc/include/asm/thread_info.h @@ -98,6 +98,7 @@ register struct thread_info *current_thread_info_reg asm("r10"); #define TIF_SINGLESTEP 4 /* restore singlestep on return to user * mode */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ #define TIF_RESTORE_SIGMASK 9 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling * TIF_NEED_RESCHED @@ -109,6 +110,7 @@ register struct thread_info *current_thread_info_reg asm("r10"); #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) +#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ ... @@ copy_thread - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(kregs, 0, sizeof(struct pt_regs)); kregs->gpr[20] = usp; /* fn, kernel thread */ kregs->gpr[22] = arg; diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index af66f968dd45..1ebcff271096 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -299,7 +299,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) if (unlikely(!user_mode(regs))) return 0; local_irq_enable(); - if (thread_flags & _TIF_SIGPENDING) { + if (thread_flags & (_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)) { int restart = do_signal(regs, syscall); if (unlikely(restart)) { /* diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index ecfd7af35094..149269e0be4e 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -11,6 +11,7 @@ config PARISC select ARCH_WANT_FRAME_POINTERS select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_STRICT_KERNEL_RWX + select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_NO_SG_CHAIN select ARCH_SUPPORTS_MEMORY_FAILURE @@ -315,6 +316,16 @@ config IRQSTACKS for handling hard and soft interrupts. This can help avoid overflowing the process kernel stacks. +config TLB_PTLOCK + bool "Use page table locks in TLB fault handler" + depends on SMP + default n + help + Select this option to enable page table locking in the TLB + fault handler. This ensures that page table entries are + updated consistently on SMP machines at the expense of some + loss in performance.
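Side note on the nios2 syscall_table.c hunk above: entry.S can drop its NULL check on the fetched handler (`beq r1, r0, ret_invsyscall`) because every unimplemented slot is now pre-filled with sys_ni_syscall by a designated-range initializer. A minimal, self-contained userspace sketch of that GCC/Clang extension follows; all names in it are illustrative, not kernel code:

/* Build with gcc or clang; [first ... last] = value is a GNU extension. */
#include <stdio.h>

#define NR_CALLS 8

static long ni_call(void)   { return -38; /* stand-in for -ENOSYS */ }
static long real_call(void) { return 0; }

static long (*call_table[NR_CALLS])(void) = {
	[0 ... NR_CALLS - 1] = ni_call,	/* default: every slot "not implemented" */
	[3] = real_call,		/* specific entries override the default */
};

int main(void)
{
	/* slot 3 hits the real handler, slot 5 falls back to ni_call;
	 * no NULL check is needed at dispatch time */
	printf("slot 3 -> %ld, slot 5 -> %ld\n",
	       call_table[3](), call_table[5]());
	return 0;
}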
+ config HOTPLUG_CPU bool default y if SMP diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h index c4cd6360f996..55d29c4f716e 100644 --- a/arch/parisc/include/asm/fb.h +++ b/arch/parisc/include/asm/fb.h @@ -12,9 +12,13 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; } +#if defined(CONFIG_FB_STI) +int fb_is_primary_device(struct fb_info *info); +#else static inline int fb_is_primary_device(struct fb_info *info) { return 0; } +#endif #endif /* _ASM_FB_H_ */ diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h index 9d3d7737c58b..a005ebc54779 100644 --- a/arch/parisc/include/asm/hardware.h +++ b/arch/parisc/include/asm/hardware.h @@ -10,12 +10,12 @@ #define SVERSION_ANY_ID PA_SVERSION_ANY_ID struct hp_hardware { - unsigned short hw_type:5; /* HPHW_xxx */ - unsigned short hversion; - unsigned long sversion:28; - unsigned short opt; - const char name[80]; /* The hardware description */ -}; + unsigned int hw_type:8; /* HPHW_xxx */ + unsigned int hversion:12; + unsigned int sversion:12; + unsigned char opt; + unsigned char name[59]; /* The hardware description */ +} __packed; struct parisc_device; diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h index cb5f2f730421..aba69ff79e8c 100644 --- a/arch/parisc/include/asm/mmu_context.h +++ b/arch/parisc/include/asm/mmu_context.h @@ -5,6 +5,7 @@ #include <linux/mm.h> #include <linux/sched.h> #include <linux/atomic.h> +#include <linux/spinlock.h> #include <asm-generic/mm_hooks.h> static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) @@ -52,6 +53,12 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { if (prev != next) { +#ifdef CONFIG_TLB_PTLOCK + /* put physical address of page_table_lock in cr28 (tr4) + for TLB faults */ + spinlock_t *pgd_lock = &next->page_table_lock; + mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28); +#endif mtctl(__pa(next->pgd), 25); load_context(next->context); } diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index 8802ce651a3a..0561568f7b48 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -112,7 +112,7 @@ extern int npmem_ranges; #else #define BITS_PER_PTE_ENTRY 2 #define BITS_PER_PMD_ENTRY 2 -#define BITS_PER_PGD_ENTRY BITS_PER_PMD_ENTRY +#define BITS_PER_PGD_ENTRY 2 #endif #define PGD_ENTRY_SIZE (1UL << BITS_PER_PGD_ENTRY) #define PMD_ENTRY_SIZE (1UL << BITS_PER_PMD_ENTRY) diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index a6482b2ce0ea..dda557085311 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -15,47 +15,23 @@ #define __HAVE_ARCH_PGD_FREE #include <asm-generic/pgalloc.h> -/* Allocate the top level pgd (page directory) * - * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we - * allocate the first pmd adjacent to the pgd. This means that we can - * subtract a constant offset to get to it. The pmd and pgd sizes are - * arranged so that a single pmd covers 4GB (giving a full 64-bit - * process access to 8TB) so our lookups are effectively L2 for the - * first 4GB of the kernel (i.e.
for all ILP32 processes and all the - * kernel for machines with under 4GB of memory) */ +/* Allocate the top level pgd (page directory) */ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, - PGD_ALLOC_ORDER); - pgd_t *actual_pgd = pgd; + pgd_t *pgd; - if (likely(pgd != NULL)) { - memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); -#if CONFIG_PGTABLE_LEVELS == 3 - actual_pgd += PTRS_PER_PGD; - /* Populate first pmd with allocated memory. We mark it - * with PxD_FLAG_ATTACHED as a signal to the system that this - * pmd entry may not be cleared. */ - set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT | - PxD_FLAG_VALID | - PxD_FLAG_ATTACHED) - + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT))); - /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as - * a signal that this pmd may not be freed */ - set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED)); -#endif - } - spin_lock_init(pgd_spinlock(actual_pgd)); - return actual_pgd; + pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + if (unlikely(pgd == NULL)) + return NULL; + + memset(pgd, 0, PAGE_SIZE << PGD_ORDER); + + return pgd; } static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { -#if CONFIG_PGTABLE_LEVELS == 3 - pgd -= PTRS_PER_PGD; -#endif - free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); + free_pages((unsigned long)pgd, PGD_ORDER); } #if CONFIG_PGTABLE_LEVELS == 3 @@ -70,41 +46,25 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) { - return (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER); + pmd_t *pmd; + + pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER); + if (likely(pmd)) + memset ((void *)pmd, 0, PAGE_SIZE << PMD_ORDER); + return pmd; } static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { - if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) { - /* - * This is the permanent pmd attached to the pgd; - * cannot free it. - * Increment the counter to compensate for the decrement - * done by generic mm code. - */ - mm_inc_nr_pmds(mm); - return; - } free_pages((unsigned long)pmd, PMD_ORDER); } - #endif static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { -#if CONFIG_PGTABLE_LEVELS == 3 - /* preserve the gateway marker if this is the beginning of - * the permanent pmd */ - if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) - set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | - PxD_FLAG_VALID | - PxD_FLAG_ATTACHED) - + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT))); - else -#endif - set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID) - + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT))); + set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID) + + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT))); } #define pmd_populate(mm, pmd, pte_page) \ diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 75cf84070fc9..8964798b8274 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -23,8 +23,6 @@ #include #include -static inline spinlock_t *pgd_spinlock(pgd_t *); /* * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel * memory. For the return value to be meaningful, ADDR must be >= @@ -42,12 +40,8 @@ static inline spinlock_t *pgd_spinlock(pgd_t *); /* This is for the serialization of PxTLB broadcasts. At least on the N class * systems, only one PxTLB inter processor broadcast can be active at any one - * time on the Merced bus. - - * PTE updates are protected by locks in the PMD. - */ + * time on the Merced bus.
*/ extern spinlock_t pa_tlb_flush_lock; -extern spinlock_t pa_swapper_pg_lock; #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) extern int pa_serialize_tlb_flushes; #else @@ -82,22 +76,25 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) purge_tlb_end(flags); } +extern void __update_cache(pte_t pte); + /* Certain architectures need to do special things when PTEs * within a page table are directly modified. Thus, the following * hook is made available. */ -#define set_pte(pteptr, pteval) \ - do{ \ - *(pteptr) = (pteval); \ - } while(0) +#define set_pte(pteptr, pteval) \ + do { \ + *(pteptr) = (pteval); \ + mb(); \ + } while(0) -#define set_pte_at(mm, addr, ptep, pteval) \ - do { \ - unsigned long flags; \ - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\ - set_pte(ptep, pteval); \ - purge_tlb_entries(mm, addr); \ - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\ +#define set_pte_at(mm, addr, pteptr, pteval) \ + do { \ + if (pte_present(pteval) && \ + pte_user(pteval)) \ + __update_cache(pteval); \ + *(pteptr) = (pteval); \ + purge_tlb_entries(mm, addr); \ } while (0) #endif /* !__ASSEMBLY__ */ @@ -120,12 +117,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) #if CONFIG_PGTABLE_LEVELS == 3 -#define PGD_ORDER 1 /* Number of pages per pgd */ -#define PMD_ORDER 1 /* Number of pages per pmd */ -#define PGD_ALLOC_ORDER (2 + 1) /* first pgd contains pmd */ +#define PMD_ORDER 1 +#define PGD_ORDER 0 #else -#define PGD_ORDER 1 /* Number of pages per pgd */ -#define PGD_ALLOC_ORDER (PGD_ORDER + 1) +#define PGD_ORDER 1 #endif /* Definitions for 3rd level (we use PLD here for Page Lower directory @@ -240,11 +235,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) * able to effectively address 40/42/44-bits of physical address space * depending on 4k/16k/64k PAGE_SIZE */ #define _PxD_PRESENT_BIT 31 -#define _PxD_ATTACHED_BIT 30 -#define _PxD_VALID_BIT 29 +#define _PxD_VALID_BIT 30 #define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT)) -#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT)) #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) #define PxD_FLAG_MASK (0xf) #define PxD_FLAG_SHIFT (4) @@ -317,6 +310,7 @@ extern unsigned long *empty_zero_page; #define pte_none(x) (pte_val(x) == 0) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) +#define pte_user(x) (pte_val(x) & _PAGE_USER) #define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0)) #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) @@ -326,23 +320,10 @@ extern unsigned long *empty_zero_page; #define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK) #define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) -#if CONFIG_PGTABLE_LEVELS == 3 -/* The first entry of the permanent pmd is not there if it contains - * the gateway marker */ -#define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED) -#else #define pmd_none(x) (!pmd_val(x)) -#endif #define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID)) #define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT) static inline void pmd_clear(pmd_t *pmd) { -#if CONFIG_PGTABLE_LEVELS == 3 - if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) - /* This is the entry pointing to the permanent pmd - * attached to the pgd; cannot clear it */ - set_pmd(pmd, __pmd(PxD_FLAG_ATTACHED)); - else -#endif set_pmd(pmd, __pmd(0)); } @@ -358,12 +339,6 @@ static inline void pmd_clear(pmd_t *pmd) { #define pud_bad(x) 
(!(pud_flag(x) & PxD_FLAG_VALID)) #define pud_present(x) (pud_flag(x) & PxD_FLAG_PRESENT) static inline void pud_clear(pud_t *pud) { -#if CONFIG_PGTABLE_LEVELS == 3 - if(pud_flag(*pud) & PxD_FLAG_ATTACHED) - /* This is the permanent pmd attached to the pud; cannot - * free it */ - return; -#endif set_pud(pud, __pud(0)); } #endif @@ -443,7 +418,7 @@ extern void paging_init (void); #define PG_dcache_dirty PG_arch_1 -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); +#define update_mmu_cache(vms,addr,ptep) __update_cache(*ptep) /* Encode and de-code a swap entry */ @@ -456,32 +431,18 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) - -static inline spinlock_t *pgd_spinlock(pgd_t *pgd) -{ - if (unlikely(pgd == swapper_pg_dir)) - return &pa_swapper_pg_lock; - return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1))); -} - - static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; - unsigned long flags; if (!pte_young(*ptep)) return 0; - spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags); pte = *ptep; if (!pte_young(pte)) { - spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags); return 0; } - set_pte(ptep, pte_mkold(pte)); - purge_tlb_entries(vma->vm_mm, addr); - spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags); + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); return 1; } @@ -489,24 +450,16 @@ struct mm_struct; static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t old_pte; - unsigned long flags; - spin_lock_irqsave(pgd_spinlock(mm->pgd), flags); old_pte = *ptep; - set_pte(ptep, __pte(0)); - purge_tlb_entries(mm, addr); - spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags); + set_pte_at(mm, addr, ptep, __pte(0)); return old_pte; } static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long flags; - spin_lock_irqsave(pgd_spinlock(mm->pgd), flags); - set_pte(ptep, pte_wrprotect(*ptep)); - purge_tlb_entries(mm, addr); - spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags); + set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep)); } #define pte_same(A,B) (pte_val(A) == pte_val(B)) diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 285757544cca..0bd38a972cea 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -52,6 +52,7 @@ struct thread_info { #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_32BIT 4 /* 32 bit binary */ #define TIF_MEMDIE 5 /* is terminating due to OOM killer */ +#define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ #define TIF_SINGLESTEP 9 /* single stepping? 
*/ @@ -61,6 +62,7 @@ struct thread_info { #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_32BIT (1 << TIF_32BIT) @@ -72,7 +74,7 @@ struct thread_info { #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ - _TIF_NEED_RESCHED) + _TIF_NEED_RESCHED | _TIF_NOTIFY_SIGNAL) #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h index 06b510f8172e..b4622cb06a75 100644 --- a/arch/parisc/include/asm/timex.h +++ b/arch/parisc/include/asm/timex.h @@ -13,9 +13,10 @@ typedef unsigned long cycles_t; -static inline cycles_t get_cycles (void) +static inline cycles_t get_cycles(void) { return mfctl(16); } +#define get_cycles get_cycles #endif diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 305768a40773..cd2cc1b1648c 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -268,7 +268,6 @@ int main(void) DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD); DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD); DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE); - DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER)); DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT)); DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT); DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE); diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 86a1a63563fd..c81ab0cb8925 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -83,9 +83,9 @@ EXPORT_SYMBOL(flush_cache_all_local); #define pfn_va(pfn) __va(PFN_PHYS(pfn)) void -update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +__update_cache(pte_t pte) { - unsigned long pfn = pte_pfn(*ptep); + unsigned long pfn = pte_pfn(pte); struct page *page; /* We don't have pte special. As a result, we can be called with diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 80fa0650736b..d95157488832 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -521,7 +521,6 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) dev->id.hversion_rev = iodc_data[1] & 0x0f; dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) | (iodc_data[5] << 8) | iodc_data[6]; - dev->hpa.name = parisc_pathname(dev); dev->hpa.start = hpa; /* This is awkward. The STI spec says that gfx devices may occupy * 32MB or 64MB. Unfortunately, we don't know how to tell whether @@ -535,10 +534,10 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) dev->hpa.end = hpa + 0xfff; } dev->hpa.flags = IORESOURCE_MEM; - name = parisc_hardware_description(&dev->id); - if (name) { - strlcpy(dev->name, name, sizeof(dev->name)); - } + dev->hpa.name = dev->name; + name = parisc_hardware_description(&dev->id) ? 
: "unknown"; + snprintf(dev->name, sizeof(dev->name), "%s [%s]", + name, parisc_pathname(dev)); /* Silently fail things like mouse ports which are subsumed within * the keyboard controller @@ -884,15 +883,13 @@ void __init walk_central_bus(void) &root); } -static void print_parisc_device(struct parisc_device *dev) +static __init void print_parisc_device(struct parisc_device *dev) { - char hw_path[64]; - static int count; + static int count __initdata; - print_pa_hwpath(dev, hw_path); - pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", - ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type, - dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); + pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }", + ++count, dev->name, &(dev->hpa.start), dev->id.hw_type, + dev->id.hversion, dev->id.sversion, dev->id.hversion_rev); if (dev->num_addrs) { int k; @@ -1081,7 +1078,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) -static int print_one_device(struct device * dev, void * data) +static __init int print_one_device(struct device * dev, void * data) { struct parisc_device * pdev = to_parisc_device(dev); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 3da39140babc..05bed27eef85 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -35,10 +35,9 @@ .level 2.0 #endif - .import pa_tlb_lock,data - .macro load_pa_tlb_lock reg - mfctl %cr25,\reg - addil L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg + /* Get aligned page_table_lock address for this mm from cr28/tr4 */ + .macro get_ptl reg + mfctl %cr28,\reg .endm /* space_to_prot macro creates a prot id from a space id */ @@ -407,7 +406,9 @@ # endif #endif dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ +#if CONFIG_PGTABLE_LEVELS < 3 copy %r0,\pte +#endif ldw,s \index(\pmd),\pmd bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ @@ -417,38 +418,23 @@ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */ .endm - /* Look up PTE in a 3-Level scheme. - * - * Here we implement a Hybrid L2/L3 scheme: we allocate the - * first pmd adjacent to the pgd. This means that we can - * subtract a constant offset to get to it. The pmd and pgd - * sizes are arranged so that a single pmd covers 4GB (giving - * a full LP64 process access to 8TB) so our lookups are - * effectively L2 for the first 4GB of the kernel (i.e. for - * all ILP32 processes and all the kernel for machines with - * under 4GB of memory) */ + /* Look up PTE in a 3-Level scheme. */ .macro L3_ptep pgd,pte,index,va,fault -#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */ +#if CONFIG_PGTABLE_LEVELS == 3 + copy %r0,\pte extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 ldw,s \index(\pgd),\pgd - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 - shld \pgd,PxD_VALUE_SHIFT,\index - extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 - copy \index,\pgd - extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 - ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd + shld \pgd,PxD_VALUE_SHIFT,\pgd #endif L2_ptep \pgd,\pte,\index,\va,\fault .endm - /* Acquire pa_tlb_lock lock and check page is present. */ - .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault -#ifdef CONFIG_SMP + /* Acquire page_table_lock and check page is present. 
*/ + .macro ptl_lock spc,ptp,pte,tmp,tmp1,fault +#ifdef CONFIG_TLB_PTLOCK 98: cmpib,COND(=),n 0,\spc,2f - load_pa_tlb_lock \tmp + get_ptl \tmp 1: LDCW 0(\tmp),\tmp1 cmpib,COND(=) 0,\tmp1,1b nop @@ -463,26 +449,26 @@ 3: .endm - /* Release pa_tlb_lock lock without reloading lock address. + /* Release page_table_lock without reloading lock address. Note that the values in the register spc are limited to NR_SPACE_IDS (262144). Thus, the stw instruction always stores a nonzero value even when register spc is 64 bits. We use an ordered store to ensure all prior accesses are performed prior to releasing the lock. */ - .macro tlb_unlock0 spc,tmp -#ifdef CONFIG_SMP + .macro ptl_unlock0 spc,tmp +#ifdef CONFIG_TLB_PTLOCK 98: or,COND(=) %r0,\spc,%r0 stw,ma \spc,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif .endm - /* Release pa_tlb_lock lock. */ - .macro tlb_unlock1 spc,tmp -#ifdef CONFIG_SMP -98: load_pa_tlb_lock \tmp + /* Release page_table_lock. */ + .macro ptl_unlock1 spc,tmp +#ifdef CONFIG_TLB_PTLOCK +98: get_ptl \tmp + ptl_unlock0 \spc,\tmp 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) - tlb_unlock0 \spc,\tmp #endif .endm @@ -1165,14 +1151,14 @@ dtlb_miss_20w: L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w - tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w + ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1191,14 +1177,14 @@ nadtlb_miss_20w: L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w - tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w + ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1219,7 +1205,7 @@ dtlb_miss_11: L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 - tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11 + ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11 update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1232,7 +1218,7 @@ dtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1252,7 +1238,7 @@ nadtlb_miss_11: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 - tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11 + ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11 update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1265,7 +1251,7 @@ nadtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1285,7 +1271,7 @@ dtlb_miss_20: L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 - tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 + ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1294,7 +1280,7 @@ dtlb_miss_20: idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1313,7 +1299,7 @@ nadtlb_miss_20: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 - tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 + ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1322,7 +1308,7 @@ nadtlb_miss_20: idtlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1422,14 +1408,14 @@ itlb_miss_20w: L3_ptep ptp,pte,t0,va,itlb_fault - tlb_lock spc,ptp,pte,t0,t1,itlb_fault + ptl_lock spc,ptp,pte,t0,t1,itlb_fault update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1446,14 +1432,14 @@ naitlb_miss_20w: L3_ptep 
ptp,pte,t0,va,naitlb_check_alias_20w - tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w + ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1474,7 +1460,7 @@ itlb_miss_11: L2_ptep ptp,pte,t0,va,itlb_fault - tlb_lock spc,ptp,pte,t0,t1,itlb_fault + ptl_lock spc,ptp,pte,t0,t1,itlb_fault update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1487,7 +1473,7 @@ itlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1498,7 +1484,7 @@ naitlb_miss_11: L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 - tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11 + ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11 update_accessed ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1511,7 +1497,7 @@ naitlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1532,7 +1518,7 @@ itlb_miss_20: L2_ptep ptp,pte,t0,va,itlb_fault - tlb_lock spc,ptp,pte,t0,t1,itlb_fault + ptl_lock spc,ptp,pte,t0,t1,itlb_fault update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1541,7 +1527,7 @@ itlb_miss_20: iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1552,7 +1538,7 @@ naitlb_miss_20: L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 - tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 + ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 update_accessed ptp,pte,t0,t1 make_insert_tlb spc,pte,prot,t1 @@ -1561,7 +1547,7 @@ naitlb_miss_20: iitlbt pte,prot - tlb_unlock1 spc,t0 + ptl_unlock1 spc,t0 rfir nop @@ -1584,14 +1570,14 @@ dbit_trap_20w: L3_ptep ptp,pte,t0,va,dbit_fault - tlb_lock spc,ptp,pte,t0,t1,dbit_fault + ptl_lock spc,ptp,pte,t0,t1,dbit_fault update_dirty ptp,pte,t1 make_insert_tlb spc,pte,prot,t1 idtlbt pte,prot - tlb_unlock0 spc,t0 + ptl_unlock0 spc,t0 rfir nop #else @@ -1604,7 +1590,7 @@ dbit_trap_11: L2_ptep ptp,pte,t0,va,dbit_fault - tlb_lock spc,ptp,pte,t0,t1,dbit_fault + ptl_lock spc,ptp,pte,t0,t1,dbit_fault update_dirty ptp,pte,t1 make_insert_tlb_11 spc,pte,prot @@ -1617,7 +1603,7 @@ dbit_trap_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock0 spc,t0 + ptl_unlock0 spc,t0 rfir nop @@ -1628,7 +1614,7 @@ dbit_trap_20: L2_ptep ptp,pte,t0,va,dbit_fault - tlb_lock spc,ptp,pte,t0,t1,dbit_fault + ptl_lock spc,ptp,pte,t0,t1,dbit_fault update_dirty ptp,pte,t1 make_insert_tlb spc,pte,prot,t1 @@ -1637,7 +1623,7 @@ dbit_trap_20: idtlbt pte,prot - tlb_unlock0 spc,t0 + ptl_unlock0 spc,t0 rfir nop #endif diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index aa93d775c34d..598d0938449d 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -22,7 +22,7 @@ #include #include - .level PA_ASM_LEVEL + .level 1.1 __INITDATA ENTRY(boot_args) @@ -69,6 +69,47 @@ $bss_loop: stw,ma %arg2,4(%r1) stw,ma %arg3,4(%r1) +#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20) + /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU + * and halt kernel if we detect a PA1.x CPU. 
*/ + ldi 32,%r10 + mtctl %r10,%cr11 + .level 2.0 + mfctl,w %cr11,%r10 + .level 1.1 + comib,<>,n 0,%r10,$cpu_ok + + load32 PA(msg1),%arg0 + ldi msg1_end-msg1,%arg1 +$iodc_panic: + copy %arg0, %r10 + copy %arg1, %r11 + load32 PA(init_stack),%sp +#define MEM_CONS 0x3A0 + ldw MEM_CONS+32(%r0),%arg0 // HPA + ldi ENTRY_IO_COUT,%arg1 + ldw MEM_CONS+36(%r0),%arg2 // SPA + ldw MEM_CONS+8(%r0),%arg3 // layers + load32 PA(__bss_start),%r1 + stw %r1,-52(%sp) // arg4 + stw %r0,-56(%sp) // arg5 + stw %r10,-60(%sp) // arg6 = ptr to text + stw %r11,-64(%sp) // arg7 = len + stw %r0,-68(%sp) // arg8 + load32 PA(.iodc_panic_ret), %rp + ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC + bv,n (%r1) +.iodc_panic_ret: + b . /* wait endless with ... */ + or %r10,%r10,%r10 /* qemu idle sleep */ +msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n" +msg1_end: + +$cpu_ok: +#endif + + .level PA_ASM_LEVEL + /* Initialize startup VM. Just map first 16/32 MB of memory */ load32 PA(swapper_pg_dir),%r4 mtctl %r4,%cr24 /* Initialize kernel root pointer */ diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c index 80a0ab372802..e59574f65e64 100644 --- a/arch/parisc/kernel/patch.c +++ b/arch/parisc/kernel/patch.c @@ -40,10 +40,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags, *need_unmap = 1; set_fixmap(fixmap, page_to_phys(page)); - if (flags) - raw_spin_lock_irqsave(&patch_lock, *flags); - else - __acquire(&patch_lock); + raw_spin_lock_irqsave(&patch_lock, *flags); return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); } @@ -52,10 +49,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags) { clear_fixmap(fixmap); - if (flags) - raw_spin_unlock_irqrestore(&patch_lock, *flags); - else - __release(&patch_lock); + raw_spin_unlock_irqrestore(&patch_lock, *flags); } void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) @@ -67,8 +61,9 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) int mapped; /* Make sure we don't have any aliases in cache */ - flush_kernel_vmap_range(addr, len); - flush_icache_range(start, end); + flush_kernel_dcache_range_asm(start, end); + flush_kernel_icache_range_asm(start, end); + flush_tlb_kernel_range(start, end); p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped); @@ -81,8 +76,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) * We're crossing a page boundary, so * need to remap */ - flush_kernel_vmap_range((void *)fixmap, - (p-fixmap) * sizeof(*p)); + flush_kernel_dcache_range_asm((unsigned long)fixmap, + (unsigned long)p); + flush_tlb_kernel_range((unsigned long)fixmap, + (unsigned long)p); if (mapped) patch_unmap(FIX_TEXT_POKE0, &flags); p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, @@ -90,10 +87,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) } } - flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p)); + flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p); + flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p); if (mapped) patch_unmap(FIX_TEXT_POKE0, &flags); - flush_icache_range(start, end); } void __kprobes __patch_text(void *addr, u32 insn) diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index a92a23d6acd9..5e4381280c97 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -200,7 +200,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, extern void * 
const ret_from_kernel_thread; extern void * const child_return; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(cregs, 0, sizeof(struct pt_regs)); if (!usp) /* idle thread */ diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index 7f2d0c0ecc80..176ef00bdd15 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -419,8 +419,7 @@ show_cpuinfo (struct seq_file *m, void *v) } seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities); - seq_printf(m, "model\t\t: %s\n" - "model name\t: %s\n", + seq_printf(m, "model\t\t: %s - %s\n", boot_cpu_data.pdc.sys_model_name, cpuinfo->dev ? cpuinfo->dev->name : "Unknown"); diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 8d6c9b88eb3f..db1a47cf424d 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -609,7 +609,8 @@ do_signal(struct pt_regs *regs, long in_syscall) void do_notify_resume(struct pt_regs *regs, long in_syscall) { - if (test_thread_flag(TIF_SIGPENDING)) + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) do_signal(regs, in_syscall); if (test_thread_flag(TIF_NOTIFY_RESUME)) diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 084e3ddba434..4827f185e655 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -413,7 +413,7 @@ 412 32 utimensat_time64 sys_utimensat sys_utimensat 413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 -416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend 419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 286cec4d86d7..cc6ed7496050 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -107,7 +107,7 @@ #define R1(i) (((i)>>21)&0x1f) #define R2(i) (((i)>>16)&0x1f) #define R3(i) ((i)&0x1f) -#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1)) +#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1)) #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0)) #define IM5_2(i) IM((i)>>16,5) #define IM5_3(i) IM((i),5) diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index e141441bfa64..d1d3990b83f6 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -142,24 +142,17 @@ static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { - unsigned long flags; - - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); __set_huge_pte_at(mm, addr, ptep, entry); - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); } pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long flags; pte_t entry; - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); entry = *ptep; __set_huge_pte_at(mm, addr, ptep, __pte(0)); - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); return entry; } @@ -168,29 +161,23 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t 
*ptep) { - unsigned long flags; pte_t old_pte; - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); old_pte = *ptep; __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); } int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) { - unsigned long flags; int changed; struct mm_struct *mm = vma->vm_mm; - spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); changed = !pte_same(*ptep, pte); if (changed) { __set_huge_pte_at(mm, addr, ptep, pte); } - spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); return changed; } diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 319afa00cdf7..6a083fc87a03 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -37,11 +37,6 @@ extern int data_start; extern void parisc_kernel_start(void); /* Kernel entry point in head.S */ #if CONFIG_PGTABLE_LEVELS == 3 -/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout - * with the first pmd adjacent to the pgd and below it. gcc doesn't actually - * guarantee that global objects will be laid out in memory in the same order - * as the order of declaration, so put these in different sections and use - * the linker script to order them. */ pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE))); #endif @@ -558,6 +553,11 @@ void __init mem_init(void) BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t)); BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD > BITS_PER_LONG); +#if CONFIG_PGTABLE_LEVELS == 3 + BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD); +#else + BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD); +#endif high_memory = __va((max_pfn << PAGE_SHIFT)); set_max_mapnr(max_low_pfn); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 5afa0ebd78ca..78dd6be8b31d 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -786,7 +786,6 @@ config THREAD_SHIFT range 13 15 default "15" if PPC_256K_PAGES default "14" if PPC64 - default "14" if KASAN default "13" help Used to define the stack size. The default is almost always what you diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 7a96cdefbd4e..612254141296 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -17,23 +17,6 @@ HAS_BIARCH := $(call cc-option-yn, -m32) # Set default 32 bits cross compilers for vdso and boot wrapper CROSS32_COMPILE ?= -ifeq ($(HAS_BIARCH),y) -ifeq ($(CROSS32_COMPILE),) -ifdef CONFIG_PPC32 -# These options will be overridden by any -mcpu option that the CPU -# or platform code sets later on the command line, but they are needed -# to set a sane 32-bit cpu target for the 64-bit cross compiler which -# may default to the wrong ISA. -KBUILD_CFLAGS += -mcpu=powerpc -KBUILD_AFLAGS += -mcpu=powerpc -endif -endif -endif - -ifdef CONFIG_PPC_BOOK3S_32 -KBUILD_CFLAGS += -mcpu=powerpc -endif - # If we're on a ppc/ppc64/ppc64le machine use that defconfig, otherwise just use # ppc64_defconfig because we have nothing better to go on. 
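Worth pausing on the FR3() change in arch/parisc/kernel/unaligned.c a few hunks up: the old macro shifted the instruction word left before masking, silently dropping the top bit of the 5-bit FP register field, so the decoded register number could never exceed 0x1f. The new macro masks first, then shifts, and ORs instruction bit 6 in as the low bit, yielding the full 6-bit register number. A small standalone C check of the two formulas; the sample instruction word is made up purely for illustration:

#include <stdio.h>

/* old (buggy) and new (fixed) FR3 decodes, copied from the hunk above */
#define FR3_OLD(i) ((((i) << 1) & 0x1f) | (((i) >> 6) & 1))
#define FR3_NEW(i) ((((i) & 0x1f) << 1) | (((i) >> 6) & 1))

int main(void)
{
	unsigned int insn = 0x5f;	/* low 5 bits all set, bit 6 set */

	/* prints "old FR3 = 0x1f, new FR3 = 0x3f": the old decode loses
	 * the top bit of the register field and misplaces bit 6 */
	printf("old FR3 = 0x%x, new FR3 = 0x%x\n",
	       FR3_OLD(insn), FR3_NEW(insn));
	return 0;
}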
uname := $(shell uname -m) @@ -170,7 +153,7 @@ CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8 CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8) else CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5)) -CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4) +CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4 endif else ifdef CONFIG_PPC_BOOK3E_64 CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 @@ -190,6 +173,7 @@ endif endif CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU)) +AFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU)) # Altivec option not allowed with e500mc64 in GCC. ifdef CONFIG_ALTIVEC @@ -200,14 +184,6 @@ endif CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU) CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU)) -ifdef CONFIG_PPC32 -ifdef CONFIG_PPC_E500MC -CFLAGS-y += $(call cc-option,-mcpu=e500mc,-mcpu=powerpc) -else -CFLAGS-$(CONFIG_E500) += $(call cc-option,-mcpu=8540 -msoft-float,-mcpu=powerpc) -endif -endif - asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1) KBUILD_CPPFLAGS += -I $(srctree)/arch/$(ARCH) $(asinstr) diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index e4b364b5da9e..8b78eba755f9 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -30,6 +30,7 @@ endif BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \ + $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \ -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ $(LINUXINCLUDE) diff --git a/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi b/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi new file mode 100644 index 000000000000..7e2a90cde72e --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi @@ -0,0 +1,51 @@ +/* + * e500v1 Power ISA Device Tree Source (include) + * + * Copyright 2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + +/ { + cpus { + power-isa-version = "2.03"; + power-isa-b; // Base + power-isa-e; // Embedded + power-isa-atb; // Alternate Time Base + power-isa-cs; // Cache Specification + power-isa-e.le; // Embedded.Little-Endian + power-isa-e.pm; // Embedded.Performance Monitor + power-isa-ecl; // Embedded Cache Locking + power-isa-mmc; // Memory Coherence + power-isa-sp; // Signal Processing Engine + power-isa-sp.fs; // SPE.Embedded Float Scalar Single + power-isa-sp.fv; // SPE.Embedded Float Vector + mmu-type = "power-embedded"; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/mpc8540ads.dts b/arch/powerpc/boot/dts/fsl/mpc8540ads.dts index 18a885130538..e03ae130162b 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8540ads.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8540ads.dts @@ -7,7 +7,7 @@ /dts-v1/; -/include/ "e500v2_power_isa.dtsi" +/include/ "e500v1_power_isa.dtsi" / { model = "MPC8540ADS"; diff --git a/arch/powerpc/boot/dts/fsl/mpc8541cds.dts b/arch/powerpc/boot/dts/fsl/mpc8541cds.dts index ac381e7b1c60..a2a6c5cf852e 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8541cds.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8541cds.dts @@ -7,7 +7,7 @@ /dts-v1/; -/include/ "e500v2_power_isa.dtsi" +/include/ "e500v1_power_isa.dtsi" / { model = "MPC8541CDS"; diff --git a/arch/powerpc/boot/dts/fsl/mpc8555cds.dts b/arch/powerpc/boot/dts/fsl/mpc8555cds.dts index 9f58db2a7e66..901b6ff06dfb 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8555cds.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8555cds.dts @@ -7,7 +7,7 @@ /dts-v1/; -/include/ "e500v2_power_isa.dtsi" +/include/ "e500v1_power_isa.dtsi" / { model = "MPC8555CDS"; diff --git a/arch/powerpc/boot/dts/fsl/mpc8560ads.dts b/arch/powerpc/boot/dts/fsl/mpc8560ads.dts index a24722ccaebf..c2f9aea78b29 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8560ads.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8560ads.dts @@ -7,7 +7,7 @@ /dts-v1/; -/include/ "e500v2_power_isa.dtsi" +/include/ "e500v1_power_isa.dtsi" / { model = "MPC8560ADS"; diff --git a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi index 099a598c74c0..bfe1ed5be337 100644 --- a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi +++ b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi @@ -139,12 +139,12 @@ fman@400000 { ethernet@e6000 { phy-handle = <&phy_rgmii_0>; - phy-connection-type = "rgmii"; + phy-connection-type = "rgmii-id"; }; ethernet@e8000 { phy-handle = <&phy_rgmii_1>; - phy-connection-type = "rgmii"; + phy-connection-type = "rgmii-id"; }; mdio0: mdio@fc000 { diff --git a/arch/powerpc/include/asm/bpf_perf_event.h b/arch/powerpc/include/asm/bpf_perf_event.h new file mode 100644 index 000000000000..e8a7b4ffb58c --- /dev/null +++ b/arch/powerpc/include/asm/bpf_perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_BPF_PERF_EVENT_H +#define _ASM_POWERPC_BPF_PERF_EVENT_H + +#include <asm/ptrace.h> + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +#endif /* _ASM_POWERPC_BPF_PERF_EVENT_H */ diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index
bc76970b6ee5..e647dfcb3191 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -96,7 +96,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name #endif /* PPC64_ELF_ABI_v1 */ #endif /* CONFIG_FTRACE_SYSCALLS */ -#ifdef CONFIG_PPC64 +#if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER) #include <asm/paca.h> static inline void this_cpu_disable_ftrace(void) @@ -120,11 +120,13 @@ static inline u8 this_cpu_get_ftrace_enabled(void) return get_paca()->ftrace_enabled; } +void ftrace_free_init_tramp(void); #else /* CONFIG_PPC64 */ static inline void this_cpu_disable_ftrace(void) { } static inline void this_cpu_enable_ftrace(void) { } static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { } static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; } +static inline void ftrace_free_init_tramp(void) { } #endif /* CONFIG_PPC64 */ #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 254687258f42..03ae544eb6cc 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -132,7 +132,11 @@ static inline bool pfn_valid(unsigned long pfn) #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) -#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) +#define virt_addr_valid(vaddr) ({ \ + unsigned long _addr = (unsigned long)vaddr; \ + _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \ + pfn_valid(virt_to_pfn(_addr)); \ +}) /* * On Book-E parts we need __va to parse the device tree and we can't @@ -212,6 +216,9 @@ static inline bool pfn_valid(unsigned long pfn) #define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET) #else #ifdef CONFIG_PPC64 + +#define VIRTUAL_WARN_ON(x) WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x)) + /* * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
@@ -219,13 +226,13 @@ */ #define __va(x) \ ({ \ - VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET); \ + VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET); \ (void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \ }) #define __pa(x) \ ({ \ - VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET); \ + VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET); \ (unsigned long)(x) & 0x0fffffffffffffffUL; \ }) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index f0c0816f5727..d6a3cd147059 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -212,6 +212,7 @@ #define PPC_INST_COPY 0x7c20060c #define PPC_INST_DCBA 0x7c0005ec #define PPC_INST_DCBA_MASK 0xfc0007fe +#define PPC_INST_DSSALL 0x7e00066c #define PPC_INST_ISEL 0x7c00001e #define PPC_INST_ISEL_MASK 0xfc00003e #define PPC_INST_LSWI 0x7c0004aa @@ -517,6 +518,7 @@ #define PPC_DCBZL(a, b) stringify_in_c(.long PPC_RAW_DCBZL(a, b)) #define PPC_DIVDE(t, a, b) stringify_in_c(.long PPC_RAW_DIVDE(t, a, b)) #define PPC_DIVDEU(t, a, b) stringify_in_c(.long PPC_RAW_DIVDEU(t, a, b)) +#define PPC_DSSALL stringify_in_c(.long PPC_INST_DSSALL) #define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LQARX(t, a, b, eh)) #define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LDARX(t, a, b, eh)) #define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LWARX(t, a, b, eh)) diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 46a210b03d2b..ff31d2fa2140 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -14,10 +14,16 @@ #ifdef __KERNEL__ -#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT +#ifdef CONFIG_KASAN +#define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1) +#else +#define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT +#endif + +#if defined(CONFIG_VMAP_STACK) && MIN_THREAD_SHIFT < PAGE_SHIFT #define THREAD_SHIFT PAGE_SHIFT #else -#define THREAD_SHIFT CONFIG_THREAD_SHIFT +#define THREAD_SHIFT MIN_THREAD_SHIFT #endif #define THREAD_SIZE (1 << THREAD_SHIFT) @@ -90,6 +96,7 @@ void arch_setup_new_exec(void); #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_NOTIFY_SIGNAL 3 /* signal notifications exist */ #define TIF_SYSCALL_EMU 4 /* syscall emulation active */ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ #define TIF_PATCH_PENDING 6 /* pending live patching update */ @@ -115,6 +122,7 @@ void arch_setup_new_exec(void); #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) +#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL) #define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU) #define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM) #define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING) diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h deleted file mode 100644 --- a/arch/powerpc/include/uapi/asm/bpf_perf_event.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ -#define _UAPI__ASM_BPF_PERF_EVENT_H__ - -#include <asm/ptrace.h> - -typedef struct user_pt_regs bpf_user_pt_regs_t; - -#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 376104c166fc..db2bdc4cec64 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -20,6 +20,7 @@ CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom_init.o += -fno-stack-protector CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING CFLAGS_prom_init.o += -ffreestanding +CFLAGS_prom_init.o += $(call cc-option, -ftrivial-auto-var-init=uninitialized) ifdef CONFIG_FUNCTION_TRACER # Do not trace early boot code diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index c3bb800dc435..1a5ba26aab15 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -861,7 +861,6 @@ static int fadump_alloc_mem_ranges(struct
fadump_mrange_info *mrange_info) sizeof(struct fadump_memory_range)); return 0; } - static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info, u64 base, u64 end) { @@ -880,7 +879,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info, start = mem_ranges[mrange_info->mem_range_cnt - 1].base; size = mem_ranges[mrange_info->mem_range_cnt - 1].size; - if ((start + size) == base) + /* + * Boot memory area needs separate PT_LOAD segment(s) as it + * is moved to a different location at the time of crash. + * So, fold only if the region is not boot memory area. + */ + if ((start + size) == base && start >= fw_dump.boot_mem_top) is_adjacent = true; } if (!is_adjacent) { diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 1f835539fda4..77cd4c5a2d63 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -37,7 +37,7 @@ static int __init powersave_off(char *arg) { ppc_md.power_save = NULL; cpuidle_disable = IDLE_POWERSAVE_OFF; - return 0; + return 1; } __setup("powersave=off", powersave_off); @@ -82,7 +82,7 @@ void power4_idle(void) return; if (cpu_has_feature(CPU_FTR_ALTIVEC)) - asm volatile("DSSALL ; sync" ::: "memory"); + asm volatile(PPC_DSSALL " ; sync" ::: "memory"); power4_idle_nap(); diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S index 69df840f7253..315e5e2ad703 100644 --- a/arch/powerpc/kernel/idle_6xx.S +++ b/arch/powerpc/kernel/idle_6xx.S @@ -129,7 +129,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) mtspr SPRN_HID0,r4 BEGIN_FTR_SECTION - DSSALL + PPC_DSSALL sync END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) lwz r8,TI_LOCAL_FLAGS(r2) /* set napping bit */ diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S index 225511d73bef..f2e03ed423d0 100644 --- a/arch/powerpc/kernel/l2cr_6xx.S +++ b/arch/powerpc/kernel/l2cr_6xx.S @@ -96,7 +96,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L2CR) /* Stop DST streams */ BEGIN_FTR_SECTION - DSSALL + PPC_DSSALL sync END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) @@ -292,7 +292,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR) isync /* Stop DST streams */ - DSSALL + PPC_DSSALL sync /* Get the current enable bit of the L3CR into r4 */ @@ -401,7 +401,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR) _GLOBAL(__flush_disable_L1) /* Stop pending alitvec streams and memory accesses */ BEGIN_FTR_SECTION - DSSALL + PPC_DSSALL END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) sync diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 7920559a1ca8..cfc461413a5f 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -66,23 +66,35 @@ void set_pci_dma_ops(const struct dma_map_ops *dma_ops) pci_dma_ops = dma_ops; } -/* - * This function should run under locking protection, specifically - * hose_spinlock. - */ static int get_phb_number(struct device_node *dn) { int ret, phb_id = -1; - u32 prop_32; u64 prop; /* * Try fixed PHB numbering first, by checking archs and reading - * the respective device-tree properties. Firstly, try powernv by - * reading "ibm,opal-phbid", only present in OPAL environment. + * the respective device-tree properties. Firstly, try reading + * standard "linux,pci-domain", then try reading "ibm,opal-phbid" + * (only present in powernv OPAL environment), then try device-tree + * alias and as the last try to use lower bits of "reg" property. 
*/ - ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop); + ret = of_get_pci_domain_nr(dn); + if (ret >= 0) { + prop = ret; + ret = 0; + } + if (ret) + ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop); + if (ret) { + ret = of_alias_get_id(dn, "pci"); + if (ret >= 0) { + prop = ret; + ret = 0; + } + } + if (ret) { + u32 prop_32; ret = of_property_read_u32_index(dn, "reg", 1, &prop_32); prop = prop_32; } @@ -90,18 +102,20 @@ static int get_phb_number(struct device_node *dn) if (!ret) phb_id = (int)(prop & (MAX_PHBS - 1)); + spin_lock(&hose_spinlock); + /* We need to be sure to not use the same PHB number twice. */ if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap)) - return phb_id; + goto out_unlock; - /* - * If not pseries nor powernv, or if fixed PHB numbering tried to add - * the same PHB number twice, then fallback to dynamic PHB numbering. - */ + /* If everything fails then fallback to dynamic PHB numbering. */ phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS); BUG_ON(phb_id >= MAX_PHBS); set_bit(phb_id, phb_bitmap); +out_unlock: + spin_unlock(&hose_spinlock); + return phb_id; } @@ -112,10 +126,13 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev) phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL); if (phb == NULL) return NULL; - spin_lock(&hose_spinlock); + phb->global_number = get_phb_number(dev); + + spin_lock(&hose_spinlock); list_add_tail(&phb->list_node, &hose_list); spin_unlock(&hose_spinlock); + phb->dn = dev; phb->is_dynamic = slab_is_available(); #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index e99b7c547d7e..b173ba342645 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -330,6 +330,7 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose, INIT_LIST_HEAD(&pdn->list); parent = of_get_parent(dn); pdn->parent = parent ? 
PCI_DN(parent) : NULL; + of_node_put(parent); if (pdn->parent) list_add_tail(&pdn->list, &pdn->parent->child_list); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 3064694afea1..cf375d67eacb 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1684,7 +1684,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, /* Copy registers */ sp -= sizeof(struct pt_regs); childregs = (struct pt_regs *) sp; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(childregs, 0, sizeof(struct pt_regs)); childregs->gpr[1] = sp + sizeof(struct pt_regs); @@ -1800,7 +1800,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) tm_reclaim_current(0); #endif - memset(regs->gpr, 0, sizeof(regs->gpr)); + memset(&regs->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0])); regs->ctr = 0; regs->link = 0; regs->xer = 0; @@ -2108,12 +2108,12 @@ static unsigned long __get_wchan(struct task_struct *p) return 0; do { - sp = *(unsigned long *)sp; + sp = READ_ONCE_NOCHECK(*(unsigned long *)sp); if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) || p->state == TASK_RUNNING) return 0; if (count > 0) { - ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; + ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]); if (!in_sched_functions(ip)) return ip; } diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 7a14a094be8a..1dfb4c213fea 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -750,6 +750,13 @@ void __init early_init_devtree(void *params) of_scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); + /* + * As generic code authors expect to be able to use static keys + * in early_param() handlers, we initialize the static keys just + * before parsing early params (it's fine to call jump_label_init() + * more than once). + */ + jump_label_init(); parse_early_param(); /* make sure we've parsed cmdline for mem= before this */ diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index b183ab9c5107..dfa5f729f774 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -13,7 +13,7 @@ # If you really need to reference something from prom_init.o add # it to the list below: -grep "^CONFIG_KASAN=y$" .config >/dev/null +grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null if [ $?
-eq 0 ] then MEM_FUNCS="__memcpy __memset" diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index f6e51be47c6e..9ea9ee513ae1 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -75,8 +75,13 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&tmp, &child->thread.TS_FPR(fpidx), - sizeof(long)); + if (IS_ENABLED(CONFIG_PPC32)) { + // On 32-bit the index we are passed refers to 32-bit words + tmp = ((u32 *)child->thread.fp_state.fpr)[fpidx]; + } else { + memcpy(&tmp, &child->thread.TS_FPR(fpidx), + sizeof(long)); + } else tmp = child->thread.fp_state.fpscr; } @@ -108,8 +113,13 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&child->thread.TS_FPR(fpidx), &data, - sizeof(long)); + if (IS_ENABLED(CONFIG_PPC32)) { + // On 32-bit the index we are passed refers to 32-bit words + ((u32 *)child->thread.fp_state.fpr)[fpidx] = data; + } else { + memcpy(&child->thread.TS_FPR(fpidx), &data, + sizeof(long)); + } else child->thread.fp_state.fpscr = data; ret = 0; @@ -478,4 +488,7 @@ void __init pt_regs_check(void) * real registers. */ BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long)); + + // ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible + BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX)); } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index cccb32cf0e08..bf962051af0a 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -1040,7 +1040,7 @@ static struct rtas_filter rtas_filters[] __ro_after_init = { { "get-time-of-day", -1, -1, -1, -1, -1 }, { "ibm,get-vpd", -1, 0, -1, 1, 2 }, { "ibm,lpar-perftools", -1, 2, 3, -1, -1 }, - { "ibm,platform-dump", -1, 4, 5, -1, -1 }, + { "ibm,platform-dump", -1, 4, 5, -1, -1 }, /* Special cased */ { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 }, { "ibm,scan-log-dump", -1, 0, 1, -1, -1 }, { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 }, @@ -1087,6 +1087,15 @@ static bool block_rtas_call(int token, int nargs, size = 1; end = base + size - 1; + + /* + * Special case for ibm,platform-dump - NULL buffer + * address is used to indicate end of dump processing + */ + if (!strcmp(f->name, "ibm,platform-dump") && + base == 0) + return false; + if (!in_rmo_buf(base, end)) goto err; } @@ -1296,6 +1305,12 @@ int __init early_init_dt_scan_rtas(unsigned long node, entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL); sizep = of_get_flat_dt_prop(node, "rtas-size", NULL); +#ifdef CONFIG_PPC64 + /* need this feature to decide the crashkernel offset */ + if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL)) + powerpc_firmware_features |= FW_FEATURE_LPAR; +#endif + if (basep && entryp && sizep) { rtas.base = *basep; rtas.entry = *entryp; diff --git a/arch/powerpc/kernel/secvar-sysfs.c b/arch/powerpc/kernel/secvar-sysfs.c index a0a78aba2083..1ee4640a2641 100644 --- a/arch/powerpc/kernel/secvar-sysfs.c +++ b/arch/powerpc/kernel/secvar-sysfs.c @@ -26,15 +26,18 @@ static ssize_t format_show(struct kobject *kobj, struct kobj_attribute *attr, const char *format; node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend"); - if (!of_device_is_available(node)) - return -ENODEV; + if (!of_device_is_available(node)) { + rc = -ENODEV; + goto out; + } rc = of_property_read_string(node, "format", &format); if (rc) - return rc; + goto out; rc 
= sprintf(buf, "%s\n", format); +out: of_node_put(node); return rc; diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index d2c356f37077..a8bb0aca1d02 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -318,7 +318,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) if (thread_info_flags & _TIF_PATCH_PENDING) klp_update_patch_state(current); - if (thread_info_flags & _TIF_SIGPENDING) { + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { BUG_ON(regs != current->thread.regs); do_signal(current); } diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S index f73f4d72fea4..e0cbd63007f2 100644 --- a/arch/powerpc/kernel/swsusp_32.S +++ b/arch/powerpc/kernel/swsusp_32.S @@ -181,7 +181,7 @@ _GLOBAL(swsusp_arch_resume) #ifdef CONFIG_ALTIVEC /* Stop pending alitvec streams and memory accesses */ BEGIN_FTR_SECTION - DSSALL + PPC_DSSALL END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif sync diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S index 6d3189830dd3..068a268a8013 100644 --- a/arch/powerpc/kernel/swsusp_asm64.S +++ b/arch/powerpc/kernel/swsusp_asm64.S @@ -142,7 +142,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR) _GLOBAL(swsusp_arch_resume) /* Stop pending alitvec streams and memory accesses */ BEGIN_FTR_SECTION - DSSALL + PPC_DSSALL END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) sync diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index d34276f3c495..b0a3063ab1b1 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -18,6 +18,7 @@ .p2align 3 #define __SYSCALL(nr, entry) .8byte entry #else + .p2align 2 #define __SYSCALL(nr, entry) .long entry #endif diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 42761ebec9f7..d24aea4fed7a 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -336,9 +336,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp) /* Is this a known long jump tramp? */ for (i = 0; i < NUM_FTRACE_TRAMPS; i++) - if (!ftrace_tramps[i]) - break; - else if (ftrace_tramps[i] == tramp) + if (ftrace_tramps[i] == tramp) return 0; /* Is this a known plt tramp? */ @@ -882,6 +880,17 @@ void arch_ftrace_update_code(int command) extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[]; +void ftrace_free_init_tramp(void) +{ + int i; + + for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++) + if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) { + ftrace_tramps[i] = 0; + return; + } +} + int __init ftrace_dyn_arch_init(void) { int i; diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c index 56da5eb2b923..80c79cb5010c 100644 --- a/arch/powerpc/kexec/core.c +++ b/arch/powerpc/kexec/core.c @@ -147,11 +147,18 @@ void __init reserve_crashkernel(void) if (!crashk_res.start) { #ifdef CONFIG_PPC64 /* - * On 64bit we split the RMO in half but cap it at half of - * a small SLB (128MB) since the crash kernel needs to place - * itself and some stacks to be in the first segment. + * On the LPAR platform place the crash kernel to mid of + * RMA size (512MB or more) to ensure the crash kernel + * gets enough space to place itself and some stack to be + * in the first segment. At the same time normal kernel + * also get enough space to allocate memory for essential + * system resource in the first segment. Keep the crash + * kernel starts at 128MB offset on other platforms. 
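
The comment above is easiest to check with concrete numbers: with the minimum 512MB RMA on an LPAR, the assignment that follows puts the crash kernel base at 256MB, while other platforms keep the old min(128MB, RMA/2) cap. The selection logic in isolation (crashk_base_sketch is an illustrative name, not the kernel's):

	#include <linux/minmax.h>
	#include <linux/types.h>

	static u64 crashk_base_sketch(u64 rma_size, bool is_lpar)
	{
		if (is_lpar)
			return rma_size / 2;	/* 512MB RMA -> 256MB base */

		/* cap at 128MB (0x8000000) on other platforms */
		return min_t(u64, 0x8000000, rma_size / 2);
	}
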
*/ - crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2)); + if (firmware_has_feature(FW_FEATURE_LPAR)) + crashk_res.start = ppc64_rma_size / 2; + else + crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2)); #else crashk_res.start = KDUMP_KERNELBASE; #endif diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 8da93fdfa59e..c640053ab03f 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -421,13 +421,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, tbl[idx % TCES_PER_PAGE] = tce; } -static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl, - unsigned long entry) +static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt, + struct iommu_table *tbl, unsigned long entry) { - unsigned long hpa = 0; - enum dma_data_direction dir = DMA_NONE; + unsigned long i; + unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); + unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift); - iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir); + for (i = 0; i < subpages; ++i) { + unsigned long hpa = 0; + enum dma_data_direction dir = DMA_NONE; + + iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir); + } } static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm, @@ -486,6 +492,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm, break; } + iommu_tce_kill(tbl, io_entry, subpages); + return ret; } @@ -545,6 +553,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm, break; } + iommu_tce_kill(tbl, io_entry, subpages); + return ret; } @@ -591,10 +601,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry, ua, dir); - iommu_tce_kill(stit->tbl, entry, 1); if (ret != H_SUCCESS) { - kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry); + kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry); goto unlock_exit; } } @@ -670,13 +679,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, */ if (get_user(tce, tces + i)) { ret = H_TOO_HARD; - goto invalidate_exit; + goto unlock_exit; } tce = be64_to_cpu(tce); if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) { ret = H_PARAMETER; - goto invalidate_exit; + goto unlock_exit; } list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { @@ -685,19 +694,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, iommu_tce_direction(tce)); if (ret != H_SUCCESS) { - kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, - entry); - goto invalidate_exit; + kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, + entry + i); + goto unlock_exit; } } kvmppc_tce_put(stt, entry + i, tce); } -invalidate_exit: - list_for_each_entry_lockless(stit, &stt->iommu_tables, next) - iommu_tce_kill(stit->tbl, entry, npages); - unlock_exit: srcu_read_unlock(&vcpu->kvm->srcu, idx); @@ -736,20 +741,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, continue; if (ret == H_TOO_HARD) - goto invalidate_exit; + return ret; WARN_ON_ONCE(1); - kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry); + kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i); } } for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value); -invalidate_exit: - list_for_each_entry_lockless(stit, &stt->iommu_tables, next) - iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages); - return ret; } EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce); diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 
e5ba96c41f3f..57af53a6a2d8 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl, tbl->it_ops->tce_kill(tbl, entry, pages, true); } -static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl, - unsigned long entry) +static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt, + struct iommu_table *tbl, unsigned long entry) { - unsigned long hpa = 0; - enum dma_data_direction dir = DMA_NONE; + unsigned long i; + unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); + unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift); - iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir); + for (i = 0; i < subpages; ++i) { + unsigned long hpa = 0; + enum dma_data_direction dir = DMA_NONE; + + iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir); + } } static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, @@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, break; } + iommu_tce_kill_rm(tbl, io_entry, subpages); + return ret; } @@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, break; } + iommu_tce_kill_rm(tbl, io_entry, subpages); + return ret; } @@ -424,10 +434,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry, ua, dir); - iommu_tce_kill_rm(stit->tbl, entry, 1); - if (ret != H_SUCCESS) { - kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); + kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry); return ret; } } @@ -569,7 +577,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, ua = 0; if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) { ret = H_PARAMETER; - goto invalidate_exit; + goto unlock_exit; } list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { @@ -578,19 +586,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, iommu_tce_direction(tce)); if (ret != H_SUCCESS) { - kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, - entry); - goto invalidate_exit; + kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, + entry + i); + goto unlock_exit; } } kvmppc_rm_tce_put(stt, entry + i, tce); } -invalidate_exit: - list_for_each_entry_lockless(stit, &stt->iommu_tables, next) - iommu_tce_kill_rm(stit->tbl, entry, npages); - unlock_exit: if (!prereg) arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); @@ -632,20 +636,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, continue; if (ret == H_TOO_HARD) - goto invalidate_exit; + return ret; WARN_ON_ONCE_RM(1); - kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); + kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i); } } for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value); -invalidate_exit: - list_for_each_entry_lockless(stit, &stt->iommu_tables, next) - iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages); - return ret; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 38b7a3491aac..1d2593238995 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3399,8 +3399,22 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) kvmppc_set_host_core(pcpu); + context_tracking_guest_exit(); + if (!vtime_accounting_enabled_this_cpu()) { + local_irq_enable(); + /* + * Service IRQs here before vtime_account_guest_exit() so any + * ticks that occurred while running the guest are 
accounted to + * the guest. If vtime accounting is enabled, accounting uses + * TB rather than ticks, so it can be done without enabling + * interrupts here, which has the problem that it accounts + * interrupt processing overhead to the host. + */ + local_irq_disable(); + } + vtime_account_guest_exit(); + local_irq_enable(); - guest_exit(); /* Let secondaries go back to the offline loop */ for (i = 0; i < controlled_threads; ++i) { @@ -4235,8 +4249,22 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, kvmppc_set_host_core(pcpu); + context_tracking_guest_exit(); + if (!vtime_accounting_enabled_this_cpu()) { + local_irq_enable(); + /* + * Service IRQs here before vtime_account_guest_exit() so any + * ticks that occurred while running the guest are accounted to + * the guest. If vtime accounting is enabled, accounting uses + * TB rather than ticks, so it can be done without enabling + * interrupts here, which has the problem that it accounts + * interrupt processing overhead to the host. + */ + local_irq_disable(); + } + vtime_account_guest_exit(); + local_irq_enable(); - guest_exit(); cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest); diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 84e5a2dc8be5..3dd58b4ee33e 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -359,13 +359,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, struct kvm *kvm, unsigned long *gfn) { - struct kvmppc_uvmem_slot *p; + struct kvmppc_uvmem_slot *p = NULL, *iter; bool ret = false; unsigned long i; - list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) - if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns) + list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list) + if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) { + p = iter; break; + } if (!p) return ret; /* diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index b1abcb816439..75381beb7514 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -1016,7 +1016,21 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr) } trace_kvm_exit(exit_nr, vcpu); - guest_exit_irqoff(); + + context_tracking_guest_exit(); + if (!vtime_accounting_enabled_this_cpu()) { + local_irq_enable(); + /* + * Service IRQs here before vtime_account_guest_exit() so any + * ticks that occurred while running the guest are accounted to + * the guest. If vtime accounting is enabled, accounting uses + * TB rather than ticks, so it can be done without enabling + * interrupts here, which has the problem that it accounts + * interrupt processing overhead to the host. 
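
This open-coded exit sequence appears three times in this series: in the two HV paths above and in the booke path here. Its skeleton, with the reasoning from the comment attached to each step:

	context_tracking_guest_exit();

	if (!vtime_accounting_enabled_this_cpu()) {
		/* Tick-based accounting: open a brief IRQ window so any
		 * pending timer ticks are serviced while context tracking
		 * still attributes them to the guest. */
		local_irq_enable();
		local_irq_disable();
	}

	/* TB-based vtime accounting needs no such window: guest time is
	 * taken from the timebase, so this runs with IRQs still off. */
	vtime_account_guest_exit();
	local_irq_enable();
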
+ */ + local_irq_disable(); + } + vtime_account_guest_exit(); local_irq_enable(); diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c index 0a05e51964c1..90111c9e7521 100644 --- a/arch/powerpc/math-emu/math_efp.c +++ b/arch/powerpc/math-emu/math_efp.c @@ -17,6 +17,7 @@ #include #include +#include #include #include diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 295959487b76..ae4ba6a6745d 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -997,15 +997,6 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre pmd = *pmdp; pmd_clear(pmdp); - /* - * pmdp collapse_flush need to ensure that there are no parallel gup - * walk after this call. This is needed so that we can have stable - * page ref count when collapsing a page. We don't allow a collapse page - * if we have gup taken on the page. We can ensure that by sending IPI - * because gup walk happens with IRQ disabled. - */ - serialize_against_pte_lookup(vma->vm_mm); - radix__flush_tlb_collapsed_pmd(vma->vm_mm, address); return pmd; diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 22eb1c718e62..1ed276d2305f 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -51,6 +51,7 @@ #include #include #include +#include #include @@ -347,6 +348,7 @@ void free_initmem(void) mark_initmem_nx(); init_mem_is_free = true; free_initmem_default(POISON_FREE_INITMEM); + ftrace_free_init_tramp(); } /** diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c index 18f20da0d348..64290d343b55 100644 --- a/arch/powerpc/mm/mmu_context.c +++ b/arch/powerpc/mm/mmu_context.c @@ -79,7 +79,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * context */ if (cpu_has_feature(CPU_FTR_ALTIVEC)) - asm volatile ("dssall"); + asm volatile (PPC_DSSALL); if (new_on_cpu) radix_kvm_prefetch_workaround(next); diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c index 77884e24281d..3d845e001c87 100644 --- a/arch/powerpc/mm/nohash/book3e_pgtable.c +++ b/arch/powerpc/mm/nohash/book3e_pgtable.c @@ -95,8 +95,8 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) pgdp = pgd_offset_k(ea); p4dp = p4d_offset(pgdp, ea); if (p4d_none(*p4dp)) { - pmdp = early_alloc_pgtable(PMD_TABLE_SIZE); - p4d_populate(&init_mm, p4dp, pmdp); + pudp = early_alloc_pgtable(PUD_TABLE_SIZE); + p4d_populate(&init_mm, p4dp, pudp); } pudp = pud_offset(p4dp, ea); if (pud_none(*pudp)) { @@ -105,7 +105,7 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) } pmdp = pmd_offset(pudp, ea); if (!pmd_present(*pmdp)) { - ptep = early_alloc_pgtable(PAGE_SIZE); + ptep = early_alloc_pgtable(PTE_TABLE_SIZE); pmd_populate_kernel(&init_mm, pmdp, ptep); } ptep = pte_offset_kernel(pmdp, ea); diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c index 4c74e8a5482b..c555ad9fa00b 100644 --- a/arch/powerpc/mm/nohash/kaslr_booke.c +++ b/arch/powerpc/mm/nohash/kaslr_booke.c @@ -18,7 +18,6 @@ #include #include #include -#include #include struct regions { @@ -36,10 +35,6 @@ struct regions { int reserved_mem_size_cells; }; -/* Simplified build-specific string for starting entropy. 
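
The hunk that follows drops build_str, which duplicated information the kernel already exports as linux_banner, and mixes the banner into the seed instead. After the change, get_boot_seed() reduces to the following (rotate_xor() is the file-local mixing helper it already used):

	static unsigned long __init get_boot_seed(void *fdt)
	{
		unsigned long hash = 0;

		/* build-specific entropy: version, compiler, build host */
		hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
		/* board-specific entropy: the flattened device tree */
		hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

		return hash;
	}
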
*/ -static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" - LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; - struct regions __initdata regions; static __init void kaslr_get_cmdline(void *fdt) @@ -72,7 +67,8 @@ static unsigned long __init get_boot_seed(void *fdt) { unsigned long hash = 0; - hash = rotate_xor(hash, build_str, sizeof(build_str)); + /* build-specific string for starting entropy. */ + hash = rotate_xor(hash, linux_banner, strlen(linux_banner)); hash = rotate_xor(hash, fdt, fdt_totalsize(fdt)); return hash; diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c index c005fe041c18..ae97b82966a4 100644 --- a/arch/powerpc/mm/ptdump/shared.c +++ b/arch/powerpc/mm/ptdump/shared.c @@ -17,9 +17,9 @@ static const struct flag_info flag_array[] = { .clear = " ", }, { .mask = _PAGE_RW, - .val = _PAGE_RW, - .set = "rw", - .clear = "r ", + .val = 0, + .set = "r ", + .clear = "rw", }, { .mask = _PAGE_EXEC, .val = _PAGE_EXEC, diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile index c02854dea2b2..da9f60ede97b 100644 --- a/arch/powerpc/perf/Makefile +++ b/arch/powerpc/perf/Makefile @@ -5,11 +5,11 @@ ifdef CONFIG_COMPAT obj-$(CONFIG_PERF_EVENTS) += callchain_32.o endif -obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o +obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \ power5+-pmu.o power6-pmu.o power7-pmu.o \ isa207-common.o power8-pmu.o power9-pmu.o \ - generic-compat-pmu.o power10-pmu.o + generic-compat-pmu.o power10-pmu.o bhrb.o obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o obj-$(CONFIG_PPC_POWERNV) += imc-pmu.o diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index e49aa8fc6a49..6e3e50614353 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1267,27 +1267,22 @@ static void power_pmu_disable(struct pmu *pmu) * a PMI happens during interrupt replay and perf counter * values are cleared by PMU callbacks before replay. * - * If any PMC corresponding to the active PMU events are - * overflown, disable the interrupt by clearing the paca - * bit for PMI since we are disabling the PMU now. - * Otherwise provide a warning if there is PMI pending, but - * no counter is found overflown. + * Disable the interrupt by clearing the paca bit for PMI + * since we are disabling the PMU now. Otherwise provide a + * warning if there is PMI pending, but no counter is found + * overflown. + * + * Since power_pmu_disable runs under local_irq_save, it + * could happen that code hits a PMC overflow without PMI + * pending in paca. Hence only clear PMI pending if it was + * set. + * + * If a PMI is pending, then MSR[EE] must be disabled (because + * the masked PMI handler disabling EE). So it is safe to + * call clear_pmi_irq_pending(). */ - if (any_pmc_overflown(cpuhw)) { - /* - * Since power_pmu_disable runs under local_irq_save, it - * could happen that code hits a PMC overflow without PMI - * pending in paca. Hence only clear PMI pending if it was - * set. - * - * If a PMI is pending, then MSR[EE] must be disabled (because - * the masked PMI handler disabling EE). So it is safe to - * call clear_pmi_irq_pending(). 
- */ - if (pmi_irq_pending()) - clear_pmi_irq_pending(); - } else - WARN_ON(pmi_irq_pending()); + if (pmi_irq_pending()) + clear_pmi_irq_pending(); val = mmcra = cpuhw->mmcr.mmcra; diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 58448f0e4721..52990becbdfc 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -363,7 +363,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) if (event_is_threshold(event) && is_thresh_cmp_valid(event)) { mask |= CNST_THRESH_MASK; value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); - } + } else if (event_is_threshold(event)) + return -1; } else { /* * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC, diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 2a57e93a79dc..7245355bee28 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -133,11 +133,11 @@ int p9_dd22_bl_ev[] = { /* Table of alternatives, sorted by column 0 */ static const unsigned int power9_event_alternatives[][MAX_ALT] = { - { PM_INST_DISP, PM_INST_DISP_ALT }, - { PM_RUN_CYC_ALT, PM_RUN_CYC }, - { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL }, - { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT }, { PM_BR_2PATH, PM_BR_2PATH_ALT }, + { PM_INST_DISP, PM_INST_DISP_ALT }, + { PM_RUN_CYC_ALT, PM_RUN_CYC }, + { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT }, + { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL }, }; static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[]) diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/4xx/cpm.c index ae8b812c9202..2481e78c0423 100644 --- a/arch/powerpc/platforms/4xx/cpm.c +++ b/arch/powerpc/platforms/4xx/cpm.c @@ -327,6 +327,6 @@ late_initcall(cpm_init); static int __init cpm_powersave_off(char *arg) { cpm.powersave_off = 1; - return 0; + return 1; } __setup("powersave=off", cpm_powersave_off); diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c index c58b6f1c40e3..3ef5e9fd3a9b 100644 --- a/arch/powerpc/platforms/8xx/cpm1.c +++ b/arch/powerpc/platforms/8xx/cpm1.c @@ -280,6 +280,7 @@ cpm_setbrg(uint brg, uint rate) out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) | CPM_BRG_EN | CPM_BRG_DIV16); } +EXPORT_SYMBOL(cpm_setbrg); struct cpm_ioport16 { __be16 dir, par, odr_sor, dat, intr; diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 32a9c4c09b98..84f9dd476bbb 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -119,9 +119,9 @@ config GENERIC_CPU depends on PPC64 && CPU_LITTLE_ENDIAN select ARCH_HAS_FAST_MULTIPLIER -config GENERIC_CPU +config POWERPC_CPU bool "Generic 32 bits powerpc" - depends on PPC32 && !PPC_8xx + depends on PPC32 && !PPC_8xx && !PPC_85xx config CELL_CPU bool "Cell Broadband Engine" @@ -152,11 +152,11 @@ config POWER9_CPU config E5500_CPU bool "Freescale e5500" - depends on E500 + depends on PPC64 && E500 config E6500_CPU bool "Freescale e6500" - depends on E500 + depends on PPC64 && E500 config 860_CPU bool "8xx family" @@ -175,11 +175,23 @@ config G4_CPU depends on PPC_BOOK3S_32 select ALTIVEC +config E500_CPU + bool "e500 (8540)" + depends on PPC_85xx && !PPC_E500MC + +config E500MC_CPU + bool "e500mc" + depends on PPC_85xx && PPC_E500MC + +config TOOLCHAIN_DEFAULT_CPU + bool "Rely on the toolchain's implicit default CPU" + depends on PPC32 + endchoice config TARGET_CPU_BOOL bool - default !GENERIC_CPU + default !GENERIC_CPU && 
!TOOLCHAIN_DEFAULT_CPU config TARGET_CPU string @@ -194,6 +206,9 @@ config TARGET_CPU default "e300c2" if E300C2_CPU default "e300c3" if E300C3_CPU default "G4" if G4_CPU + default "8540" if E500_CPU + default "e500mc" if E500MC_CPU + default "powerpc" if POWERPC_CPU config PPC_BOOK3S def_bool y diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index ca2555b8a0c2..ffbc7d2e9464 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -226,6 +226,7 @@ static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg) if (!prop) { dev_dbg(&dev->dev, "axon_msi: no msi-address-(32|64) properties found\n"); + of_node_put(dn); return -ENOENT; } diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 25390569e24c..908e9b8e79fe 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -664,6 +664,7 @@ spufs_init_isolated_loader(void) return; loader = of_get_property(dn, "loader", &size); + of_node_put(dn); if (!loader) return; diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S index ced225415486..b8ae56e9f414 100644 --- a/arch/powerpc/platforms/powermac/cache.S +++ b/arch/powerpc/platforms/powermac/cache.S @@ -48,7 +48,7 @@ flush_disable_75x: /* Stop DST streams */ BEGIN_FTR_SECTION - DSSALL + PPC_DSSALL sync END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) @@ -197,7 +197,7 @@ flush_disable_745x: isync /* Stop prefetch streams */ - DSSALL + PPC_DSSALL sync /* Disable L2 prefetching */ diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c index 9a360ced663b..e23a51a05f99 100644 --- a/arch/powerpc/platforms/powernv/opal-fadump.c +++ b/arch/powerpc/platforms/powernv/opal-fadump.c @@ -60,7 +60,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) addr = be64_to_cpu(addr); pr_debug("Kernel metadata addr: %llx\n", addr); opal_fdm_active = (void *)addr; - if (opal_fdm_active->registered_regions == 0) + if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) return; ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_BOOT_MEM, &addr); @@ -95,17 +95,17 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf); static void opal_fadump_update_config(struct fw_dump *fadump_conf, const struct opal_fadump_mem_struct *fdm) { - pr_debug("Boot memory regions count: %d\n", fdm->region_cnt); + pr_debug("Boot memory regions count: %d\n", be16_to_cpu(fdm->region_cnt)); /* * The destination address of the first boot memory region is the * destination address of boot memory regions. 
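
Every multi-byte field of opal_fadump_mem_struct is stored big-endian, because the structure is handed to firmware and read back in the capture kernel; the conversions added here and in the following hunks therefore happen on every access instead of assuming host byte order. The resulting read-modify-write style, in miniature:

	__be16 wire = opal_fdm->registered_regions;	/* as stored for f/w */
	u16 cnt = be16_to_cpu(wire);			/* work in CPU order */

	cnt++;
	opal_fdm->registered_regions = cpu_to_be16(cnt);	/* store back BE */
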
*/ - fadump_conf->boot_mem_dest_addr = fdm->rgn[0].dest; + fadump_conf->boot_mem_dest_addr = be64_to_cpu(fdm->rgn[0].dest); pr_debug("Destination address of boot memory regions: %#016llx\n", fadump_conf->boot_mem_dest_addr); - fadump_conf->fadumphdr_addr = fdm->fadumphdr_addr; + fadump_conf->fadumphdr_addr = be64_to_cpu(fdm->fadumphdr_addr); } /* @@ -126,9 +126,9 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf, fadump_conf->boot_memory_size = 0; pr_debug("Boot memory regions:\n"); - for (i = 0; i < fdm->region_cnt; i++) { - base = fdm->rgn[i].src; - size = fdm->rgn[i].size; + for (i = 0; i < be16_to_cpu(fdm->region_cnt); i++) { + base = be64_to_cpu(fdm->rgn[i].src); + size = be64_to_cpu(fdm->rgn[i].size); pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size); fadump_conf->boot_mem_addr[i] = base; @@ -143,7 +143,7 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf, * Start address of reserve dump area (permanent reservation) for * re-registering FADump after dump capture. */ - fadump_conf->reserve_dump_area_start = fdm->rgn[0].dest; + fadump_conf->reserve_dump_area_start = be64_to_cpu(fdm->rgn[0].dest); /* * Rarely, but it can so happen that system crashes before all @@ -155,13 +155,14 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf, * Hope the memory that could not be preserved only has pages * that are usually filtered out while saving the vmcore. */ - if (fdm->region_cnt > fdm->registered_regions) { + if (be16_to_cpu(fdm->region_cnt) > be16_to_cpu(fdm->registered_regions)) { pr_warn("Not all memory regions were saved!!!\n"); pr_warn(" Unsaved memory regions:\n"); - i = fdm->registered_regions; - while (i < fdm->region_cnt) { + i = be16_to_cpu(fdm->registered_regions); + while (i < be16_to_cpu(fdm->region_cnt)) { pr_warn("\t[%03d] base: 0x%llx, size: 0x%llx\n", - i, fdm->rgn[i].src, fdm->rgn[i].size); + i, be64_to_cpu(fdm->rgn[i].src), + be64_to_cpu(fdm->rgn[i].size)); i++; } @@ -170,7 +171,7 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf, } fadump_conf->boot_mem_top = (fadump_conf->boot_memory_size + hole_size); - fadump_conf->boot_mem_regs_cnt = fdm->region_cnt; + fadump_conf->boot_mem_regs_cnt = be16_to_cpu(fdm->region_cnt); opal_fadump_update_config(fadump_conf, fdm); } @@ -178,35 +179,38 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf, static void opal_fadump_init_metadata(struct opal_fadump_mem_struct *fdm) { fdm->version = OPAL_FADUMP_VERSION; - fdm->region_cnt = 0; - fdm->registered_regions = 0; - fdm->fadumphdr_addr = 0; + fdm->region_cnt = cpu_to_be16(0); + fdm->registered_regions = cpu_to_be16(0); + fdm->fadumphdr_addr = cpu_to_be64(0); } static u64 opal_fadump_init_mem_struct(struct fw_dump *fadump_conf) { u64 addr = fadump_conf->reserve_dump_area_start; + u16 reg_cnt; int i; opal_fdm = __va(fadump_conf->kernel_metadata); opal_fadump_init_metadata(opal_fdm); /* Boot memory regions */ + reg_cnt = be16_to_cpu(opal_fdm->region_cnt); for (i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) { - opal_fdm->rgn[i].src = fadump_conf->boot_mem_addr[i]; - opal_fdm->rgn[i].dest = addr; - opal_fdm->rgn[i].size = fadump_conf->boot_mem_sz[i]; + opal_fdm->rgn[i].src = cpu_to_be64(fadump_conf->boot_mem_addr[i]); + opal_fdm->rgn[i].dest = cpu_to_be64(addr); + opal_fdm->rgn[i].size = cpu_to_be64(fadump_conf->boot_mem_sz[i]); - opal_fdm->region_cnt++; + reg_cnt++; addr += fadump_conf->boot_mem_sz[i]; } + opal_fdm->region_cnt = cpu_to_be16(reg_cnt); /* * Kernel metadata is passed to f/w and retrieved in 
capture kerenl. * So, use it to save fadump header address instead of calculating it. */ - opal_fdm->fadumphdr_addr = (opal_fdm->rgn[0].dest + - fadump_conf->boot_memory_size); + opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) + + fadump_conf->boot_memory_size); opal_fadump_update_config(fadump_conf, opal_fdm); @@ -269,18 +273,21 @@ static u64 opal_fadump_get_bootmem_min(void) static int opal_fadump_register(struct fw_dump *fadump_conf) { s64 rc = OPAL_PARAMETER; + u16 registered_regs; int i, err = -EIO; - for (i = 0; i < opal_fdm->region_cnt; i++) { + registered_regs = be16_to_cpu(opal_fdm->registered_regions); + for (i = 0; i < be16_to_cpu(opal_fdm->region_cnt); i++) { rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE, - opal_fdm->rgn[i].src, - opal_fdm->rgn[i].dest, - opal_fdm->rgn[i].size); + be64_to_cpu(opal_fdm->rgn[i].src), + be64_to_cpu(opal_fdm->rgn[i].dest), + be64_to_cpu(opal_fdm->rgn[i].size)); if (rc != OPAL_SUCCESS) break; - opal_fdm->registered_regions++; + registered_regs++; } + opal_fdm->registered_regions = cpu_to_be16(registered_regs); switch (rc) { case OPAL_SUCCESS: @@ -291,7 +298,8 @@ static int opal_fadump_register(struct fw_dump *fadump_conf) case OPAL_RESOURCE: /* If MAX regions limit in f/w is hit, warn and proceed. */ pr_warn("%d regions could not be registered for MPIPL as MAX limit is reached!\n", - (opal_fdm->region_cnt - opal_fdm->registered_regions)); + (be16_to_cpu(opal_fdm->region_cnt) - + be16_to_cpu(opal_fdm->registered_regions))); fadump_conf->dump_registered = 1; err = 0; break; @@ -312,7 +320,7 @@ static int opal_fadump_register(struct fw_dump *fadump_conf) * If some regions were registered before OPAL_MPIPL_ADD_RANGE * OPAL call failed, unregister all regions. */ - if ((err < 0) && (opal_fdm->registered_regions > 0)) + if ((err < 0) && (be16_to_cpu(opal_fdm->registered_regions) > 0)) opal_fadump_unregister(fadump_conf); return err; @@ -328,7 +336,7 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf) return -EIO; } - opal_fdm->registered_regions = 0; + opal_fdm->registered_regions = cpu_to_be16(0); fadump_conf->dump_registered = 0; return 0; } @@ -563,19 +571,20 @@ static void opal_fadump_region_show(struct fw_dump *fadump_conf, else fdm_ptr = opal_fdm; - for (i = 0; i < fdm_ptr->region_cnt; i++) { + for (i = 0; i < be16_to_cpu(fdm_ptr->region_cnt); i++) { /* * Only regions that are registered for MPIPL * would have dump data. */ if ((fadump_conf->dump_active) && - (i < fdm_ptr->registered_regions)) - dumped_bytes = fdm_ptr->rgn[i].size; + (i < be16_to_cpu(fdm_ptr->registered_regions))) + dumped_bytes = be64_to_cpu(fdm_ptr->rgn[i].size); seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ", - fdm_ptr->rgn[i].src, fdm_ptr->rgn[i].dest); + be64_to_cpu(fdm_ptr->rgn[i].src), + be64_to_cpu(fdm_ptr->rgn[i].dest)); seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n", - fdm_ptr->rgn[i].size, dumped_bytes); + be64_to_cpu(fdm_ptr->rgn[i].size), dumped_bytes); } /* Dump is active. Show reserved area start address. 
*/ @@ -624,6 +633,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) { const __be32 *prop; unsigned long dn; + __be64 be_addr; u64 addr = 0; int i, len; s64 ret; @@ -680,13 +690,13 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) if (!prop) return; - ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr); - if ((ret != OPAL_SUCCESS) || !addr) { + ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &be_addr); + if ((ret != OPAL_SUCCESS) || !be_addr) { pr_err("Failed to get Kernel metadata (%lld)\n", ret); return; } - addr = be64_to_cpu(addr); + addr = be64_to_cpu(be_addr); pr_debug("Kernel metadata addr: %llx\n", addr); opal_fdm_active = __va(addr); @@ -697,14 +707,14 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) } /* Kernel regions not registered with f/w for MPIPL */ - if (opal_fdm_active->registered_regions == 0) { + if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) { opal_fdm_active = NULL; return; } - ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &addr); - if (addr) { - addr = be64_to_cpu(addr); + ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &be_addr); + if (be_addr) { + addr = be64_to_cpu(be_addr); pr_debug("CPU metadata addr: %llx\n", addr); opal_cpu_metadata = __va(addr); } diff --git a/arch/powerpc/platforms/powernv/opal-fadump.h b/arch/powerpc/platforms/powernv/opal-fadump.h index f1e9ecf548c5..3f715efb0aa6 100644 --- a/arch/powerpc/platforms/powernv/opal-fadump.h +++ b/arch/powerpc/platforms/powernv/opal-fadump.h @@ -31,14 +31,14 @@ * OPAL FADump kernel metadata * * The address of this structure will be registered with f/w for retrieving - * and processing during crash dump. + * in the capture kernel to process the crash dump. */ struct opal_fadump_mem_struct { u8 version; u8 reserved[3]; - u16 region_cnt; /* number of regions */ - u16 registered_regions; /* Regions registered for MPIPL */ - u64 fadumphdr_addr; + __be16 region_cnt; /* number of regions */ + __be16 registered_regions; /* Regions registered for MPIPL */ + __be64 fadumphdr_addr; struct opal_mpipl_region rgn[FADUMP_MAX_MEM_REGS]; } __packed; @@ -135,7 +135,7 @@ static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt, for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) { reg_entry = (struct hdat_fadump_reg_entry *)bufp; val = (cpu_endian ? 
be64_to_cpu(reg_entry->reg_val) : - reg_entry->reg_val); + (u64)(reg_entry->reg_val)); opal_fadump_set_regval_regnum(regs, be32_to_cpu(reg_entry->reg_type), be32_to_cpu(reg_entry->reg_num), diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index c61c3b62c8c6..1d05c168c8fb 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -892,6 +892,7 @@ static void opal_export_attrs(void) kobj = kobject_create_and_add("exports", opal_kobj); if (!kobj) { pr_warn("kobject_create_and_add() of exports failed\n"); + of_node_put(np); return; } diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index 11df4e16a1cc..528946ee7a77 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h @@ -42,4 +42,6 @@ ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count); u32 memcons_get_size(struct memcons *mc); struct memcons *memcons_init(struct device_node *node, const char *mc_prop_name); +void pnv_rng_init(void); + #endif /* _POWERNV_H */ diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c index 69c344c8884f..a99033c3dce7 100644 --- a/arch/powerpc/platforms/powernv/rng.c +++ b/arch/powerpc/platforms/powernv/rng.c @@ -17,6 +17,7 @@ #include #include #include +#include "powernv.h" #define DARN_ERR 0xFFFFFFFFFFFFFFFFul @@ -28,7 +29,6 @@ struct powernv_rng { static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng); - int powernv_hwrng_present(void) { struct powernv_rng *rng; @@ -63,6 +63,8 @@ int powernv_get_random_real_mode(unsigned long *v) struct powernv_rng *rng; rng = raw_cpu_read(powernv_rng); + if (!rng) + return 0; *v = rng_whiten(rng, __raw_rm_readq(rng->regs_real)); @@ -98,9 +100,6 @@ static int initialise_darn(void) return 0; } } - - pr_warn("Unable to use DARN for get_random_seed()\n"); - return -EIO; } @@ -163,32 +162,59 @@ static __init int rng_create(struct device_node *dn) rng_init_per_cpu(rng, dn); - pr_info_once("Registering arch random hook.\n"); - ppc_md.get_random_seed = powernv_get_random_long; return 0; } -static __init int rng_init(void) +static int __init pnv_get_random_long_early(unsigned long *v) { struct device_node *dn; - int rc; - for_each_compatible_node(dn, NULL, "ibm,power-rng") { - rc = rng_create(dn); - if (rc) { - pr_err("Failed creating rng for %pOF (%d).\n", - dn, rc); - continue; - } + if (!slab_is_available()) + return 0; - /* Create devices for hwrng driver */ - of_platform_device_create(dn, NULL, NULL); + if (cmpxchg(&ppc_md.get_random_seed, pnv_get_random_long_early, + NULL) != pnv_get_random_long_early) + return 0; + + for_each_compatible_node(dn, NULL, "ibm,power-rng") + rng_create(dn); + + if (!ppc_md.get_random_seed) + return 0; + return ppc_md.get_random_seed(v); +} + +void __init pnv_rng_init(void) +{ + struct device_node *dn; + + /* Prefer darn over the rest. */ + if (!initialise_darn()) + return; + + dn = of_find_compatible_node(NULL, NULL, "ibm,power-rng"); + if (dn) + ppc_md.get_random_seed = pnv_get_random_long_early; + + of_node_put(dn); +} + +static int __init pnv_rng_late_init(void) +{ + struct device_node *dn; + unsigned long v; + + /* In case it wasn't called during init for some other reason. 
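
pnv_get_random_long_early() above uses cmpxchg() on the ppc_md.get_random_seed hook itself, so that however many CPUs race into it, exactly one performs the device-tree walk and the rest back off. The idiom in isolation (get_seed_early_sketch is an illustrative name; the rng_create() plumbing is omitted):

	static int __init get_seed_early_sketch(unsigned long *v)
	{
		/* Only the caller whose cmpxchg() succeeds runs the setup;
		 * losers see the hook already swapped out and return. */
		if (cmpxchg(&ppc_md.get_random_seed, get_seed_early_sketch,
			    NULL) != get_seed_early_sketch)
			return 0;

		/* ... probe devices, possibly installing a real hook ... */

		if (!ppc_md.get_random_seed)
			return 0;
		return ppc_md.get_random_seed(v);
	}
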
*/ + if (ppc_md.get_random_seed == pnv_get_random_long_early) + pnv_get_random_long_early(&v); + + if (ppc_md.get_random_seed == powernv_get_random_long) { + for_each_compatible_node(dn, NULL, "ibm,power-rng") + of_platform_device_create(dn, NULL, NULL); } - initialise_darn(); - return 0; } -machine_subsys_initcall(powernv, rng_init); +machine_subsys_initcall(powernv, pnv_rng_late_init); diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 4426a109ec2f..1a2f12dc0552 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -193,6 +193,8 @@ static void __init pnv_setup_arch(void) pnv_check_guarded_cores(); /* XXX PMCS */ + + pnv_rng_init(); } static void __init pnv_init(void) diff --git a/arch/powerpc/platforms/powernv/ultravisor.c b/arch/powerpc/platforms/powernv/ultravisor.c index e4a00ad06f9d..67c8c4b2d8b1 100644 --- a/arch/powerpc/platforms/powernv/ultravisor.c +++ b/arch/powerpc/platforms/powernv/ultravisor.c @@ -55,6 +55,7 @@ static int __init uv_init(void) return -ENODEV; uv_memcons = memcons_init(node, "memcons"); + of_node_put(node); if (!uv_memcons) return -ENOENT; diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c index 3d21fce254b7..dd9c23c09781 100644 --- a/arch/powerpc/platforms/powernv/vas-fault.c +++ b/arch/powerpc/platforms/powernv/vas-fault.c @@ -352,7 +352,7 @@ int vas_setup_fault_window(struct vas_instance *vinst) vas_init_rx_win_attr(&attr, VAS_COP_TYPE_FAULT); attr.rx_fifo_size = vinst->fault_fifo_size; - attr.rx_fifo = vinst->fault_fifo; + attr.rx_fifo = __pa(vinst->fault_fifo); /* * Max creds is based on number of CRBs can fit in the FIFO. diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index 7ba0840fc3b5..3a86cdd5ae6c 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -403,7 +403,7 @@ static void init_winctx_regs(struct vas_window *window, * * See also: Design note in function header. */ - val = __pa(winctx->rx_fifo); + val = winctx->rx_fifo; val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0); write_hvwc_reg(window, VREG(LFIFO_BAR), val); @@ -737,7 +737,7 @@ static void init_winctx_for_rxwin(struct vas_window *rxwin, */ winctx->fifo_disable = true; winctx->intr_disable = true; - winctx->rx_fifo = NULL; + winctx->rx_fifo = 0; } winctx->lnotify_lpid = rxattr->lnotify_lpid; diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h index 70f793e8f6cc..1f6e73809205 100644 --- a/arch/powerpc/platforms/powernv/vas.h +++ b/arch/powerpc/platforms/powernv/vas.h @@ -383,7 +383,7 @@ struct vas_window { * is a container for the register fields in the window context. 
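
The vas-fault.c and vas-window.c hunks above and the vas.h hunk just below are one logical change: vas_winctx.rx_fifo becomes a u64 physical address, so the __pa() conversion moves from the LFIFO_BAR register write to the point where the fault FIFO attribute is filled in. Reduced to the two affected lines:

	/* before: rx_fifo held a kernel virtual address, converted late */
	val = __pa(winctx->rx_fifo);

	/* after: attr.rx_fifo = __pa(vinst->fault_fifo) upstream of this,
	 * so the register write takes the value as-is */
	val = winctx->rx_fifo;
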
*/ struct vas_winctx { - void *rx_fifo; + u64 rx_fifo; int rx_fifo_size; int wcreds_max; int rsvd_txbuf_count; diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 2f73cb5bf12d..f386a7bc3811 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -59,18 +59,31 @@ static int mobility_rtas_call(int token, char *buf, s32 scope) return rc; } -static int delete_dt_node(__be32 phandle) +static int delete_dt_node(struct device_node *dn) { - struct device_node *dn; + struct device_node *pdn; + bool is_platfac; - dn = of_find_node_by_phandle(be32_to_cpu(phandle)); - if (!dn) - return -ENOENT; + pdn = of_get_parent(dn); + is_platfac = of_node_is_type(dn, "ibm,platform-facilities") || + of_node_is_type(pdn, "ibm,platform-facilities"); + of_node_put(pdn); + + /* + * The drivers that bind to nodes in the platform-facilities + * hierarchy don't support node removal, and the removal directive + * from firmware is always followed by an add of an equivalent + * node. The capability (e.g. RNG, encryption, compression) + * represented by the node is never interrupted by the migration. + * So ignore changes to this part of the tree. + */ + if (is_platfac) { + pr_notice("ignoring remove operation for %pOFfp\n", dn); + return 0; + } pr_debug("removing node %pOFfp\n", dn); - dlpar_detach_node(dn); - of_node_put(dn); return 0; } @@ -135,10 +148,9 @@ static int update_dt_property(struct device_node *dn, struct property **prop, return 0; } -static int update_dt_node(__be32 phandle, s32 scope) +static int update_dt_node(struct device_node *dn, s32 scope) { struct update_props_workarea *upwa; - struct device_node *dn; struct property *prop = NULL; int i, rc, rtas_rc; char *prop_data; @@ -155,14 +167,8 @@ static int update_dt_node(__be32 phandle, s32 scope) if (!rtas_buf) return -ENOMEM; - dn = of_find_node_by_phandle(be32_to_cpu(phandle)); - if (!dn) { - kfree(rtas_buf); - return -ENOENT; - } - upwa = (struct update_props_workarea *)&rtas_buf[0]; - upwa->phandle = phandle; + upwa->phandle = cpu_to_be32(dn->phandle); do { rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf, @@ -221,25 +227,30 @@ static int update_dt_node(__be32 phandle, s32 scope) cond_resched(); } while (rtas_rc == 1); - of_node_put(dn); kfree(rtas_buf); return 0; } -static int add_dt_node(__be32 parent_phandle, __be32 drc_index) +static int add_dt_node(struct device_node *parent_dn, __be32 drc_index) { struct device_node *dn; - struct device_node *parent_dn; int rc; - parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle)); - if (!parent_dn) + dn = dlpar_configure_connector(drc_index, parent_dn); + if (!dn) return -ENOENT; - dn = dlpar_configure_connector(drc_index, parent_dn); - if (!dn) { - of_node_put(parent_dn); - return -ENOENT; + /* + * Since delete_dt_node() ignores this node type, this is the + * necessary counterpart. We also know that a platform-facilities + * node returned from dlpar_configure_connector() has children + * attached, and dlpar_attach_node() only adds the parent, leaking + * the children. So ignore these on the add side for now. 
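
delete_dt_node(), update_dt_node() and add_dt_node() above now take a device_node instead of a raw phandle: the lookup, the failure warning and the reference drop all move into the caller's loop (shown in the next hunk), leaving one symmetric get/put pair per action. In outline:

	np = of_find_node_by_phandle(be32_to_cpu(phandle));
	if (!np) {
		pr_warn("Failed lookup: phandle 0x%x for action 0x%x\n",
			be32_to_cpu(phandle), action);
		continue;
	}

	switch (action) {
	case DELETE_DT_NODE:
		delete_dt_node(np);
		break;
	case UPDATE_DT_NODE:
		update_dt_node(np, scope);
		break;
	case ADD_DT_NODE:
		add_dt_node(np, *data++);	/* drc_index follows the phandle */
		break;
	}

	of_node_put(np);	/* matches of_find_node_by_phandle() */

The same put-on-every-path discipline motivates the of_node_put() additions in the axon_msi, spufs, ultravisor, dart_iommu, fsl_msi, fsl_rio, icp-opal and xive hunks elsewhere in this series.
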
+ */ + if (of_node_is_type(dn, "ibm,platform-facilities")) { + pr_notice("ignoring add operation for %pOF\n", dn); + dlpar_free_cc_nodes(dn); + return 0; } rc = dlpar_attach_node(dn, parent_dn); @@ -248,7 +259,6 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index) pr_debug("added node %pOFfp\n", dn); - of_node_put(parent_dn); return rc; } @@ -281,22 +291,31 @@ int pseries_devicetree_update(s32 scope) data++; for (i = 0; i < node_count; i++) { + struct device_node *np; __be32 phandle = *data++; __be32 drc_index; + np = of_find_node_by_phandle(be32_to_cpu(phandle)); + if (!np) { + pr_warn("Failed lookup: phandle 0x%x for action 0x%x\n", + be32_to_cpu(phandle), action); + continue; + } + switch (action) { case DELETE_DT_NODE: - delete_dt_node(phandle); + delete_dt_node(np); break; case UPDATE_DT_NODE: - update_dt_node(phandle, scope); + update_dt_node(np, scope); break; case ADD_DT_NODE: drc_index = *data++; - add_dt_node(phandle, drc_index); + add_dt_node(np, drc_index); break; } + of_node_put(np); cond_resched(); } } diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 593840847cd3..ada9601aaff1 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -114,4 +114,6 @@ int dlpar_workqueue_init(void); void pseries_setup_security_mitigations(void); void pseries_lpar_read_hblkrm_characteristics(void); +void pseries_rng_init(void); + #endif /* _PSERIES_PSERIES_H */ diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c index 6268545947b8..6ddfdeaace9e 100644 --- a/arch/powerpc/platforms/pseries/rng.c +++ b/arch/powerpc/platforms/pseries/rng.c @@ -10,6 +10,7 @@ #include #include #include +#include "pseries.h" static int pseries_get_random_long(unsigned long *v) @@ -24,19 +25,13 @@ static int pseries_get_random_long(unsigned long *v) return 0; } -static __init int rng_init(void) +void __init pseries_rng_init(void) { struct device_node *dn; dn = of_find_compatible_node(NULL, NULL, "ibm,random"); if (!dn) - return -ENODEV; - - pr_info("Registering arch random hook.\n"); - + return; ppc_md.get_random_seed = pseries_get_random_long; - of_node_put(dn); - return 0; } -machine_subsys_initcall(pseries, rng_init); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 47dfada140e1..0eac9ca782c2 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -824,6 +824,8 @@ static void __init pSeries_setup_arch(void) if (swiotlb_force == SWIOTLB_FORCE) ppc_swiotlb_enable = 1; + + pseries_rng_init(); } static void pseries_panic(char *str) diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 6b4a34b36d98..8ff9bcfe4b8d 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -403,9 +403,10 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops) } /* Initialize the DART HW */ - if (dart_init(dn) != 0) + if (dart_init(dn) != 0) { + of_node_put(dn); return; - + } /* * U4 supports a DART bypass, we use it for 64-bit capable devices to * improve performance. 
However, that only works for devices connected @@ -418,6 +419,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops) /* Setup pci_dma ops */ set_pci_dma_ops(&dma_iommu_ops); + of_node_put(dn); } #ifdef CONFIG_PM diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 808e7118abfc..d276c5e96445 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -211,8 +211,10 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) dev_err(&pdev->dev, "node %pOF has an invalid fsl,msi phandle %u\n", hose->dn, np->phandle); + of_node_put(np); return -EINVAL; } + of_node_put(np); } for_each_pci_msi_entry(entry, pdev) { diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 040b9d01c079..4dd152450e78 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -520,6 +520,7 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary) struct resource rsrc; const int *bus_range; u8 hdr_type, progif; + u32 class_code; struct device_node *dev; struct ccsr_pci __iomem *pci; u16 temp; @@ -593,6 +594,13 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary) PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS; if (fsl_pcie_check_link(hose)) hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK; + /* Fix Class Code to PCI_CLASS_BRIDGE_PCI_NORMAL for pre-3.0 controller */ + if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) { + early_read_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, &class_code); + class_code &= 0xff; + class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; + early_write_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, class_code); + } } else { /* * Set PBFR(PCI Bus Function Register)[10] = 1 to diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h index 1d7a41205695..5ffaa60f1fa0 100644 --- a/arch/powerpc/sysdev/fsl_pci.h +++ b/arch/powerpc/sysdev/fsl_pci.h @@ -18,6 +18,7 @@ struct platform_device; #define PCIE_LTSSM 0x0404 /* PCIE Link Training and Status */ #define PCIE_LTSSM_L0 0x16 /* L0 state */ +#define PCIE_FSL_CSR_CLASSCODE 0x474 /* FSL GPEX CSR */ #define PCIE_IP_REV_2_2 0x02080202 /* PCIE IP block version Rev2.2 */ #define PCIE_IP_REV_3_0 0x02080300 /* PCIE IP block version Rev3.0 */ #define PIWAR_EN 0x80000000 /* Enable */ diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 07c164f7f8cf..3f9f78621cf3 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -505,8 +505,10 @@ int fsl_rio_setup(struct platform_device *dev) if (rc) { dev_err(&dev->dev, "Can't get %pOF property 'reg'\n", rmu_node); + of_node_put(rmu_node); goto err_rmu; } + of_node_put(rmu_node); rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs)); if (!rmu_regs_win) { dev_err(&dev->dev, "Unable to map rmu register window\n"); diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c index 68fd2540b093..7fa520efcefa 100644 --- a/arch/powerpc/sysdev/xics/icp-opal.c +++ b/arch/powerpc/sysdev/xics/icp-opal.c @@ -195,6 +195,7 @@ int icp_opal_init(void) printk("XICS: Using OPAL ICP fallbacks\n"); + of_node_put(np); return 0; } diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index b57eeaff7bb3..38e8b9896174 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -710,6 +710,7 @@ static bool xive_get_max_prio(u8 *max_prio) } reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len); + 
of_node_put(rootdn); if (!reg) { pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n"); return false; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 1b894c327578..557c4a8c4087 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -35,7 +35,7 @@ config RISCV select CLINT_TIMER if !MMU select COMMON_CLK select EDAC_SUPPORT - select GENERIC_ARCH_TOPOLOGY if SMP + select GENERIC_ARCH_TOPOLOGY select GENERIC_ATOMIC64 if !64BIT select GENERIC_CLOCKEVENTS select GENERIC_EARLY_IOREMAP diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index db9505c658ea..1bb1bf1141cc 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -37,6 +37,7 @@ else endif ifeq ($(CONFIG_LD_IS_LLD),y) +ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 150000; echo $$?),0) KBUILD_CFLAGS += -mno-relax KBUILD_AFLAGS += -mno-relax ifneq ($(LLVM_IAS),1) @@ -44,6 +45,7 @@ ifneq ($(LLVM_IAS),1) KBUILD_AFLAGS += -Wa,-mno-relax endif endif +endif # ISA string setting riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima @@ -73,6 +75,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y) endif KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) +KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax) # GCC versions that support the "-mstrict-align" option default to allowing # unaligned accesses. While unaligned accesses are explicitly allowed in the diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi index 7db861053483..64c06c9b41dc 100644 --- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi +++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi @@ -166,7 +166,7 @@ clocks = <&prci PRCI_CLK_TLCLK>; status = "disabled"; }; - dma: dma@3000000 { + dma: dma-controller@3000000 { compatible = "sifive,fu540-c000-pdma"; reg = <0x0 0x3000000 0x0 0x8000>; interrupt-parent = <&plic0>; diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts index 60846e88ae4b..dddabfbbc7a9 100644 --- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts +++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts @@ -3,6 +3,8 @@ #include "fu540-c000.dtsi" #include +#include +#include /* Clock frequency (in Hz) of the PCB crystal for rtcclk */ #define RTCCLK_FREQ 1000000 @@ -46,6 +48,42 @@ compatible = "gpio-restart"; gpios = <&gpio 10 GPIO_ACTIVE_LOW>; }; + + led-controller { + compatible = "pwm-leds"; + + led-d1 { + pwms = <&pwm0 0 7812500 PWM_POLARITY_INVERTED>; + active-low; + color = ; + max-brightness = <255>; + label = "d1"; + }; + + led-d2 { + pwms = <&pwm0 1 7812500 PWM_POLARITY_INVERTED>; + active-low; + color = ; + max-brightness = <255>; + label = "d2"; + }; + + led-d3 { + pwms = <&pwm0 2 7812500 PWM_POLARITY_INVERTED>; + active-low; + color = ; + max-brightness = <255>; + label = "d3"; + }; + + led-d4 { + pwms = <&pwm0 3 7812500 PWM_POLARITY_INVERTED>; + active-low; + color = ; + max-brightness = <255>; + label = "d4"; + }; + }; }; &uart0 { diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index c025a746a148..391dd869db64 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -114,9 +114,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr)) __io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr)) __io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr)) __io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr)) -#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count) -#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count) -#define 
insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count) +#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count) +#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count) +#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count) __io_writes_outs(writes, u8, b, __io_bw(), __io_aw()) __io_writes_outs(writes, u16, w, __io_bw(), __io_aw()) @@ -128,22 +128,22 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw()) __io_writes_outs(outs, u8, b, __io_pbw(), __io_paw()) __io_writes_outs(outs, u16, w, __io_pbw(), __io_paw()) __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw()) -#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count) -#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count) -#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count) +#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count) +#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count) +#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count) #ifdef CONFIG_64BIT __io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr)) #define readsq(addr, buffer, count) __readsq(addr, buffer, count) __io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr)) -#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count) +#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count) __io_writes_outs(writes, u64, q, __io_bw(), __io_aw()) #define writesq(addr, buffer, count) __writesq(addr, buffer, count) __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) -#define outsq(addr, buffer, count) __outsq((void __iomem *)addr, buffer, count) +#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count) #endif #include diff --git a/arch/riscv/include/asm/irq_work.h b/arch/riscv/include/asm/irq_work.h index d6c277992f76..b53891964ae0 100644 --- a/arch/riscv/include/asm/irq_work.h +++ b/arch/riscv/include/asm/irq_work.h @@ -4,7 +4,7 @@ static inline bool arch_irq_work_has_interrupt(void) { - return true; + return IS_ENABLED(CONFIG_SMP); } extern void arch_irq_work_raise(void); #endif /* _ASM_RISCV_IRQ_WORK_H */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 73e8b5e5bb65..b16304fdf448 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -470,6 +470,7 @@ extern void *dtb_early_va; extern uintptr_t dtb_early_pa; void setup_bootmem(void); void paging_init(void); +void misc_mem_init(void); #define FIRST_USER_ADDRESS 0 diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index d79ae9d98999..c55e0a1f07a0 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -80,6 +80,7 @@ struct thread_info { #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing */ #define TIF_SECCOMP 8 /* syscall secure computing */ +#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) @@ -88,9 +89,11 @@ struct thread_info { #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_WORK_MASK \ - (_TIF_NOTIFY_RESUME | 
_TIF_SIGPENDING | _TIF_NEED_RESCHED) + (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \ + _TIF_NOTIFY_SIGNAL) #define _TIF_SYSCALL_WORK \ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \ diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h index 81de51e6aa32..a06697846e69 100644 --- a/arch/riscv/include/asm/timex.h +++ b/arch/riscv/include/asm/timex.h @@ -41,7 +41,7 @@ static inline u32 get_cycles_hi(void) static inline unsigned long random_get_entropy(void) { if (unlikely(clint_time_val == NULL)) - return 0; + return random_get_entropy_fallback(); return get_cycles(); } #define random_get_entropy() random_get_entropy() diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c index 024159298231..1aa540350abd 100644 --- a/arch/riscv/kernel/efi.c +++ b/arch/riscv/kernel/efi.c @@ -65,7 +65,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) if (md->attribute & EFI_MEMORY_RO) { val = pte_val(pte) & ~_PAGE_WRITE; - val = pte_val(pte) | _PAGE_READ; + val |= _PAGE_READ; pte = __pte(val); } if (md->attribute & EFI_MEMORY_XP) { diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 1a819c18bede..47d1411db0a9 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -261,6 +261,7 @@ clear_bss_done: REG_S a0, (a2) /* Initialize page tables and relocate to virtual addresses */ + la tp, init_task la sp, init_thread_union + THREAD_SIZE mv a0, s1 call setup_vm diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c index 3fe7a5296aa5..1612e11f7bf6 100644 --- a/arch/riscv/kernel/patch.c +++ b/arch/riscv/kernel/patch.c @@ -100,7 +100,7 @@ static int patch_text_cb(void *data) struct patch_insn *patch = data; int ret = 0; - if (atomic_inc_return(&patch->cpu_count) == 1) { + if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) { ret = patch_text_nosync(patch->addr, &patch->insn, GET_INSN_LENGTH(patch->insn)); diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index dd5f985b1f40..7868050ff426 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -111,8 +111,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, { struct pt_regs *childregs = task_pt_regs(p); + memset(&p->thread.s, 0, sizeof(p->thread.s)); + /* p->thread holds context to be restored by __switch_to() */ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* Kernel thread */ memset(childregs, 0, sizeof(struct pt_regs)); childregs->gp = gp_in_global; diff --git a/arch/riscv/kernel/reset.c b/arch/riscv/kernel/reset.c index ee5878d968cc..9c842c41684a 100644 --- a/arch/riscv/kernel/reset.c +++ b/arch/riscv/kernel/reset.c @@ -12,7 +12,7 @@ static void default_power_off(void) wait_for_interrupt(); } -void (*pm_power_off)(void) = default_power_off; +void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); void machine_restart(char *cmd) @@ -23,10 +23,16 @@ void machine_restart(char *cmd) void machine_halt(void) { - pm_power_off(); + if (pm_power_off != NULL) + pm_power_off(); + else + default_power_off(); } void machine_power_off(void) { - pm_power_off(); + if (pm_power_off != NULL) + pm_power_off(); + else + default_power_off(); } diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 117f3212a8e4..8e78a8ab6a34 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -54,10 +54,17 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); static void 
__init parse_dtb(void) { /* Early scan of device tree from init memory */ - if (early_init_dt_scan(dtb_early_va)) - return; + if (early_init_dt_scan(dtb_early_va)) { + const char *name = of_flat_dt_get_machine_name(); + + if (name) { + pr_info("Machine model: %s\n", name); + dump_stack_set_arch_desc("%s (DT)", name); + } + } else { + pr_err("No DTB passed to the kernel\n"); + } - pr_err("No DTB passed to the kernel\n"); #ifdef CONFIG_CMDLINE_FORCE strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); pr_info("Forcing kernel command line to: %s\n", boot_command_line); @@ -89,6 +96,8 @@ void __init setup_arch(char **cmdline_p) else pr_err("No DTB found in kernel mappings\n"); #endif + early_init_fdt_scan_reserved_mem(); + misc_mem_init(); #ifdef CONFIG_SWIOTLB swiotlb_init(1); diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index bc6841867b51..50a8225c58bc 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -121,6 +121,8 @@ SYSCALL_DEFINE0(rt_sigreturn) if (restore_altstack(&frame->uc.uc_stack)) goto badframe; + regs->cause = -1UL; + return regs->a0; badframe: @@ -310,7 +312,7 @@ asmlinkage __visible void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { /* Handle pending signal delivery */ - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 0b04e0eae3ab..0e0aed380e28 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -46,6 +46,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) int cpuid; int ret; + store_cpu_topology(smp_processor_id()); + /* This covers non-smp usecase mandated by "nosmp" option */ if (max_cpus == 0) return; @@ -152,8 +154,8 @@ asmlinkage __visible void smp_callin(void) mmgrab(mm); current->active_mm = mm; + store_cpu_topology(curr_cpuid); notify_cpu_starting(curr_cpuid); - update_siblings_masks(curr_cpuid); set_cpu_online(curr_cpuid, 1); /* diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index 12f8a7fce78b..bb402685057a 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -18,10 +18,6 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len, if (unlikely(offset & (~PAGE_MASK >> page_shift_offset))) return -EINVAL; - if ((prot & PROT_WRITE) && (prot & PROT_EXEC)) - if (unlikely(!(prot & PROT_READ))) - return -EINVAL; - return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT - page_shift_offset)); } diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index ad14f4466d92..c1a13011fb8e 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -43,6 +44,9 @@ void die(struct pt_regs *regs, const char *str) ret = notify_die(DIE_OOPS, str, regs, 0, regs->cause, SIGSEGV); + if (regs && kexec_should_crash(current)) + crash_kexec(regs); + bust_spinlocks(0); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); spin_unlock_irq(&die_lock); diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 24d936c147cd..f4ac7ff56bce 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -17,6 +17,7 @@ vdso-syms += flush_icache obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o ccflags-y := -fno-stack-protector +ccflags-y += -DDISABLE_BRANCH_PROFILING ifneq 
($(c-gettimeofday-y),) CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y) @@ -28,9 +29,12 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) obj-y += vdso.o vdso-syms.o CPPFLAGS_vdso.lds += -P -C -U$(ARCH) +ifneq ($(filter vgettimeofday, $(vdso-syms)),) +CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY +endif # Disable -pg to prevent insert call site -CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os +CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) # Disable profiling and instrumentation for VDSO code GCOV_PROFILE := n diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S index e6f558bca71b..b3e58402c342 100644 --- a/arch/riscv/kernel/vdso/vdso.lds.S +++ b/arch/riscv/kernel/vdso/vdso.lds.S @@ -64,9 +64,11 @@ VERSION LINUX_4.15 { global: __vdso_rt_sigreturn; +#ifdef HAS_VGETTIMEOFDAY __vdso_gettimeofday; __vdso_clock_gettime; __vdso_clock_getres; +#endif __vdso_getcpu; __vdso_flush_icache; local: *; diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 3c8b9e433c67..8f84bbe0ac33 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -167,7 +167,8 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma) } break; case EXC_LOAD_PAGE_FAULT: - if (!(vma->vm_flags & VM_READ)) { + /* Write implies read */ + if (!(vma->vm_flags & (VM_READ | VM_WRITE))) { return true; } break; diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index e8921e78a292..6c2f38aac544 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -41,13 +42,14 @@ struct pt_alloc_ops { #endif }; +static phys_addr_t dma32_phys_limit __ro_after_init; + static void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; #ifdef CONFIG_ZONE_DMA32 - max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, - (unsigned long) PFN_PHYS(max_low_pfn))); + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit); #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; @@ -193,6 +195,7 @@ void __init setup_bootmem(void) max_pfn = PFN_DOWN(dram_end); max_low_pfn = max_pfn; + dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); set_max_mapnr(max_low_pfn); #ifdef CONFIG_BLK_DEV_INITRD @@ -205,7 +208,7 @@ void __init setup_bootmem(void) */ memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); - early_init_fdt_scan_reserved_mem(); + dma_contiguous_reserve(dma32_phys_limit); memblock_allow_resize(); memblock_dump_all(); } @@ -665,8 +668,12 @@ static void __init resource_init(void) void __init paging_init(void) { setup_vm_final(); - sparse_init(); setup_zero_page(); +} + +void __init misc_mem_init(void) +{ + sparse_init(); zone_sizes_init(); resource_init(); } diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c index 19fecb362d81..09f6be19ba7b 100644 --- a/arch/riscv/mm/pageattr.c +++ b/arch/riscv/mm/pageattr.c @@ -118,10 +118,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, if (!numpages) return 0; - mmap_read_lock(&init_mm); + mmap_write_lock(&init_mm); ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks); - mmap_read_unlock(&init_mm); + mmap_write_unlock(&init_mm); flush_tlb_kernel_range(start, end); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 896b68e541b2..878993982e39 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -507,7 +507,6 @@ config KEXEC config KEXEC_FILE bool "kexec file based system call" select KEXEC_CORE - select BUILD_BIN2C 
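The riscv/mm/fault.c hunk above encodes an ISA property rather than a policy choice: the RISC-V privileged specification reserves the write-only permission encoding, so a PTE that allows writes always allows reads too, and a load fault on a VM_WRITE-only VMA must not be treated as an access error. A hedged sketch of the resulting check, with an illustrative helper name:

    #include <linux/mm.h>

    /* RISC-V has no write-without-read mappings: writable implies
     * readable, so a load is only an access error when the VMA
     * permits neither. */
    static bool load_fault_is_access_error(const struct vm_area_struct *vma)
    {
            return !(vma->vm_flags & (VM_READ | VM_WRITE));
    }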
depends on CRYPTO depends on CRYPTO_SHA256 depends on CRYPTO_SHA256_S390 diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 92506918da63..a8cb00f30a7c 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -32,6 +32,16 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g) KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,)) + +ifdef CONFIG_CC_IS_GCC + ifeq ($(call cc-ifversion, -ge, 1200, y), y) + ifeq ($(call cc-ifversion, -lt, 1300, y), y) + KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) + KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, array-bounds) + endif + endif +endif + UTS_MACHINE := s390x STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384) CHECKFLAGS += -D__s390__ -D__s390x__ diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S index 9427e2cd0c15..11bf3919610e 100644 --- a/arch/s390/boot/compressed/vmlinux.lds.S +++ b/arch/s390/boot/compressed/vmlinux.lds.S @@ -91,8 +91,17 @@ SECTIONS _compressed_start = .; *(.vmlinux.bin.compressed) _compressed_end = .; - FILL(0xff); - . = ALIGN(4096); + } + +#define SB_TRAILER_SIZE 32 + /* Trailer needed for Secure Boot */ + . += SB_TRAILER_SIZE; /* make sure .sb.trailer does not overwrite the previous section */ + . = ALIGN(4096) - SB_TRAILER_SIZE; + .sb.trailer : { + QUAD(0) + QUAD(0) + QUAD(0) + QUAD(0x000000207a49504c) } _end = .; diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 54c7536f2482..1023e9d43d44 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -701,7 +701,7 @@ static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw, unsigned int nbytes) { gw->walk_bytes_remain -= nbytes; - scatterwalk_unmap(&gw->walk); + scatterwalk_unmap(gw->walk_ptr); scatterwalk_advance(&gw->walk, nbytes); scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); gw->walk_ptr = NULL; @@ -776,7 +776,7 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) goto out; } - scatterwalk_unmap(&gw->walk); + scatterwalk_unmap(gw->walk_ptr); gw->walk_ptr = NULL; gw->ptr = gw->buf; diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c index 4cbb4b6d85a8..1f2d40993c4d 100644 --- a/arch/s390/crypto/arch_random.c +++ b/arch/s390/crypto/arch_random.c @@ -2,126 +2,17 @@ /* * s390 arch random implementation. * - * Copyright IBM Corp. 2017, 2018 + * Copyright IBM Corp. 2017, 2020 * Author(s): Harald Freudenberger - * - * The s390_arch_random_generate() function may be called from random.c - * in interrupt context. So this implementation does the best to be very - * fast. There is a buffer of random data which is asynchronously checked - * and filled by a workqueue thread. - * If there are enough bytes in the buffer the s390_arch_random_generate() - * just delivers these bytes. Otherwise false is returned until the - * worker thread refills the buffer. - * The worker fills the rng buffer by pulling fresh entropy from the - * high quality (but slow) true hardware random generator. This entropy - * is then spread over the buffer with an pseudo random generator PRNG. - * As the arch_get_random_seed_long() fetches 8 bytes and the calling - * function add_interrupt_randomness() counts this as 1 bit entropy the - * distribution needs to make sure there is in fact 1 bit entropy contained - * in 8 bytes of the buffer. 
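The vmlinux.lds.S hunk above replaces the 0xff fill-and-align at the end of the compressed section with a dedicated 32-byte trailer, placed so the image still ends exactly on a 4 KiB boundary. Read as big-endian bytes, the final QUAD appears to spell the ASCII tag " zIPL"; that reading is an inference from the constant, not something the patch itself states. A throwaway user-space check of the decoding:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t magic = 0x000000207a49504cULL; /* QUAD from the trailer */
            int i;

            /* s390 is big-endian, so print most significant byte first */
            for (i = 7; i >= 0; i--) {
                    unsigned char c = (magic >> (i * 8)) & 0xff;
                    putchar(c ? c : '.');
            }
            putchar('\n'); /* prints "... zIPL" */
            return 0;
    }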
The current values pull 32 byte entropy - * and scatter this into a 2048 byte buffer. So 8 byte in the buffer - * will contain 1 bit of entropy. - * The worker thread is rescheduled based on the charge level of the - * buffer but at least with 500 ms delay to avoid too much CPU consumption. - * So the max. amount of rng data delivered via arch_get_random_seed is - * limited to 4k bytes per second. */ #include #include #include -#include #include -#include #include DEFINE_STATIC_KEY_FALSE(s390_arch_random_available); atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0); EXPORT_SYMBOL(s390_arch_random_counter); - -#define ARCH_REFILL_TICKS (HZ/2) -#define ARCH_PRNG_SEED_SIZE 32 -#define ARCH_RNG_BUF_SIZE 2048 - -static DEFINE_SPINLOCK(arch_rng_lock); -static u8 *arch_rng_buf; -static unsigned int arch_rng_buf_idx; - -static void arch_rng_refill_buffer(struct work_struct *); -static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer); - -bool s390_arch_random_generate(u8 *buf, unsigned int nbytes) -{ - /* max hunk is ARCH_RNG_BUF_SIZE */ - if (nbytes > ARCH_RNG_BUF_SIZE) - return false; - - /* lock rng buffer */ - if (!spin_trylock(&arch_rng_lock)) - return false; - - /* try to resolve the requested amount of bytes from the buffer */ - arch_rng_buf_idx -= nbytes; - if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) { - memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes); - atomic64_add(nbytes, &s390_arch_random_counter); - spin_unlock(&arch_rng_lock); - return true; - } - - /* not enough bytes in rng buffer, refill is done asynchronously */ - spin_unlock(&arch_rng_lock); - - return false; -} -EXPORT_SYMBOL(s390_arch_random_generate); - -static void arch_rng_refill_buffer(struct work_struct *unused) -{ - unsigned int delay = ARCH_REFILL_TICKS; - - spin_lock(&arch_rng_lock); - if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) { - /* buffer is exhausted and needs refill */ - u8 seed[ARCH_PRNG_SEED_SIZE]; - u8 prng_wa[240]; - /* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */ - cpacf_trng(NULL, 0, seed, sizeof(seed)); - /* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */ - memset(prng_wa, 0, sizeof(prng_wa)); - cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, - &prng_wa, NULL, 0, seed, sizeof(seed)); - cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, - &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0); - arch_rng_buf_idx = ARCH_RNG_BUF_SIZE; - } - delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE; - spin_unlock(&arch_rng_lock); - - /* kick next check */ - queue_delayed_work(system_long_wq, &arch_rng_work, delay); -} - -static int __init s390_arch_random_init(void) -{ - /* all the needed PRNO subfunctions available ? 
*/ - if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) && - cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) { - - /* alloc arch random working buffer */ - arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL); - if (!arch_rng_buf) - return -ENOMEM; - - /* kick worker queue job to fill the random buffer */ - queue_delayed_work(system_long_wq, - &arch_rng_work, ARCH_REFILL_TICKS); - - /* enable arch random to the outside world */ - static_branch_enable(&s390_arch_random_available); - } - - return 0; -} -arch_initcall(s390_arch_random_init); diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index f0bc4dc3e9bf..6511d15ace45 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -437,7 +437,7 @@ __init int hypfs_diag_init(void) int rc; if (diag204_probe()) { - pr_err("The hardware system does not support hypfs\n"); + pr_info("The hardware system does not support hypfs\n"); return -ENODATA; } diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 5c97f48cea91..ee919bfc8186 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -496,9 +496,9 @@ fail_hypfs_sprp_exit: hypfs_vm_exit(); fail_hypfs_diag_exit: hypfs_diag_exit(); + pr_err("Initialization of hypfs failed with rc=%i\n", rc); fail_dbfs_exit: hypfs_dbfs_exit(); - pr_err("Initialization of hypfs failed with rc=%i\n", rc); return rc; } device_initcall(hypfs_init) diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h index de61ce562052..4120c428dc37 100644 --- a/arch/s390/include/asm/archrandom.h +++ b/arch/s390/include/asm/archrandom.h @@ -2,7 +2,7 @@ /* * Kernel interface for the s390 arch_random_* functions * - * Copyright IBM Corp. 2017 + * Copyright IBM Corp. 2017, 2022 * * Author: Harald Freudenberger * @@ -14,13 +14,13 @@ #ifdef CONFIG_ARCH_RANDOM #include +#include #include +#include DECLARE_STATIC_KEY_FALSE(s390_arch_random_available); extern atomic64_t s390_arch_random_counter; -bool s390_arch_random_generate(u8 *buf, unsigned int nbytes); - static inline bool __must_check arch_get_random_long(unsigned long *v) { return false; @@ -33,16 +33,22 @@ static inline bool __must_check arch_get_random_int(unsigned int *v) static inline bool __must_check arch_get_random_seed_long(unsigned long *v) { - if (static_branch_likely(&s390_arch_random_available)) { - return s390_arch_random_generate((u8 *)v, sizeof(*v)); + if (static_branch_likely(&s390_arch_random_available) && + in_task()) { + cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v)); + atomic64_add(sizeof(*v), &s390_arch_random_counter); + return true; } return false; } static inline bool __must_check arch_get_random_seed_int(unsigned int *v) { - if (static_branch_likely(&s390_arch_random_available)) { - return s390_arch_random_generate((u8 *)v, sizeof(*v)); + if (static_branch_likely(&s390_arch_random_available) && + in_task()) { + cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v)); + atomic64_add(sizeof(*v), &s390_arch_random_counter); + return true; } return false; } diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index 26f9144562c9..e1d0b2aaaddd 100644 --- a/arch/s390/include/asm/futex.h +++ b/arch/s390/include/asm/futex.h @@ -16,7 +16,8 @@ "3: jl 1b\n" \ " lhi %0,0\n" \ "4: sacf 768\n" \ - EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ + EX_TABLE(0b,4b) EX_TABLE(1b,4b) \ + EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ "=m" (*uaddr) \ : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ diff --git a/arch/s390/include/asm/gmap.h 
b/arch/s390/include/asm/gmap.h index 40264f60b0da..f4073106e1f3 100644 --- a/arch/s390/include/asm/gmap.h +++ b/arch/s390/include/asm/gmap.h @@ -148,4 +148,6 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4], unsigned long gaddr, unsigned long vmaddr); int gmap_mark_unmergeable(void); void s390_reset_acc(struct mm_struct *mm); +void s390_unlist_old_asce(struct gmap *gmap); +int s390_replace_asce(struct gmap *gmap); #endif /* _ASM_S390_GMAP_H */ diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index 60f9241e5e4a..d3642fb634bd 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -28,9 +28,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, static inline int prepare_hugepage_range(struct file *file, unsigned long addr, unsigned long len) { - if (len & ~HPAGE_MASK) + struct hstate *h = hstate_file(file); + + if (len & ~huge_page_mask(h)) return -EINVAL; - if (addr & ~HPAGE_MASK) + if (addr & ~huge_page_mask(h)) return -EINVAL; return 0; } diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 7f3c9ac34bd8..63098df81c9f 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h @@ -9,6 +9,8 @@ #ifndef _S390_KEXEC_H #define _S390_KEXEC_H +#include + #include #include #include @@ -83,4 +85,12 @@ struct kimage_arch { extern const struct kexec_file_ops s390_kexec_image_ops; extern const struct kexec_file_ops s390_kexec_elf_ops; +#ifdef CONFIG_KEXEC_FILE +struct purgatory_info; +int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add +#endif #endif /*_S390_KEXEC_H */ diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h index b5f545db461a..60e101b8460c 100644 --- a/arch/s390/include/asm/preempt.h +++ b/arch/s390/include/asm/preempt.h @@ -46,10 +46,17 @@ static inline bool test_preempt_need_resched(void) static inline void __preempt_count_add(int val) { - if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) - __atomic_add_const(val, &S390_lowcore.preempt_count); - else - __atomic_add(val, &S390_lowcore.preempt_count); + /* + * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES + * enabled, gcc 12 fails to handle __builtin_constant_p(). 
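The hugetlb.h hunk above matters once a platform supports more than one hugepage size, as s390 does with 1 MB and 2 GB pages: the old check validated every request against the compile-time HPAGE_MASK of the default size, so a 2 GB mapping that was only 1 MB-aligned could slip through. A sketch of the generalized check, assuming the usual hstate helpers:

    #include <linux/hugetlb.h>

    static int sketch_hugepage_range_check(struct file *file,
                                           unsigned long addr,
                                           unsigned long len)
    {
            /* mask for the actual page size backing this file */
            unsigned long mask = huge_page_mask(hstate_file(file));

            if ((addr | len) & ~mask)       /* both must be size-aligned */
                    return -EINVAL;
            return 0;
    }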
+ */ + if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) { + if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) { + __atomic_add_const(val, &S390_lowcore.preempt_count); + return; + } + } + __atomic_add(val, &S390_lowcore.preempt_count); } static inline void __preempt_count_sub(int val) diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 13a04fcf7762..0045341ade48 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -65,6 +65,7 @@ void arch_setup_new_exec(void); #define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */ #define TIF_PATCH_PENDING 5 /* pending live patching update */ #define TIF_PGSTE 6 /* New mm's will use 4K page tables */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_ISOLATE_BP 8 /* Run process with isolated BP */ #define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ @@ -82,6 +83,7 @@ void arch_setup_new_exec(void); #define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */ #define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME) +#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL) #define _TIF_SIGPENDING BIT(TIF_SIGPENDING) #define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED) #define _TIF_UPROBE BIT(TIF_UPROBE) diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 289aaff4d365..588aa0f2c842 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -172,6 +172,7 @@ static inline cycles_t get_cycles(void) { return (cycles_t) get_tod_clock() >> 2; } +#define get_cycles get_cycles int get_phys_clock(unsigned long *clock); void init_cpu_timer(void); diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 483051e10db3..e070073930a9 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -150,6 +150,8 @@ int main(void) OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline); /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ OFFSET(__LC_DUMP_REIPL, lowcore, ipib); + OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info); + OFFSET(__LC_OS_INFO, lowcore, os_info); /* hardware defined lowcore locations 0x1000 - 0x18ff */ OFFSET(__LC_MCESAD, lowcore, mcesad); OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2); diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index 205b2e2648aa..f292c3e10671 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -44,7 +44,7 @@ struct save_area { u64 fprs[16]; u32 fpc; u32 prefix; - u64 todpreg; + u32 todpreg; u64 timer; u64 todcmp; u64 vxrs_low[16]; @@ -432,7 +432,7 @@ static void *get_vmcoreinfo_old(unsigned long *size) Elf64_Nhdr note; void *addr; - if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr))) + if (copy_oldmem_kernel(&addr, (void *)__LC_VMCORE_INFO, sizeof(addr))) return NULL; memset(nt_name, 0, sizeof(nt_name)); if (copy_oldmem_kernel(¬e, addr, sizeof(note))) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 963e8cb936e2..88ecbcf097a3 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -52,7 +52,8 @@ STACK_SIZE = 1 << STACK_SHIFT STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ - _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING) + _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING | \ + _TIF_NOTIFY_SIGNAL) _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 
_TIF_SYSCALL_TRACEPOINT) _CIF_WORK = (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU) @@ -481,8 +482,8 @@ ENTRY(system_call) #endif TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART jo .Lsysc_syscall_restart - TSTMSK __TI_flags(%r12),_TIF_SIGPENDING - jo .Lsysc_sigpending + TSTMSK __TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL) + jnz .Lsysc_sigpending TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME jo .Lsysc_notify_resume TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) @@ -863,8 +864,8 @@ ENTRY(io_int_handler) TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING jo .Lio_patch_pending #endif - TSTMSK __TI_flags(%r12),_TIF_SIGPENDING - jo .Lio_sigpending + TSTMSK __TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL) + jnz .Lio_sigpending TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME jo .Lio_notify_resume TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c index 76cd09879eaf..53da174754d9 100644 --- a/arch/s390/kernel/machine_kexec_file.c +++ b/arch/s390/kernel/machine_kexec_file.c @@ -29,6 +29,7 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len) const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1; struct module_signature *ms; unsigned long sig_len; + int ret; /* Skip signature verification when not secure IPLed. */ if (!ipl_secure_flag) @@ -63,11 +64,18 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len) return -EBADMSG; } - return verify_pkcs7_signature(kernel, kernel_len, - kernel + kernel_len, sig_len, - VERIFY_USE_PLATFORM_KEYRING, - VERIFYING_MODULE_SIGNATURE, - NULL, NULL); + ret = verify_pkcs7_signature(kernel, kernel_len, + kernel + kernel_len, sig_len, + VERIFY_USE_SECONDARY_KEYRING, + VERIFYING_MODULE_SIGNATURE, + NULL, NULL); + if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) + ret = verify_pkcs7_signature(kernel, kernel_len, + kernel + kernel_len, sig_len, + VERIFY_USE_PLATFORM_KEYRING, + VERIFYING_MODULE_SIGNATURE, + NULL, NULL); + return ret; } #endif /* CONFIG_KEXEC_SIG */ diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c index 0a5e4bafb6ad..1b8e2aff20e3 100644 --- a/arch/s390/kernel/os_info.c +++ b/arch/s390/kernel/os_info.c @@ -15,6 +15,7 @@ #include #include #include +#include /* * OS info structure has to be page aligned @@ -123,7 +124,7 @@ static void os_info_old_init(void) return; if (!OLDMEM_BASE) goto fail; - if (copy_oldmem_kernel(&addr, &S390_lowcore.os_info, sizeof(addr))) + if (copy_oldmem_kernel(&addr, (void *)__LC_OS_INFO, sizeof(addr))) goto fail; if (addr == 0 || addr % PAGE_SIZE) goto fail; diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 0eb1d1cc53a8..dddb32e53db8 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -292,6 +292,26 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type) return err; } +/* Events CPU_CYLCES and INSTRUCTIONS can be submitted with two different + * attribute::type values: + * - PERF_TYPE_HARDWARE: + * - pmu->type: + * Handle both type of invocations identical. They address the same hardware. + * The result is different when event modifiers exclude_kernel and/or + * exclude_user are also set. 
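The entry.S change above is one instance of a pattern this series applies across riscv, s390, sh and sparc alike: TIF_NOTIFY_SIGNAL must be honored everywhere TIF_SIGPENDING already is, so that task_work queued through the signal-notification mechanism runs before the task returns to user space. In C, the per-architecture shape is roughly the following sketch, using 5.10-era helpers; do_signal() stands in for each architecture's own handler:

    #include <linux/thread_info.h>
    #include <linux/tracehook.h>

    void do_signal(struct pt_regs *regs);   /* each arch provides its own */

    static void sketch_do_notify_resume(struct pt_regs *regs,
                                        unsigned long ti_flags)
    {
            if (ti_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                    do_signal(regs);        /* get_signal() also runs task_work */
            if (ti_flags & _TIF_NOTIFY_RESUME)
                    tracehook_notify_resume(regs);
    }

The assembly version does the same thing with a combined mask, which is why the branch condition flips from jo (the single tested bit is set) to jnz (any of several tested bits is set).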
+ */ +static int cpumf_pmu_event_type(struct perf_event *event) +{ + u64 ev = event->attr.config; + + if (cpumf_generic_events_basic[PERF_COUNT_HW_CPU_CYCLES] == ev || + cpumf_generic_events_basic[PERF_COUNT_HW_INSTRUCTIONS] == ev || + cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev || + cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev) + return PERF_TYPE_HARDWARE; + return PERF_TYPE_RAW; +} + static int cpumf_pmu_event_init(struct perf_event *event) { unsigned int type = event->attr.type; @@ -301,7 +321,7 @@ static int cpumf_pmu_event_init(struct perf_event *event) err = __hw_perf_event_init(event, type); else if (event->pmu->type == type) /* Registered as unknown PMU */ - err = __hw_perf_event_init(event, PERF_TYPE_RAW); + err = __hw_perf_event_init(event, cpumf_pmu_event_type(event)); else return -ENOENT; diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index 1e75cc983546..b922dc0c8130 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -51,7 +51,7 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs) if (!stack) return NULL; - return (struct kvm_s390_sie_block *) stack->empty1[0]; + return (struct kvm_s390_sie_block *)stack->empty1[1]; } static bool is_in_guest(struct pt_regs *regs) diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index ec801d3bbb37..bd7da4049707 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -77,6 +77,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) memcpy(dst, src, arch_task_struct_size); dst->thread.fpu.regs = dst->thread.fpu.fprs; + + /* + * Don't transfer over the runtime instrumentation or the guarded + * storage control block pointers. These fields are cleared here instead + * of in copy_thread() to avoid premature freeing of associated memory + * on fork() failure. Wait to clear the RI flag because ->stack still + * refers to the source thread. + */ + dst->thread.ri_cb = NULL; + dst->thread.gs_cb = NULL; + dst->thread.gs_bc_cb = NULL; + return 0; } @@ -115,7 +127,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, frame->sf.gprs[9] = (unsigned long) frame; /* Store access registers to kernel stack of new process. */ - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { /* kernel thread */ memset(&frame->childregs, 0, sizeof(struct pt_regs)); frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | @@ -134,13 +146,11 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, frame->childregs.flags = 0; if (new_stackp) frame->childregs.gprs[15] = new_stackp; - - /* Don't copy runtime instrumentation info */ - p->thread.ri_cb = NULL; + /* + * Clear the runtime instrumentation flag after the above childregs + * copy. The CB pointer was already cleared in arch_dup_task_struct(). + */ frame->childregs.psw.mask &= ~PSW_MASK_RI; - /* Don't copy guarded storage control block */ - p->thread.gs_cb = NULL; - p->thread.gs_bc_cb = NULL; /* Set a new TLS ? 
*/ if (clone_flags & CLONE_SETTLS) { diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index f9f8721dc532..520cf5a152cf 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -1009,6 +1009,11 @@ static void __init setup_randomness(void) if (stsi(vmms, 3, 2, 2) == 0 && vmms->count) add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count); memblock_free((unsigned long) vmms, PAGE_SIZE); + +#ifdef CONFIG_ARCH_RANDOM + if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG)) + static_branch_enable(&s390_arch_random_available); +#endif } /* diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 9e900a8977bd..b27b6c1f058d 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -472,7 +472,7 @@ void do_signal(struct pt_regs *regs) current->thread.system_call = test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0; - if (get_signal(&ksig)) { + if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) { /* Whee! Actually deliver the signal. */ if (current->thread.system_call) { regs->int_code = current->thread.system_call; diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 177ccfbda40a..9505bdb0aa54 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -122,6 +122,7 @@ SECTIONS /* * Table with the patch locations to undo expolines */ + . = ALIGN(4); .nospec_call_table : { __nospec_call_start = . ; *(.s390_indirect*) diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index e7a7c499a73f..77909d362b78 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -521,12 +521,27 @@ static int handle_pv_uvc(struct kvm_vcpu *vcpu) static int handle_pv_notification(struct kvm_vcpu *vcpu) { + int ret; + if (vcpu->arch.sie_block->ipa == 0xb210) return handle_pv_spx(vcpu); if (vcpu->arch.sie_block->ipa == 0xb220) return handle_pv_sclp(vcpu); if (vcpu->arch.sie_block->ipa == 0xb9a4) return handle_pv_uvc(vcpu); + if (vcpu->arch.sie_block->ipa >> 8 == 0xae) { + /* + * Besides external call, other SIGP orders also cause a + * 108 (pv notify) intercept. In contrast to external call, + * these orders need to be emulated and hence the appropriate + * place to handle them is in handle_instruction(). + * So first try kvm_s390_handle_sigp_pei() and if that isn't + * successful, go on with handle_instruction(). 
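With the refill worker gone, the arch-random plumbing reduces to the detect-once pattern visible in the setup.c hunk above: probe the hardware facility during early setup, flip a static key, and let every later caller pay only for a patched branch. A generic sketch of that idiom, with placeholder names where the real code would query CPACF:

    #include <linux/jump_label.h>
    #include <linux/init.h>

    DEFINE_STATIC_KEY_FALSE(hw_trng_available);

    static inline bool sketch_fast_seed(unsigned long *v)
    {
            if (static_branch_likely(&hw_trng_available)) {
                    *v = 0;         /* placeholder for cpacf_trng() */
                    return true;
            }
            return false;           /* nearly free while the key is off */
    }

    static int __init sketch_detect_trng(void)
    {
            if (1)                  /* placeholder for cpacf_query_func() */
                    static_branch_enable(&hw_trng_available);
            return 0;
    }

Moving the probe into setup_randomness() also means the key is flipped much earlier in boot than the old subsys initcall allowed, so seed requests made early no longer miss the hardware generator.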
+ */ + ret = kvm_s390_handle_sigp_pei(vcpu); + if (!ret) + return ret; + } return handle_instruction(vcpu); } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d8e9239c24ff..59db85fb63e1 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1092,6 +1092,8 @@ static int kvm_s390_vm_get_migration(struct kvm *kvm, return 0; } +static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod); + static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) { struct kvm_s390_vm_tod_clock gtod; @@ -1101,7 +1103,7 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) return -EINVAL; - kvm_s390_set_tod_clock(kvm, &gtod); + __kvm_s390_set_tod_clock(kvm, &gtod); VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", gtod.epoch_idx, gtod.tod); @@ -1132,7 +1134,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) sizeof(gtod.tod))) return -EFAULT; - kvm_s390_set_tod_clock(kvm, &gtod); + __kvm_s390_set_tod_clock(kvm, &gtod); VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); return 0; } @@ -1144,6 +1146,16 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) if (attr->flags) return -EINVAL; + mutex_lock(&kvm->lock); + /* + * For protected guests, the TOD is managed by the ultravisor, so trying + * to change it will never bring the expected results. + */ + if (kvm_s390_pv_is_protected(kvm)) { + ret = -EOPNOTSUPP; + goto out_unlock; + } + switch (attr->attr) { case KVM_S390_VM_TOD_EXT: ret = kvm_s390_set_tod_ext(kvm, attr); @@ -1158,6 +1170,9 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) ret = -ENXIO; break; } + +out_unlock: + mutex_unlock(&kvm->lock); return ret; } @@ -3862,14 +3877,12 @@ retry: return 0; } -void kvm_s390_set_tod_clock(struct kvm *kvm, - const struct kvm_s390_vm_tod_clock *gtod) +static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) { struct kvm_vcpu *vcpu; struct kvm_s390_tod_clock_ext htod; int i; - mutex_lock(&kvm->lock); preempt_disable(); get_tod_clock_ext((char *)&htod); @@ -3890,7 +3903,15 @@ void kvm_s390_set_tod_clock(struct kvm *kvm, kvm_s390_vcpu_unblock_all(kvm); preempt_enable(); +} + +int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) +{ + if (!mutex_trylock(&kvm->lock)) + return 0; + __kvm_s390_set_tod_clock(kvm, gtod); mutex_unlock(&kvm->lock); + return 1; } /** diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index a3e9b71d426f..b6ff64796af9 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -326,8 +326,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); /* implemented in kvm-s390.c */ -void kvm_s390_set_tod_clock(struct kvm *kvm, - const struct kvm_s390_vm_tod_clock *gtod); +int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod); long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 3b1a498e58d2..e34d518dd3d3 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -102,7 +102,20 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) return kvm_s390_inject_prog_cond(vcpu,
rc); VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod); - kvm_s390_set_tod_clock(vcpu->kvm, &gtod); + /* + * To set the TOD clock the kvm lock must be taken, but the vcpu lock + * is already held in handle_set_clock. The usual lock order is the + * opposite. As SCK is deprecated and should not be used in several + * cases, for example when the multiple epoch facility or TOD clock + * steering facility is installed (see Principles of Operation), a + * slow path can be used. If the lock can not be taken via try_lock, + * the instruction will be retried via -EAGAIN at a later point in + * time. + */ + if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) { + kvm_s390_retry_instr(vcpu); + return -EAGAIN; + } kvm_s390_set_psw_cc(vcpu, 0); return 0; diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c index 822887887222..c0e00e94ee22 100644 --- a/arch/s390/kvm/pv.c +++ b/arch/s390/kvm/pv.c @@ -163,10 +163,13 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc) atomic_set(&kvm->mm->context.is_protected, 0); KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc); WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc); - /* Inteded memory leak on "impossible" error */ - if (!cc) + /* Intended memory leak on "impossible" error */ + if (!cc) { kvm_s390_pv_dealloc_vm(kvm); - return cc ? -EIO : 0; + return 0; + } + s390_replace_asce(kvm->arch.gmap); + return -EIO; } int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc) diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 3dc921e853b6..52800279686c 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -492,9 +492,9 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) struct kvm_vcpu *dest_vcpu; u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); - trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); - if (order_code == SIGP_EXTERNAL_CALL) { + trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); + dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); BUG_ON(dest_vcpu == NULL); diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 3fbf7081c000..ff58decfef5e 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -535,8 +535,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI)) scb_s->eca |= scb_o->eca & ECA_CEI; /* Epoch Extension */ - if (test_kvm_facility(vcpu->kvm, 139)) + if (test_kvm_facility(vcpu->kvm, 139)) { scb_s->ecd |= scb_o->ecd & ECD_MEF; + scb_s->epdx = scb_o->epdx; + } /* etoken */ if (test_kvm_facility(vcpu->kvm, 156)) diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index ed517fad0d03..1866374356c8 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -429,7 +429,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) flags = FAULT_FLAG_DEFAULT; if (user_mode(regs)) flags |= FAULT_FLAG_USER; - if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) + if ((trans_exc_code & store_indication) == 0x400) + access = VM_WRITE; + if (access == VM_WRITE) flags |= FAULT_FLAG_WRITE; mmap_read_lock(mm); diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index f2d19d40272c..03e561608eed 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2596,6 +2596,18 @@ static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr, return 0; } +/* + * Give a chance to schedule after setting a key to 256 pages. + * We only hold the mm lock, which is a rwsem and the kvm srcu. + * Both can sleep.
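The gmap.c comment above introduces a neat use of the pagewalk API: a pmd_entry callback that does no work at all exists purely to create a preemption point once per pmd, that is, once per 256 pte entries, while the pte_entry callback does the actual key setting. A sketch of the pattern under the standard 5.10 pagewalk interface:

    #include <linux/pagewalk.h>
    #include <linux/sched.h>

    static int sketch_pte_entry(pte_t *pte, unsigned long addr,
                                unsigned long next, struct mm_walk *walk)
    {
            /* per-pte work would go here */
            return 0;
    }

    static int sketch_pmd_entry(pmd_t *pmd, unsigned long addr,
                                unsigned long next, struct mm_walk *walk)
    {
            cond_resched();         /* breather once per 256 ptes */
            return 0;
    }

    static const struct mm_walk_ops sketch_walk_ops = {
            .pte_entry = sketch_pte_entry,
            .pmd_entry = sketch_pmd_entry,
    };

Returning 0 from pmd_entry lets the walker descend into the pte level as usual, so the scheduling point has no other side effects.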
+ */ +static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + cond_resched(); + return 0; +} + static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr, unsigned long hmask, unsigned long next, struct mm_walk *walk) @@ -2618,12 +2630,14 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr, end = start + HPAGE_SIZE - 1; __storage_key_init_range(start, end); set_bit(PG_arch_1, &page->flags); + cond_resched(); return 0; } static const struct mm_walk_ops enable_skey_walk_ops = { .hugetlb_entry = __s390_enable_skey_hugetlb, .pte_entry = __s390_enable_skey_pte, + .pmd_entry = __s390_enable_skey_pmd, }; int s390_enable_skey(void) @@ -2707,3 +2721,89 @@ void s390_reset_acc(struct mm_struct *mm) mmput(mm); } EXPORT_SYMBOL_GPL(s390_reset_acc); + +/** + * s390_unlist_old_asce - Remove the topmost level of page tables from the + * list of page tables of the gmap. + * @gmap: the gmap whose table is to be removed + * + * On s390x, KVM keeps a list of all pages containing the page tables of the + * gmap (the CRST list). This list is used at tear down time to free all + * pages that are now not needed anymore. + * + * This function removes the topmost page of the tree (the one pointed to by + * the ASCE) from the CRST list. + * + * This means that it will not be freed when the VM is torn down, and needs + * to be handled separately by the caller, unless a leak is actually + * intended. Notice that this function will only remove the page from the + * list, the page will still be used as a top level page table (and ASCE). + */ +void s390_unlist_old_asce(struct gmap *gmap) +{ + struct page *old; + + old = virt_to_page(gmap->table); + spin_lock(&gmap->guest_table_lock); + list_del(&old->lru); + /* + * Sometimes the topmost page might need to be "removed" multiple + * times, for example if the VM is rebooted into secure mode several + * times concurrently, or if s390_replace_asce fails after calling + * s390_remove_old_asce and is attempted again later. In that case + * the old asce has been removed from the list, and therefore it + * will not be freed when the VM terminates, but the ASCE is still + * in use and still pointed to. + * A subsequent call to replace_asce will follow the pointer and try + * to remove the same page from the list again. + * Therefore it's necessary that the page of the ASCE has valid + * pointers, so list_del can work (and do nothing) without + * dereferencing stale or invalid pointers. + */ + INIT_LIST_HEAD(&old->lru); + spin_unlock(&gmap->guest_table_lock); +} +EXPORT_SYMBOL_GPL(s390_unlist_old_asce); + +/** + * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy + * @gmap: the gmap whose ASCE needs to be replaced + * + * If the allocation of the new top level page table fails, the ASCE is not + * replaced. + * In any case, the old ASCE is always removed from the gmap CRST list. + * Therefore the caller has to make sure to save a pointer to it + * beforehand, unless a leak is actually intended. 
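The long comment above justifies an easy-to-miss trick: re-initializing a list entry immediately after deleting it. Plain list_del() poisons the entry's next/prev pointers, so a second removal of the same page would dereference poison values; resetting the entry to point at itself turns any repeat removal into a harmless no-op. A sketch of the idiom (list_del_init() is the canonical one-call spelling):

    #include <linux/list.h>

    static void sketch_remove_idempotent(struct list_head *entry)
    {
            list_del(entry);        /* unlink; pointers now poisoned */
            INIT_LIST_HEAD(entry);  /* self-pointing again, safe to repeat */
    }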
+ */ +int s390_replace_asce(struct gmap *gmap) +{ + unsigned long asce; + struct page *page; + void *table; + + s390_unlist_old_asce(gmap); + + page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER); + if (!page) + return -ENOMEM; + table = page_to_virt(page); + memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT)); + + /* + * The caller has to deal with the old ASCE, but here we make sure + * the new one is properly added to the CRST list, so that + * it will be freed when the VM is torn down. + */ + spin_lock(&gmap->guest_table_lock); + list_add(&page->lru, &gmap->crst_list); + spin_unlock(&gmap->guest_table_lock); + + /* Set new table origin while preserving existing ASCE control bits */ + asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table); + WRITE_ONCE(gmap->asce, asce); + WRITE_ONCE(gmap->mm->context.gmap_asce, asce); + WRITE_ONCE(gmap->table, table); + + return 0; +} +EXPORT_SYMBOL_GPL(s390_replace_asce); diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index fabaedddc90c..1c05caf68e7d 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -734,7 +734,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep) pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT; ptev = pte_val(*ptep); if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE)) - page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1); + page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0); pgste_set_unlock(ptep, pgste); preempt_enable(); } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index e14e4a3a647a..74799439b259 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid) list_for_each_entry(tmp, &zpci_list, entry) { if (tmp->fid == fid) { zdev = tmp; + zpci_zdev_get(zdev); break; } } diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h index 55c9488e504c..8d2fcd091ca7 100644 --- a/arch/s390/pci/pci_bus.h +++ b/arch/s390/pci/pci_bus.h @@ -13,7 +13,8 @@ void zpci_bus_device_unregister(struct zpci_dev *zdev); void zpci_release_device(struct kref *kref); static inline void zpci_zdev_put(struct zpci_dev *zdev) { - kref_put(&zdev->kref, zpci_release_device); + if (zdev) + kref_put(&zdev->kref, zpci_release_device); } static inline void zpci_zdev_get(struct zpci_dev *zdev) diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 0a0e8b8293be..d1a5c80a41cb 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -22,6 +22,8 @@ #include #include +#include "pci_bus.h" + bool zpci_unique_uid; void update_uid_checking(bool new) @@ -372,8 +374,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data) return; zdev = get_zdev_by_fid(entry->fid); - if (!zdev) - zpci_create_device(entry->fid, entry->fh, entry->config_state); + if (zdev) { + zpci_zdev_put(zdev); + return; + } + zpci_create_device(entry->fid, entry->fh, entry->config_state); } int clp_scan_pci_devices(void) diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index b7cfde7e80a8..6ced44b5be8a 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -61,10 +61,12 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf) pdev ? 
pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid); if (!pdev) - return; + goto no_pdev; pdev->error_state = pci_channel_io_perm_failure; pci_dev_put(pdev); +no_pdev: + zpci_zdev_put(zdev); } void zpci_event_error(void *data) @@ -76,6 +78,7 @@ void zpci_event_error(void *data) static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) { struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); + bool existing_zdev = !!zdev; enum zpci_state state; struct pci_dev *pdev; int ret; @@ -161,6 +164,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) default: break; } + if (existing_zdev) + zpci_zdev_put(zdev); } void zpci_event_availability(void *data) diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 37b1bbd1a27c..1ec8076209ca 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -64,7 +64,7 @@ static inline int __pcistg_mio_inuser( asm volatile ( " sacf 256\n" "0: llgc %[tmp],0(%[src])\n" - " sllg %[val],%[val],8\n" + "4: sllg %[val],%[val],8\n" " aghi %[src],1\n" " ogr %[val],%[tmp]\n" " brctg %[cnt],0b\n" @@ -72,7 +72,7 @@ static inline int __pcistg_mio_inuser( "2: ipm %[cc]\n" " srl %[cc],28\n" "3: sacf 768\n" - EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b) + EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b) : [src] "+a" (src), [cnt] "+d" (cnt), [val] "+d" (val), [tmp] "=d" (tmp), @@ -222,10 +222,10 @@ static inline int __pcilg_mio_inuser( "2: ahi %[shift],-8\n" " srlg %[tmp],%[val],0(%[shift])\n" "3: stc %[tmp],0(%[dst])\n" - " aghi %[dst],1\n" + "5: aghi %[dst],1\n" " brctg %[cnt],2b\n" "4: sacf 768\n" - EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) + EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b) : [cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len), [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp), diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 6d5c6463bc07..de99a19e72d7 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -271,8 +271,12 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, #endif /* CONFIG_HAVE_IOREMAP_PROT */ #else /* CONFIG_MMU */ -#define iounmap(addr) do { } while (0) -#define ioremap(offset, size) ((void __iomem *)(unsigned long)(offset)) +static inline void __iomem *ioremap(phys_addr_t offset, size_t size) +{ + return (void __iomem *)(unsigned long)offset; +} + +static inline void iounmap(volatile void __iomem *addr) { } #endif /* CONFIG_MMU */ #define ioremap_uc ioremap diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h index 8edb824049b9..0cb0ca149ac3 100644 --- a/arch/sh/include/asm/sections.h +++ b/arch/sh/include/asm/sections.h @@ -4,7 +4,7 @@ #include -extern long __machvec_start, __machvec_end; +extern char __machvec_start[], __machvec_end[]; extern char __uncached_start, __uncached_end; extern char __start_eh_frame[], __stop_eh_frame[]; diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 243ea5150aa0..598d0184ffea 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -105,6 +105,7 @@ extern void init_thread_xstate(void); #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_NOTIFY_SIGNAL 3 /* signal notifications exist */ #define TIF_SINGLESTEP 4 /* singlestepping active */ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SECCOMP 6 /* secure 
computing */ @@ -116,6 +117,7 @@ extern void init_thread_xstate(void); #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) @@ -132,7 +134,7 @@ extern void init_thread_xstate(void); #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \ - _TIF_SYSCALL_TRACEPOINT) + _TIF_SYSCALL_TRACEPOINT | _TIF_NOTIFY_SIGNAL) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c index d606679a211e..57efaf5b82ae 100644 --- a/arch/sh/kernel/machvec.c +++ b/arch/sh/kernel/machvec.c @@ -20,8 +20,8 @@ #define MV_NAME_SIZE 32 #define for_each_mv(mv) \ - for ((mv) = (struct sh_machine_vector *)&__machvec_start; \ - (mv) && (unsigned long)(mv) < (unsigned long)&__machvec_end; \ + for ((mv) = (struct sh_machine_vector *)__machvec_start; \ + (mv) && (unsigned long)(mv) < (unsigned long)__machvec_end; \ (mv)++) static struct sh_machine_vector * __init get_mv_byname(const char *name) @@ -87,8 +87,8 @@ void __init sh_mv_setup(void) if (!machvec_selected) { unsigned long machvec_size; - machvec_size = ((unsigned long)&__machvec_end - - (unsigned long)&__machvec_start); + machvec_size = ((unsigned long)__machvec_end - + (unsigned long)__machvec_start); /* * Sanity check for machvec section alignment. Ensure @@ -102,7 +102,7 @@ void __init sh_mv_setup(void) * vector (usually the only one) from .machvec.init. 
*/ if (machvec_size >= sizeof(struct sh_machine_vector)) - sh_mv = *(struct sh_machine_vector *)&__machvec_start; + sh_mv = *(struct sh_machine_vector *)__machvec_start; } pr_notice("Booting machvec: %s\n", get_system_type()); diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 80a5d1c66a51..1aa508eb0823 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -114,7 +114,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, childregs = task_pt_regs(p); p->thread.sp = (unsigned long) childregs; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(childregs, 0, sizeof(struct pt_regs)); p->thread.pc = (unsigned long) ret_from_kernel_thread; childregs->regs[4] = arg; diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 1add47fd31f6..dd3092911efa 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -499,7 +499,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, unsigned long thread_info_flags) { /* deal with pending signal delivery */ - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs, save_r0); if (thread_info_flags & _TIF_NOTIFY_RESUME) diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h index 548b366165dd..45b4955b253f 100644 --- a/arch/sparc/include/asm/thread_info_32.h +++ b/arch/sparc/include/asm/thread_info_32.h @@ -104,6 +104,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_USEDFPU 8 /* FPU was used by this task * this quantum (SMP) */ #define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling @@ -115,11 +116,12 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define _TIF_NOTIFY_RESUME (1< #endif diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index a02363735915..0f9c606e1e78 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -309,7 +309,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, ti->ksp = (unsigned long) new_stack; p->thread.kregs = childregs; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { extern int nwindows; unsigned long psr; memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ); diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 6f8c7822fc06..7afd0a859a78 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -597,7 +597,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, sizeof(struct sparc_stackf)); t->fpsaved[0] = 0; - if (unlikely(p->flags & PF_KTHREAD)) { + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { memset(child_trap_frame, 0, child_stack_sz); __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = (current_pt_regs()->tstate + 1) & TSTATE_CWP; diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 1da36dd34990..4d478510550c 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -521,7 +521,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) void 
do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags) { - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs, orig_i0); if (thread_info_flags & _TIF_NOTIFY_RESUME) tracehook_notify_resume(regs); diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index f7ef7edcd5c1..a0eec62c825d 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -549,7 +549,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long user_exit(); if (thread_info_flags & _TIF_UPROBE) uprobe_notify_resume(regs); - if (thread_info_flags & _TIF_SIGPENDING) + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs, orig_i0); if (thread_info_flags & _TIF_NOTIFY_RESUME) tracehook_notify_resume(regs); diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 4b799fad8b48..1c57599b82fa 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -192,3 +192,8 @@ config UML_TIME_TRAVEL_SUPPORT endmenu source "arch/um/drivers/Kconfig" + +config ARCH_SUSPEND_POSSIBLE + def_bool y + +source "kernel/power/Kconfig" diff --git a/arch/um/Makefile b/arch/um/Makefile index 1cea46ff9bb7..775615141339 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -131,10 +131,18 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT) # The wrappers will select whether using "malloc" or the kernel allocator. LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc +# Avoid binutils 2.39+ warnings by marking the stack non-executable and +# ignoring warnings for the kallsyms sections. +LDFLAGS_EXECSTACK = -z noexecstack +ifeq ($(CONFIG_LD_IS_BFD),y) +LDFLAGS_EXECSTACK += $(call ld-option,--no-warn-rwx-segments) +endif + LD_FLAGS_CMDLINE = $(foreach opt,$(KBUILD_LDFLAGS),-Wl,$(opt)) # Used by link-vmlinux.sh which has special support for um link export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE) +export LDFLAGS_vmlinux := $(LDFLAGS_EXECSTACK) # When cleaning we don't include .config, so we don't include # TT or skas makefiles and don't clean skas_ptregs.h. diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c index 6040817c036f..25727ed648b7 100644 --- a/arch/um/drivers/chan_user.c +++ b/arch/um/drivers/chan_user.c @@ -220,7 +220,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out, unsigned long *stack_out) { struct winch_data data; - int fds[2], n, err; + int fds[2], n, err, pid; char c; err = os_pipe(fds, 1, 1); @@ -238,8 +238,9 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out, * problem with /dev/net/tun, which if held open by this * thread, prevents the TUN/TAP device from being reused. */ - err = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out); - if (err < 0) { + pid = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out); + if (pid < 0) { + err = pid; printk(UM_KERN_ERR "fork of winch_thread failed - errno = %d\n", -err); goto out_close; @@ -263,7 +264,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out, goto out_close; } - return err; + return pid; out_close: close(fds[1]); diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c index e4b9b2ce9abf..4b712395763e 100644 --- a/arch/um/drivers/random.c +++ b/arch/um/drivers/random.c @@ -28,7 +28,7 @@ * protects against a module being loaded twice at the same time.
*/ static int random_fd = -1; -static struct hwrng hwrng = { 0, }; +static struct hwrng hwrng; static DECLARE_COMPLETION(have_data); static int rng_dev_read(struct hwrng *rng, void *buf, size_t max, bool block) diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h index 4c19ce4c49f1..e610e932cfe1 100644 --- a/arch/um/include/asm/thread_info.h +++ b/arch/um/include/asm/thread_info.h @@ -57,18 +57,22 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_NOTIFY_SIGNAL 3 /* signal notifications exist */ #define TIF_RESTART_BLOCK 4 #define TIF_MEMDIE 5 /* is terminating due to OOM killer */ #define TIF_SYSCALL_AUDIT 6 #define TIF_RESTORE_SIGMASK 7 #define TIF_NOTIFY_RESUME 8 #define TIF_SECCOMP 9 /* secure computing */ +#define TIF_SINGLESTEP 10 /* single stepping userspace */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_MEMDIE (1 << TIF_MEMDIE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #endif diff --git a/arch/um/include/asm/timex.h b/arch/um/include/asm/timex.h index e392a9a5bc9b..9f27176adb26 100644 --- a/arch/um/include/asm/timex.h +++ b/arch/um/include/asm/timex.h @@ -2,13 +2,8 @@ #ifndef __UM_TIMEX_H #define __UM_TIMEX_H -typedef unsigned long cycles_t; - -static inline cycles_t get_cycles (void) -{ - return 0; -} - #define CLOCK_TICK_RATE (HZ) +#include + #endif diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h index ccafb62e8cce..9c08e728a675 100644 --- a/arch/um/include/shared/kern_util.h +++ b/arch/um/include/shared/kern_util.h @@ -39,6 +39,8 @@ extern int is_syscall(unsigned long addr); extern void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); +extern void uml_pm_wake(void); + extern int start_uml(void); extern void paging_init(void); diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h index f467d28fc0b4..2f31d44d892e 100644 --- a/arch/um/include/shared/os.h +++ b/arch/um/include/shared/os.h @@ -241,6 +241,7 @@ extern int set_signals(int enable); extern int set_signals_trace(int enable); extern int os_is_signal_stack(void); extern void deliver_alarm(void); +extern void register_pm_wake_signal(void); /* util.c */ extern void stack_protections(unsigned long address); diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c index e8fd5d540b05..7f7a74c82abb 100644 --- a/arch/um/kernel/exec.c +++ b/arch/um/kernel/exec.c @@ -44,7 +44,7 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp) { PT_REGS_IP(regs) = eip; PT_REGS_SP(regs) = esp; - current->ptrace &= ~PT_DTRACE; + clear_thread_flag(TIF_SINGLESTEP); #ifdef SUBARCH_EXECVE1 SUBARCH_EXECVE1(regs->regs); #endif diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 9505a7e87396..e6c9b11b2033 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -99,7 +99,8 @@ void interrupt_end(void) if (need_resched()) schedule(); - if (test_thread_flag(TIF_SIGPENDING)) + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) do_signal(regs); if (test_thread_flag(TIF_NOTIFY_RESUME)) tracehook_notify_resume(regs); @@ 
-156,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, struct task_struct * p, unsigned long tls) { void (*handler)(void); - int kthread = current->flags & PF_KTHREAD; + int kthread = current->flags & (PF_KTHREAD | PF_IO_WORKER); int ret = 0; p->thread = (struct thread_struct) INIT_THREAD; @@ -341,7 +342,7 @@ int singlestepping(void * t) { struct task_struct *task = t ? t : current; - if (!(task->ptrace & PT_DTRACE)) + if (!test_thread_flag(TIF_SINGLESTEP)) return 0; if (task->thread.singlestep_syscall) diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c index b425f47bddbb..d37802ced563 100644 --- a/arch/um/kernel/ptrace.c +++ b/arch/um/kernel/ptrace.c @@ -12,7 +12,7 @@ void user_enable_single_step(struct task_struct *child) { - child->ptrace |= PT_DTRACE; + set_tsk_thread_flag(child, TIF_SINGLESTEP); child->thread.singlestep_syscall = 0; #ifdef SUBARCH_SET_SINGLESTEPPING @@ -22,7 +22,7 @@ void user_enable_single_step(struct task_struct *child) void user_disable_single_step(struct task_struct *child) { - child->ptrace &= ~PT_DTRACE; + clear_tsk_thread_flag(child, TIF_SINGLESTEP); child->thread.singlestep_syscall = 0; #ifdef SUBARCH_SET_SINGLESTEPPING @@ -121,7 +121,7 @@ static void send_sigtrap(struct uml_pt_regs *regs, int error_code) } /* - * XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and + * XXX Check TIF_SINGLESTEP for singlestepping check and * PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check */ int syscall_trace_enter(struct pt_regs *regs) @@ -145,7 +145,7 @@ void syscall_trace_leave(struct pt_regs *regs) audit_syscall_exit(regs); /* Fake a debug trap */ - if (ptraced & PT_DTRACE) + if (test_thread_flag(TIF_SINGLESTEP)) send_sigtrap(&regs->regs, 0); if (!test_thread_flag(TIF_SYSCALL_TRACE)) diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c index 88cd9b5c1b74..ae4658f576ab 100644 --- a/arch/um/kernel/signal.c +++ b/arch/um/kernel/signal.c @@ -53,7 +53,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) unsigned long sp; int err; - if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) + if (test_thread_flag(TIF_SINGLESTEP) && (current->ptrace & PT_PTRACED)) singlestep = 1; /* Did we come from a system call? */ @@ -128,7 +128,7 @@ void do_signal(struct pt_regs *regs) * on the host. The tracing thread will check this flag and * PTRACE_SYSCALL if necessary. */ - if (current->ptrace & PT_DTRACE) + if (test_thread_flag(TIF_SINGLESTEP)) current->thread.singlestep_syscall = is_syscall(PT_REGS_IP(&current->thread.regs)); diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 76b37297b7d4..00c6dce14bd2 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -76,7 +77,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) static void *c_start(struct seq_file *m, loff_t *pos) { - return *pos < NR_CPUS ? cpu_data + *pos : NULL; + return *pos < nr_cpu_ids ?
cpu_data + *pos : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) @@ -358,6 +359,14 @@ void __init check_bugs(void) os_check_bugs(); } +void apply_retpolines(s32 *start, s32 *end) +{ +} + +void apply_returns(s32 *start, s32 *end) +{ +} + void apply_alternatives(struct alt_instr *start, struct alt_instr *end) { } @@ -377,3 +386,27 @@ void *text_poke(void *addr, const void *opcode, size_t len) void text_poke_sync(void) { } + +#ifdef CONFIG_PM_SLEEP +void uml_pm_wake(void) +{ + pm_system_wakeup(); +} + +static int init_pm_wake_signal(void) +{ + /* + * In external time-travel mode we can't use signals to wake up + * since that would mess with the scheduling. We'll have to do + * some additional work to support wakeup on virtio devices or + * similar, perhaps implementing a fake RTC controller that can + * trigger wakeup (and request the appropriate scheduling from + * the external scheduler when going to suspend.) + */ + if (time_travel_mode != TT_MODE_EXTERNAL) + register_pm_wake_signal(); + return 0; +} + +late_initcall(init_pm_wake_signal); +#endif diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c index b58bc68cbe64..0a2ea84033b4 100644 --- a/arch/um/os-Linux/signal.c +++ b/arch/um/os-Linux/signal.c @@ -136,6 +136,16 @@ void set_sigstack(void *sig_stack, int size) panic("enabling signal stack failed, errno = %d\n", errno); } +static void sigusr1_handler(int sig, struct siginfo *unused_si, mcontext_t *mc) +{ + uml_pm_wake(); +} + +void register_pm_wake_signal(void) +{ + set_handler(SIGUSR1); +} + static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = { [SIGSEGV] = sig_handler, [SIGBUS] = sig_handler, @@ -145,7 +155,9 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = { [SIGIO] = sig_handler, [SIGWINCH] = sig_handler, - [SIGALRM] = timer_alarm_handler + [SIGALRM] = timer_alarm_handler, + + [SIGUSR1] = sigusr1_handler, }; static void hard_handler(int sig, siginfo_t *si, void *p) diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index 94a7c4125ebc..eecde73b2e78 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -5,6 +5,7 @@ */ #include +#include #include #include #include @@ -644,10 +645,24 @@ void halt_skas(void) UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT); } +static bool noreboot; + +static int __init noreboot_cmd_param(char *str, int *add) +{ + noreboot = true; + return 0; +} + +__uml_setup("noreboot", noreboot_cmd_param, +"noreboot\n" +" Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n" +" This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n" +" crashes in CI\n"); + void reboot_skas(void) { block_signals_trace(); - UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT); + UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT); } void __switch_mm(struct mm_id *mm_idp) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5e28d4a0dff5..32536ff02ba8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -460,15 +460,6 @@ config GOLDFISH def_bool y depends on X86_GOLDFISH -config RETPOLINE - bool "Avoid speculative indirect branches in kernel" - default y - help - Compile kernel with the retpoline compiler options to guard against - kernel-to-user data leaks by avoiding speculative indirect - branches. Requires a compiler with -mindirect-branch=thunk-extern - support for full protection. The kernel may run slower. 
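Note that CONFIG_RETPOLINE is not dropped by the removal above: the identical option is re-added a few hunks below, under the new SPECULATION_MITIGATIONS menu. As background for that move, a retpoline replaces a speculatively exploitable indirect branch such as "jmp *%rax" with a call to a thunk that parks mis-speculation in a harmless loop. A minimal sketch of the classic construction follows (illustrative only; the in-tree thunks in arch/x86/lib/retpoline.S carry additional objtool and alternatives annotations):

	/* Sketch: jump to *%rax without using indirect-branch prediction. */
	call	.Ldo_call	/* RSB now predicts a return to .Lspec_trap */
.Lspec_trap:
	pause			/* any speculation spins harmlessly here */
	lfence
	jmp	.Lspec_trap
.Ldo_call:
	mov	%rax, (%rsp)	/* replace return address with real target */
	ret			/* architecturally jumps to *%rax */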
- config X86_CPU_RESCTRL bool "x86 CPU resource control support" depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) @@ -1328,7 +1319,7 @@ config MICROCODE config MICROCODE_INTEL bool "Intel microcode loading support" - depends on MICROCODE + depends on CPU_SUP_INTEL && MICROCODE default MICROCODE help This options enables microcode patch loading support for Intel @@ -1340,7 +1331,7 @@ config MICROCODE_INTEL config MICROCODE_AMD bool "AMD microcode loading support" - depends on MICROCODE + depends on CPU_SUP_AMD && MICROCODE help If you select this option, microcode patch loading support for AMD processors will be enabled. @@ -1961,7 +1952,6 @@ config EFI config EFI_STUB bool "EFI stub support" depends on EFI && !X86_USE_3DNOW - depends on $(cc-option,-mabi=ms) || X86_32 select RELOCATABLE help This kernel feature allows a bzImage to be loaded directly @@ -2422,6 +2412,88 @@ source "kernel/livepatch/Kconfig" endmenu +config CC_HAS_SLS + def_bool $(cc-option,-mharden-sls=all) + +config CC_HAS_RETURN_THUNK + def_bool $(cc-option,-mfunction-return=thunk-extern) + +menuconfig SPECULATION_MITIGATIONS + bool "Mitigations for speculative execution vulnerabilities" + default y + help + Say Y here to enable options which enable mitigations for + speculative execution hardware vulnerabilities. + + If you say N, all mitigations will be disabled. You really + should know what you are doing to say so. + +if SPECULATION_MITIGATIONS + +config PAGE_TABLE_ISOLATION + bool "Remove the kernel mapping in user mode" + default y + depends on (X86_64 || X86_PAE) + help + This feature reduces the number of hardware side channels by + ensuring that the majority of kernel addresses are not mapped + into userspace. + + See Documentation/x86/pti.rst for more details. + +config RETPOLINE + bool "Avoid speculative indirect branches in kernel" + default y + help + Compile kernel with the retpoline compiler options to guard against + kernel-to-user data leaks by avoiding speculative indirect + branches. Requires a compiler with -mindirect-branch=thunk-extern + support for full protection. The kernel may run slower. + +config RETHUNK + bool "Enable return-thunks" + depends on RETPOLINE && CC_HAS_RETURN_THUNK + default y if X86_64 + help + Compile the kernel with the return-thunks compiler option to guard + against kernel-to-user data leaks by avoiding return speculation. + Requires a compiler with -mfunction-return=thunk-extern + support for full protection. The kernel may run slower. + +config CPU_UNRET_ENTRY + bool "Enable UNRET on kernel entry" + depends on CPU_SUP_AMD && RETHUNK && X86_64 + default y + help + Compile the kernel with support for the retbleed=unret mitigation. + +config CPU_IBPB_ENTRY + bool "Enable IBPB on kernel entry" + depends on CPU_SUP_AMD && X86_64 + default y + help + Compile the kernel with support for the retbleed=ibpb mitigation. + +config CPU_IBRS_ENTRY + bool "Enable IBRS on kernel entry" + depends on CPU_SUP_INTEL && X86_64 + default y + help + Compile the kernel with support for the spectre_v2=ibrs mitigation. + This mitigates both spectre_v2 and retbleed at great cost to + performance. + +config SLS + bool "Mitigate Straight-Line-Speculation" + depends on CC_HAS_SLS && X86_64 + default n + help + Compile the kernel with straight-line-speculation options to guard + against straight line speculation. The kernel image might be slightly + larger. 
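The RETHUNK and SLS options just added are what drive the mechanical "ret" -> "RET" conversion running through all of the assembly hunks below: once the return instruction itself becomes a mitigation point, hand-written assembly must end functions with the RET macro so the return thunk or trailing int3 can be substituted. A rough sketch of how the macro resolves, assuming the Kconfig symbols introduced above (the in-tree definition lives in arch/x86/include/asm/linkage.h and carries extra objtool annotations):

#ifdef CONFIG_RETHUNK
#define RET	jmp __x86_return_thunk	/* return via thunk; patched back to a
					 * plain ret on unaffected CPUs */
#elif defined(CONFIG_SLS)
#define RET	ret; int3		/* int3 stops straight-line speculation
					 * past the return */
#else
#define RET	ret			/* no return mitigations configured */
#endif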
+ +endif + config ARCH_HAS_ADD_PAGES def_bool y depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG @@ -2872,6 +2944,11 @@ config IA32_AOUT config X86_X32 bool "x32 ABI for 64-bit mode" depends on X86_64 + # llvm-objcopy does not convert x86_64 .note.gnu.property or + # compressed debug sections to x86_x32 properly: + # https://github.com/ClangBuiltLinux/linux/issues/514 + # https://github.com/ClangBuiltLinux/linux/issues/1141 + depends on $(success,$(OBJCOPY) --version | head -n1 | grep -qv llvm) help Include code to run binaries for the x32 native 32-bit ABI for 64-bit processors. An x32 process gets access to the diff --git a/arch/x86/Makefile b/arch/x86/Makefile index c824dc6acea4..616cc9aec021 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -31,7 +31,7 @@ endif CODE16GCC_CFLAGS := -m32 -Wa,$(srctree)/arch/x86/boot/code16gcc.h M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS)) -REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \ +REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \ -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none) @@ -196,6 +196,10 @@ ifdef CONFIG_RETPOLINE endif endif +ifdef CONFIG_SLS + KBUILD_CFLAGS += -mharden-sls=all +endif + KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE) ifdef CONFIG_LTO_CLANG diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index fe605205b4ce..59a42342b555 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -103,7 +103,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE AFLAGS_header.o += -I$(objtree)/$(obj) $(obj)/header.o: $(obj)/zoffset.h -LDFLAGS_setup.elf := -m elf_i386 -T +LDFLAGS_setup.elf := -m elf_i386 -z noexecstack -T $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE $(call if_changed,ld) diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index bf91e0a36d77..ad268a15bc7b 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -68,6 +68,8 @@ LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker) ifdef CONFIG_LD_ORPHAN_WARN LDFLAGS_vmlinux += --orphan-handling=warn endif +LDFLAGS_vmlinux += -z noexecstack +LDFLAGS_vmlinux += $(call ld-option,--no-warn-rwx-segments) LDFLAGS_vmlinux += -T hostprogs := mkpiggy diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S index c4bb0f9363f5..c8052a141b08 100644 --- a/arch/x86/boot/compressed/efi_thunk_64.S +++ b/arch/x86/boot/compressed/efi_thunk_64.S @@ -89,7 +89,7 @@ SYM_FUNC_START(__efi64_thunk) pop %rbx pop %rbp - ret + RET SYM_FUNC_END(__efi64_thunk) .code32 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 72f655c238cf..b55e2007b30c 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -786,7 +786,7 @@ SYM_FUNC_START(efi32_pe_entry) 2: popl %edi // restore callee-save registers popl %ebx leave - ret + RET SYM_FUNC_END(efi32_pe_entry) .section ".rodata" @@ -868,7 +868,7 @@ SYM_FUNC_START(startup32_check_sev_cbit) popl %ebx popl %eax #endif - ret + RET SYM_FUNC_END(startup32_check_sev_cbit) /* diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S index a6dea4e8a082..484a9c06f41d 100644 --- a/arch/x86/boot/compressed/mem_encrypt.S +++ b/arch/x86/boot/compressed/mem_encrypt.S @@ -58,7 +58,7 @@ SYM_FUNC_START(get_sev_encryption_bit) #endif /* CONFIG_AMD_MEM_ENCRYPT 
*/ - ret + RET SYM_FUNC_END(get_sev_encryption_bit) .code64 @@ -99,7 +99,7 @@ SYM_FUNC_START(set_sev_encryption_mask) #endif xor %rax, %rax - ret + RET SYM_FUNC_END(set_sev_encryption_mask) .data diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig index 14ea69c0d417..8c486248e3dd 100644 --- a/arch/x86/configs/gki_defconfig +++ b/arch/x86/configs/gki_defconfig @@ -464,6 +464,7 @@ CONFIG_MMC_CRYPTO=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_LEDS_CLASS_FLASH=y +CONFIG_LEDS_CLASS_MULTICOLOR=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_TRANSIENT=y CONFIG_EDAC=y @@ -580,7 +581,6 @@ CONFIG_HARDENED_USERCOPY=y CONFIG_STATIC_USERMODEHELPER=y CONFIG_STATIC_USERMODEHELPER_PATH="" CONFIG_SECURITY_SELINUX=y -CONFIG_INIT_STACK_ALL_ZERO=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y CONFIG_CRYPTO_CHACHA20POLY1305=y CONFIG_CRYPTO_ADIANTUM=y @@ -626,3 +626,4 @@ CONFIG_SCHEDSTATS=y CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_TRACEFS_DISABLE_AUTOMOUNT=y CONFIG_UNWINDER_FRAME_POINTER=y +CONFIG_FUNCTION_ERROR_INJECTION=y diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index a31de0c6ccde..66f25c2938bf 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -66,7 +66,9 @@ obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o -blake2s-x86_64-y := blake2s-core.o blake2s-glue.o +blake2s-x86_64-y := blake2s-shash.o +obj-$(if $(CONFIG_CRYPTO_BLAKE2S_X86),y) += libblake2s-x86_64.o +libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S index 51d46d93efbc..b48ddebb4748 100644 --- a/arch/x86/crypto/aegis128-aesni-asm.S +++ b/arch/x86/crypto/aegis128-aesni-asm.S @@ -122,7 +122,7 @@ SYM_FUNC_START_LOCAL(__load_partial) pxor T0, MSG .Lld_partial_8: - ret + RET SYM_FUNC_END(__load_partial) /* @@ -180,7 +180,7 @@ SYM_FUNC_START_LOCAL(__store_partial) mov %r10b, (%r9) .Lst_partial_1: - ret + RET SYM_FUNC_END(__store_partial) /* @@ -225,7 +225,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_init) movdqu STATE4, 0x40(STATEP) FRAME_END - ret + RET SYM_FUNC_END(crypto_aegis128_aesni_init) /* @@ -337,7 +337,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad) movdqu STATE3, 0x30(STATEP) movdqu STATE4, 0x40(STATEP) FRAME_END - ret + RET .Lad_out_1: movdqu STATE4, 0x00(STATEP) @@ -346,7 +346,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad) movdqu STATE2, 0x30(STATEP) movdqu STATE3, 0x40(STATEP) FRAME_END - ret + RET .Lad_out_2: movdqu STATE3, 0x00(STATEP) @@ -355,7 +355,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad) movdqu STATE1, 0x30(STATEP) movdqu STATE2, 0x40(STATEP) FRAME_END - ret + RET .Lad_out_3: movdqu STATE2, 0x00(STATEP) @@ -364,7 +364,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad) movdqu STATE0, 0x30(STATEP) movdqu STATE1, 0x40(STATEP) FRAME_END - ret + RET .Lad_out_4: movdqu STATE1, 0x00(STATEP) @@ -373,11 +373,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad) movdqu STATE4, 0x30(STATEP) movdqu STATE0, 0x40(STATEP) FRAME_END - ret + RET .Lad_out: FRAME_END - ret + RET SYM_FUNC_END(crypto_aegis128_aesni_ad) .macro encrypt_block a s0 s1 s2 s3 s4 i @@ -452,7 +452,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc) movdqu STATE2, 0x30(STATEP) movdqu STATE3, 0x40(STATEP) FRAME_END - ret + RET .Lenc_out_1: movdqu STATE3, 
0x00(STATEP) @@ -461,7 +461,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc) movdqu STATE1, 0x30(STATEP) movdqu STATE2, 0x40(STATEP) FRAME_END - ret + RET .Lenc_out_2: movdqu STATE2, 0x00(STATEP) @@ -470,7 +470,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc) movdqu STATE0, 0x30(STATEP) movdqu STATE1, 0x40(STATEP) FRAME_END - ret + RET .Lenc_out_3: movdqu STATE1, 0x00(STATEP) @@ -479,7 +479,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc) movdqu STATE4, 0x30(STATEP) movdqu STATE0, 0x40(STATEP) FRAME_END - ret + RET .Lenc_out_4: movdqu STATE0, 0x00(STATEP) @@ -488,11 +488,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc) movdqu STATE3, 0x30(STATEP) movdqu STATE4, 0x40(STATEP) FRAME_END - ret + RET .Lenc_out: FRAME_END - ret + RET SYM_FUNC_END(crypto_aegis128_aesni_enc) /* @@ -532,7 +532,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc_tail) movdqu STATE3, 0x40(STATEP) FRAME_END - ret + RET SYM_FUNC_END(crypto_aegis128_aesni_enc_tail) .macro decrypt_block a s0 s1 s2 s3 s4 i @@ -606,7 +606,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec) movdqu STATE2, 0x30(STATEP) movdqu STATE3, 0x40(STATEP) FRAME_END - ret + RET .Ldec_out_1: movdqu STATE3, 0x00(STATEP) @@ -615,7 +615,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec) movdqu STATE1, 0x30(STATEP) movdqu STATE2, 0x40(STATEP) FRAME_END - ret + RET .Ldec_out_2: movdqu STATE2, 0x00(STATEP) @@ -624,7 +624,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec) movdqu STATE0, 0x30(STATEP) movdqu STATE1, 0x40(STATEP) FRAME_END - ret + RET .Ldec_out_3: movdqu STATE1, 0x00(STATEP) @@ -633,7 +633,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec) movdqu STATE4, 0x30(STATEP) movdqu STATE0, 0x40(STATEP) FRAME_END - ret + RET .Ldec_out_4: movdqu STATE0, 0x00(STATEP) @@ -642,11 +642,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec) movdqu STATE3, 0x30(STATEP) movdqu STATE4, 0x40(STATEP) FRAME_END - ret + RET .Ldec_out: FRAME_END - ret + RET SYM_FUNC_END(crypto_aegis128_aesni_dec) /* @@ -696,7 +696,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec_tail) movdqu STATE3, 0x40(STATEP) FRAME_END - ret + RET SYM_FUNC_END(crypto_aegis128_aesni_dec_tail) /* @@ -743,5 +743,5 @@ SYM_FUNC_START(crypto_aegis128_aesni_final) movdqu MSG, (%rsi) FRAME_END - ret + RET SYM_FUNC_END(crypto_aegis128_aesni_final) diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 3f0fc7dd87d7..c799838242a6 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -525,7 +525,7 @@ ddq_add_8: /* return updated IV */ vpshufb xbyteswap, xcounter, xcounter vmovdqu xcounter, (p_iv) - ret + RET .endm /* diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 57aef3f5a81e..69c7c0dc22ea 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -1598,7 +1598,7 @@ SYM_FUNC_START(aesni_gcm_dec) GCM_ENC_DEC dec GCM_COMPLETE arg10, arg11 FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_dec) @@ -1687,7 +1687,7 @@ SYM_FUNC_START(aesni_gcm_enc) GCM_COMPLETE arg10, arg11 FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_enc) /***************************************************************************** @@ -1705,7 +1705,7 @@ SYM_FUNC_START(aesni_gcm_init) FUNC_SAVE GCM_INIT %arg3, %arg4,%arg5, %arg6 FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_init) /***************************************************************************** @@ -1720,7 +1720,7 @@ SYM_FUNC_START(aesni_gcm_enc_update) FUNC_SAVE GCM_ENC_DEC enc FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_enc_update) 
/***************************************************************************** @@ -1735,7 +1735,7 @@ SYM_FUNC_START(aesni_gcm_dec_update) FUNC_SAVE GCM_ENC_DEC dec FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_dec_update) /***************************************************************************** @@ -1750,7 +1750,7 @@ SYM_FUNC_START(aesni_gcm_finalize) FUNC_SAVE GCM_COMPLETE %arg3 %arg4 FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_finalize) #endif @@ -1766,7 +1766,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a) pxor %xmm1, %xmm0 movaps %xmm0, (TKEYP) add $0x10, TKEYP - ret + RET SYM_FUNC_END(_key_expansion_256a) SYM_FUNC_END_ALIAS(_key_expansion_128) @@ -1791,7 +1791,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_192a) shufps $0b01001110, %xmm2, %xmm1 movaps %xmm1, 0x10(TKEYP) add $0x20, TKEYP - ret + RET SYM_FUNC_END(_key_expansion_192a) SYM_FUNC_START_LOCAL(_key_expansion_192b) @@ -1810,7 +1810,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_192b) movaps %xmm0, (TKEYP) add $0x10, TKEYP - ret + RET SYM_FUNC_END(_key_expansion_192b) SYM_FUNC_START_LOCAL(_key_expansion_256b) @@ -1822,7 +1822,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_256b) pxor %xmm1, %xmm2 movaps %xmm2, (TKEYP) add $0x10, TKEYP - ret + RET SYM_FUNC_END(_key_expansion_256b) /* @@ -1937,7 +1937,7 @@ SYM_FUNC_START(aesni_set_key) popl KEYP #endif FRAME_END - ret + RET SYM_FUNC_END(aesni_set_key) /* @@ -1961,7 +1961,7 @@ SYM_FUNC_START(aesni_enc) popl KEYP #endif FRAME_END - ret + RET SYM_FUNC_END(aesni_enc) /* @@ -2018,7 +2018,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc1) aesenc KEY, STATE movaps 0x70(TKEYP), KEY aesenclast KEY, STATE - ret + RET SYM_FUNC_END(_aesni_enc1) /* @@ -2126,7 +2126,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc4) aesenclast KEY, STATE2 aesenclast KEY, STATE3 aesenclast KEY, STATE4 - ret + RET SYM_FUNC_END(_aesni_enc4) /* @@ -2151,7 +2151,7 @@ SYM_FUNC_START(aesni_dec) popl KEYP #endif FRAME_END - ret + RET SYM_FUNC_END(aesni_dec) /* @@ -2208,7 +2208,7 @@ SYM_FUNC_START_LOCAL(_aesni_dec1) aesdec KEY, STATE movaps 0x70(TKEYP), KEY aesdeclast KEY, STATE - ret + RET SYM_FUNC_END(_aesni_dec1) /* @@ -2316,7 +2316,7 @@ SYM_FUNC_START_LOCAL(_aesni_dec4) aesdeclast KEY, STATE2 aesdeclast KEY, STATE3 aesdeclast KEY, STATE4 - ret + RET SYM_FUNC_END(_aesni_dec4) /* @@ -2376,7 +2376,7 @@ SYM_FUNC_START(aesni_ecb_enc) popl LEN #endif FRAME_END - ret + RET SYM_FUNC_END(aesni_ecb_enc) /* @@ -2437,7 +2437,7 @@ SYM_FUNC_START(aesni_ecb_dec) popl LEN #endif FRAME_END - ret + RET SYM_FUNC_END(aesni_ecb_dec) /* @@ -2481,7 +2481,7 @@ SYM_FUNC_START(aesni_cbc_enc) popl IVP #endif FRAME_END - ret + RET SYM_FUNC_END(aesni_cbc_enc) /* @@ -2574,7 +2574,7 @@ SYM_FUNC_START(aesni_cbc_dec) popl IVP #endif FRAME_END - ret + RET SYM_FUNC_END(aesni_cbc_dec) #ifdef __x86_64__ @@ -2602,7 +2602,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc_init) mov $1, TCTR_LOW movq TCTR_LOW, INC movq CTR, TCTR_LOW - ret + RET SYM_FUNC_END(_aesni_inc_init) /* @@ -2630,7 +2630,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc) .Linc_low: movaps CTR, IV pshufb BSWAP_MASK, IV - ret + RET SYM_FUNC_END(_aesni_inc) /* @@ -2693,7 +2693,7 @@ SYM_FUNC_START(aesni_ctr_enc) movups IV, (IVP) .Lctr_enc_just_ret: FRAME_END - ret + RET SYM_FUNC_END(aesni_ctr_enc) /* @@ -2778,7 +2778,7 @@ SYM_FUNC_START(aesni_xts_encrypt) movups IV, (IVP) FRAME_END - ret + RET SYM_FUNC_END(aesni_xts_encrypt) /* @@ -2846,7 +2846,7 @@ SYM_FUNC_START(aesni_xts_decrypt) movups IV, (IVP) FRAME_END - ret + RET SYM_FUNC_END(aesni_xts_decrypt) #endif diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S 
b/arch/x86/crypto/aesni-intel_avx-x86_64.S index 2cf8e94d986a..4d9b2f887064 100644 --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S @@ -1777,7 +1777,7 @@ SYM_FUNC_START(aesni_gcm_init_avx_gen2) FUNC_SAVE INIT GHASH_MUL_AVX, PRECOMPUTE_AVX FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_init_avx_gen2) ############################################################################### @@ -1798,15 +1798,15 @@ SYM_FUNC_START(aesni_gcm_enc_update_avx_gen2) # must be 192 GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 11 FUNC_RESTORE - ret + RET key_128_enc_update: GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 9 FUNC_RESTORE - ret + RET key_256_enc_update: GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13 FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_enc_update_avx_gen2) ############################################################################### @@ -1827,15 +1827,15 @@ SYM_FUNC_START(aesni_gcm_dec_update_avx_gen2) # must be 192 GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 11 FUNC_RESTORE - ret + RET key_128_dec_update: GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 9 FUNC_RESTORE - ret + RET key_256_dec_update: GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13 FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_dec_update_avx_gen2) ############################################################################### @@ -1856,15 +1856,15 @@ SYM_FUNC_START(aesni_gcm_finalize_avx_gen2) # must be 192 GCM_COMPLETE GHASH_MUL_AVX, 11, arg3, arg4 FUNC_RESTORE - ret + RET key_128_finalize: GCM_COMPLETE GHASH_MUL_AVX, 9, arg3, arg4 FUNC_RESTORE - ret + RET key_256_finalize: GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4 FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_finalize_avx_gen2) ############################################################################### @@ -2745,7 +2745,7 @@ SYM_FUNC_START(aesni_gcm_init_avx_gen4) FUNC_SAVE INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2 FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_init_avx_gen4) ############################################################################### @@ -2766,15 +2766,15 @@ SYM_FUNC_START(aesni_gcm_enc_update_avx_gen4) # must be 192 GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 11 FUNC_RESTORE - ret + RET key_128_enc_update4: GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 9 FUNC_RESTORE - ret + RET key_256_enc_update4: GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13 FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_enc_update_avx_gen4) ############################################################################### @@ -2795,15 +2795,15 @@ SYM_FUNC_START(aesni_gcm_dec_update_avx_gen4) # must be 192 GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 11 FUNC_RESTORE - ret + RET key_128_dec_update4: GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 9 FUNC_RESTORE - ret + RET key_256_dec_update4: GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13 
FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_dec_update_avx_gen4) ############################################################################### @@ -2824,13 +2824,13 @@ SYM_FUNC_START(aesni_gcm_finalize_avx_gen4) # must be 192 GCM_COMPLETE GHASH_MUL_AVX2, 11, arg3, arg4 FUNC_RESTORE - ret + RET key_128_finalize4: GCM_COMPLETE GHASH_MUL_AVX2, 9, arg3, arg4 FUNC_RESTORE - ret + RET key_256_finalize4: GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4 FUNC_RESTORE - ret + RET SYM_FUNC_END(aesni_gcm_finalize_avx_gen4) diff --git a/arch/x86/crypto/blake2s-core.S b/arch/x86/crypto/blake2s-core.S index 2ca79974f819..b50b35ff1fdb 100644 --- a/arch/x86/crypto/blake2s-core.S +++ b/arch/x86/crypto/blake2s-core.S @@ -171,7 +171,7 @@ SYM_FUNC_START(blake2s_compress_ssse3) movdqu %xmm1,0x10(%rdi) movdqu %xmm14,0x20(%rdi) .Lendofloop: - ret + RET SYM_FUNC_END(blake2s_compress_ssse3) #ifdef CONFIG_AS_AVX512 @@ -251,6 +251,6 @@ SYM_FUNC_START(blake2s_compress_avx512) vmovdqu %xmm1,0x10(%rdi) vmovdqu %xmm4,0x20(%rdi) vzeroupper - retq + RET SYM_FUNC_END(blake2s_compress_avx512) #endif /* CONFIG_AS_AVX512 */ diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c index a40365ab301e..69853c13e8fb 100644 --- a/arch/x86/crypto/blake2s-glue.c +++ b/arch/x86/crypto/blake2s-glue.c @@ -5,7 +5,6 @@ #include #include -#include #include #include @@ -28,9 +27,8 @@ asmlinkage void blake2s_compress_avx512(struct blake2s_state *state, static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3); static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512); -void blake2s_compress_arch(struct blake2s_state *state, - const u8 *block, size_t nblocks, - const u32 inc) +void blake2s_compress(struct blake2s_state *state, const u8 *block, + size_t nblocks, const u32 inc) { /* SIMD disables preemption, so relax after processing each page. 
*/ BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8); @@ -56,49 +54,12 @@ void blake2s_compress_arch(struct blake2s_state *state, block += blocks * BLAKE2S_BLOCK_SIZE; } while (nblocks); } -EXPORT_SYMBOL(blake2s_compress_arch); - -static int crypto_blake2s_update_x86(struct shash_desc *desc, - const u8 *in, unsigned int inlen) -{ - return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch); -} - -static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out) -{ - return crypto_blake2s_final(desc, out, blake2s_compress_arch); -} - -#define BLAKE2S_ALG(name, driver_name, digest_size) \ - { \ - .base.cra_name = name, \ - .base.cra_driver_name = driver_name, \ - .base.cra_priority = 200, \ - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ - .base.cra_module = THIS_MODULE, \ - .digestsize = digest_size, \ - .setkey = crypto_blake2s_setkey, \ - .init = crypto_blake2s_init, \ - .update = crypto_blake2s_update_x86, \ - .final = crypto_blake2s_final_x86, \ - .descsize = sizeof(struct blake2s_state), \ - } - -static struct shash_alg blake2s_algs[] = { - BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE), - BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE), - BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE), - BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE), -}; +EXPORT_SYMBOL(blake2s_compress); static int __init blake2s_mod_init(void) { - if (!boot_cpu_has(X86_FEATURE_SSSE3)) - return 0; - - static_branch_enable(&blake2s_use_ssse3); + if (boot_cpu_has(X86_FEATURE_SSSE3)) + static_branch_enable(&blake2s_use_ssse3); if (IS_ENABLED(CONFIG_AS_AVX512) && boot_cpu_has(X86_FEATURE_AVX) && @@ -109,26 +70,9 @@ static int __init blake2s_mod_init(void) XFEATURE_MASK_AVX512, NULL)) static_branch_enable(&blake2s_use_avx512); - return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? - crypto_register_shashes(blake2s_algs, - ARRAY_SIZE(blake2s_algs)) : 0; -} - -static void __exit blake2s_mod_exit(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3)) - crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); + return 0; } module_init(blake2s_mod_init); -module_exit(blake2s_mod_exit); -MODULE_ALIAS_CRYPTO("blake2s-128"); -MODULE_ALIAS_CRYPTO("blake2s-128-x86"); -MODULE_ALIAS_CRYPTO("blake2s-160"); -MODULE_ALIAS_CRYPTO("blake2s-160-x86"); -MODULE_ALIAS_CRYPTO("blake2s-224"); -MODULE_ALIAS_CRYPTO("blake2s-224-x86"); -MODULE_ALIAS_CRYPTO("blake2s-256"); -MODULE_ALIAS_CRYPTO("blake2s-256-x86"); MODULE_LICENSE("GPL v2"); diff --git a/arch/x86/crypto/blake2s-shash.c b/arch/x86/crypto/blake2s-shash.c new file mode 100644 index 000000000000..59ae28abe35c --- /dev/null +++ b/arch/x86/crypto/blake2s-shash.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +static int crypto_blake2s_update_x86(struct shash_desc *desc, + const u8 *in, unsigned int inlen) +{ + return crypto_blake2s_update(desc, in, inlen, false); +} + +static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out) +{ + return crypto_blake2s_final(desc, out, false); +} + +#define BLAKE2S_ALG(name, driver_name, digest_size) \ + { \ + .base.cra_name = name, \ + .base.cra_driver_name = driver_name, \ + .base.cra_priority = 200, \ + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ + .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ + .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ + .base.cra_module = THIS_MODULE, \ + .digestsize = digest_size, \ + .setkey = crypto_blake2s_setkey, \ + .init = crypto_blake2s_init, \ + .update = crypto_blake2s_update_x86, \ + .final = crypto_blake2s_final_x86, \ + .descsize = sizeof(struct blake2s_state), \ + } + +static struct shash_alg blake2s_algs[] = { + BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE), + BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE), + BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE), + BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE), +}; + +static int __init blake2s_mod_init(void) +{ + if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3)) + return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); + return 0; +} + +static void __exit blake2s_mod_exit(void) +{ + if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3)) + crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); +} + +module_init(blake2s_mod_init); +module_exit(blake2s_mod_exit); + +MODULE_ALIAS_CRYPTO("blake2s-128"); +MODULE_ALIAS_CRYPTO("blake2s-128-x86"); +MODULE_ALIAS_CRYPTO("blake2s-160"); +MODULE_ALIAS_CRYPTO("blake2s-160-x86"); +MODULE_ALIAS_CRYPTO("blake2s-224"); +MODULE_ALIAS_CRYPTO("blake2s-224-x86"); +MODULE_ALIAS_CRYPTO("blake2s-256"); +MODULE_ALIAS_CRYPTO("blake2s-256-x86"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S index 4222ac6d6584..802d71582689 100644 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S @@ -135,10 +135,10 @@ SYM_FUNC_START(__blowfish_enc_blk) jnz .L__enc_xor; write_block(); - ret; + RET; .L__enc_xor: xor_block(); - ret; + RET; SYM_FUNC_END(__blowfish_enc_blk) SYM_FUNC_START(blowfish_dec_blk) @@ -170,7 +170,7 @@ SYM_FUNC_START(blowfish_dec_blk) movq %r11, %r12; - ret; + RET; SYM_FUNC_END(blowfish_dec_blk) /********************************************************************** @@ -322,14 +322,14 @@ SYM_FUNC_START(__blowfish_enc_blk_4way) popq %rbx; popq %r12; - ret; + RET; .L__enc_xor4: xor_block4(); popq %rbx; popq %r12; - ret; + RET; SYM_FUNC_END(__blowfish_enc_blk_4way) SYM_FUNC_START(blowfish_dec_blk_4way) @@ -364,5 +364,5 @@ SYM_FUNC_START(blowfish_dec_blk_4way) popq %rbx; popq %r12; - ret; + RET; SYM_FUNC_END(blowfish_dec_blk_4way) diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index ecc0a9a905c4..297b1a7ed2bc 100644 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S @@ -193,7 +193,7 @@ SYM_FUNC_START_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_c roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, 
%xmm12, %xmm13, %xmm14, %xmm15, %rcx, (%r9)); - ret; + RET; SYM_FUNC_END(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) .align 8 @@ -201,7 +201,7 @@ SYM_FUNC_START_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_a roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11, %rax, (%r9)); - ret; + RET; SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) /* @@ -787,7 +787,7 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk16) %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax)); FRAME_END - ret; + RET; .align 8 .Lenc_max32: @@ -874,7 +874,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk16) %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax)); FRAME_END - ret; + RET; .align 8 .Ldec_max32: @@ -915,7 +915,7 @@ SYM_FUNC_START(camellia_ecb_enc_16way) %xmm8, %rsi); FRAME_END - ret; + RET; SYM_FUNC_END(camellia_ecb_enc_16way) SYM_FUNC_START(camellia_ecb_dec_16way) @@ -945,7 +945,7 @@ SYM_FUNC_START(camellia_ecb_dec_16way) %xmm8, %rsi); FRAME_END - ret; + RET; SYM_FUNC_END(camellia_ecb_dec_16way) SYM_FUNC_START(camellia_cbc_dec_16way) @@ -996,7 +996,7 @@ SYM_FUNC_START(camellia_cbc_dec_16way) %xmm8, %rsi); FRAME_END - ret; + RET; SYM_FUNC_END(camellia_cbc_dec_16way) #define inc_le128(x, minus_one, tmp) \ @@ -1109,7 +1109,7 @@ SYM_FUNC_START(camellia_ctr_16way) %xmm8, %rsi); FRAME_END - ret; + RET; SYM_FUNC_END(camellia_ctr_16way) #define gf128mul_x_ble(iv, mask, tmp) \ @@ -1253,7 +1253,7 @@ SYM_FUNC_START_LOCAL(camellia_xts_crypt_16way) %xmm8, %rsi); FRAME_END - ret; + RET; SYM_FUNC_END(camellia_xts_crypt_16way) SYM_FUNC_START(camellia_xts_enc_16way) diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index 0907243c501c..288cd246da20 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -227,7 +227,7 @@ SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_c roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rcx, (%r9)); - ret; + RET; SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) .align 8 @@ -235,7 +235,7 @@ SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_a roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3, %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11, %rax, (%r9)); - ret; + RET; SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) /* @@ -825,7 +825,7 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk32) %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax)); FRAME_END - ret; + RET; .align 8 .Lenc_max32: @@ -912,7 +912,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk32) %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax)); FRAME_END - ret; + RET; .align 8 .Ldec_max32: @@ -957,7 +957,7 @@ SYM_FUNC_START(camellia_ecb_enc_32way) vzeroupper; FRAME_END - ret; + RET; SYM_FUNC_END(camellia_ecb_enc_32way) SYM_FUNC_START(camellia_ecb_dec_32way) @@ -991,7 +991,7 @@ SYM_FUNC_START(camellia_ecb_dec_32way) vzeroupper; FRAME_END - ret; + RET; SYM_FUNC_END(camellia_ecb_dec_32way) SYM_FUNC_START(camellia_cbc_dec_32way) @@ -1059,7 +1059,7 @@ SYM_FUNC_START(camellia_cbc_dec_32way) vzeroupper; FRAME_END - ret; + RET; SYM_FUNC_END(camellia_cbc_dec_32way) #define inc_le128(x, minus_one, tmp) \ @@ -1199,7 +1199,7 @@ SYM_FUNC_START(camellia_ctr_32way) vzeroupper; FRAME_END - ret; + RET; 
SYM_FUNC_END(camellia_ctr_32way) #define gf128mul_x_ble(iv, mask, tmp) \ @@ -1366,7 +1366,7 @@ SYM_FUNC_START_LOCAL(camellia_xts_crypt_32way) vzeroupper; FRAME_END - ret; + RET; SYM_FUNC_END(camellia_xts_crypt_32way) SYM_FUNC_START(camellia_xts_enc_32way) diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S index 1372e6408850..347c059f5940 100644 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S @@ -213,13 +213,13 @@ SYM_FUNC_START(__camellia_enc_blk) enc_outunpack(mov, RT1); movq RR12, %r12; - ret; + RET; .L__enc_xor: enc_outunpack(xor, RT1); movq RR12, %r12; - ret; + RET; SYM_FUNC_END(__camellia_enc_blk) SYM_FUNC_START(camellia_dec_blk) @@ -257,7 +257,7 @@ SYM_FUNC_START(camellia_dec_blk) dec_outunpack(); movq RR12, %r12; - ret; + RET; SYM_FUNC_END(camellia_dec_blk) /********************************************************************** @@ -448,14 +448,14 @@ SYM_FUNC_START(__camellia_enc_blk_2way) movq RR12, %r12; popq %rbx; - ret; + RET; .L__enc2_xor: enc_outunpack2(xor, RT2); movq RR12, %r12; popq %rbx; - ret; + RET; SYM_FUNC_END(__camellia_enc_blk_2way) SYM_FUNC_START(camellia_dec_blk_2way) @@ -495,5 +495,5 @@ SYM_FUNC_START(camellia_dec_blk_2way) movq RR12, %r12; movq RXOR, %rbx; - ret; + RET; SYM_FUNC_END(camellia_dec_blk_2way) diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S index 8a6181b08b59..b258af420c92 100644 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S @@ -279,7 +279,7 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16) outunpack_blocks(RR3, RL3, RTMP, RX, RKM); outunpack_blocks(RR4, RL4, RTMP, RX, RKM); - ret; + RET; SYM_FUNC_END(__cast5_enc_blk16) .align 16 @@ -352,7 +352,7 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16) outunpack_blocks(RR3, RL3, RTMP, RX, RKM); outunpack_blocks(RR4, RL4, RTMP, RX, RKM); - ret; + RET; .L__skip_dec: vpsrldq $4, RKR, RKR; @@ -393,7 +393,7 @@ SYM_FUNC_START(cast5_ecb_enc_16way) popq %r15; FRAME_END - ret; + RET; SYM_FUNC_END(cast5_ecb_enc_16way) SYM_FUNC_START(cast5_ecb_dec_16way) @@ -431,7 +431,7 @@ SYM_FUNC_START(cast5_ecb_dec_16way) popq %r15; FRAME_END - ret; + RET; SYM_FUNC_END(cast5_ecb_dec_16way) SYM_FUNC_START(cast5_cbc_dec_16way) @@ -483,7 +483,7 @@ SYM_FUNC_START(cast5_cbc_dec_16way) popq %r15; popq %r12; FRAME_END - ret; + RET; SYM_FUNC_END(cast5_cbc_dec_16way) SYM_FUNC_START(cast5_ctr_16way) @@ -559,5 +559,5 @@ SYM_FUNC_START(cast5_ctr_16way) popq %r15; popq %r12; FRAME_END - ret; + RET; SYM_FUNC_END(cast5_ctr_16way) diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S index 932a3ce32a88..6eccaf1fb4c6 100644 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S @@ -291,7 +291,7 @@ SYM_FUNC_START_LOCAL(__cast6_enc_blk8) outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); - ret; + RET; SYM_FUNC_END(__cast6_enc_blk8) .align 8 @@ -338,7 +338,7 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8) outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); - ret; + RET; SYM_FUNC_END(__cast6_dec_blk8) SYM_FUNC_START(cast6_ecb_enc_8way) @@ -361,7 +361,7 @@ SYM_FUNC_START(cast6_ecb_enc_8way) popq %r15; FRAME_END - ret; + RET; SYM_FUNC_END(cast6_ecb_enc_8way) SYM_FUNC_START(cast6_ecb_dec_8way) @@ -384,7 +384,7 @@ SYM_FUNC_START(cast6_ecb_dec_8way) popq %r15; FRAME_END - 
ret; + RET; SYM_FUNC_END(cast6_ecb_dec_8way) SYM_FUNC_START(cast6_cbc_dec_8way) @@ -410,7 +410,7 @@ SYM_FUNC_START(cast6_cbc_dec_8way) popq %r15; popq %r12; FRAME_END - ret; + RET; SYM_FUNC_END(cast6_cbc_dec_8way) SYM_FUNC_START(cast6_ctr_8way) @@ -438,7 +438,7 @@ SYM_FUNC_START(cast6_ctr_8way) popq %r15; popq %r12; FRAME_END - ret; + RET; SYM_FUNC_END(cast6_ctr_8way) SYM_FUNC_START(cast6_xts_enc_8way) @@ -465,7 +465,7 @@ SYM_FUNC_START(cast6_xts_enc_8way) popq %r15; FRAME_END - ret; + RET; SYM_FUNC_END(cast6_xts_enc_8way) SYM_FUNC_START(cast6_xts_dec_8way) @@ -492,5 +492,5 @@ SYM_FUNC_START(cast6_xts_dec_8way) popq %r15; FRAME_END - ret; + RET; SYM_FUNC_END(cast6_xts_dec_8way) diff --git a/arch/x86/crypto/chacha-avx2-x86_64.S b/arch/x86/crypto/chacha-avx2-x86_64.S index ee9a40ab4109..f3d8fc018249 100644 --- a/arch/x86/crypto/chacha-avx2-x86_64.S +++ b/arch/x86/crypto/chacha-avx2-x86_64.S @@ -193,7 +193,7 @@ SYM_FUNC_START(chacha_2block_xor_avx2) .Ldone2: vzeroupper - ret + RET .Lxorpart2: # xor remaining bytes from partial register into output @@ -498,7 +498,7 @@ SYM_FUNC_START(chacha_4block_xor_avx2) .Ldone4: vzeroupper - ret + RET .Lxorpart4: # xor remaining bytes from partial register into output @@ -992,7 +992,7 @@ SYM_FUNC_START(chacha_8block_xor_avx2) .Ldone8: vzeroupper lea -8(%r10),%rsp - ret + RET .Lxorpart8: # xor remaining bytes from partial register into output diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/crypto/chacha-avx512vl-x86_64.S index bb193fde123a..259383e1ad44 100644 --- a/arch/x86/crypto/chacha-avx512vl-x86_64.S +++ b/arch/x86/crypto/chacha-avx512vl-x86_64.S @@ -166,13 +166,13 @@ SYM_FUNC_START(chacha_2block_xor_avx512vl) .Ldone2: vzeroupper - ret + RET .Lxorpart2: # xor remaining bytes from partial register into output mov %rcx,%rax and $0xf,%rcx - jz .Ldone8 + jz .Ldone2 mov %rax,%r9 and $~0xf,%r9 @@ -432,13 +432,13 @@ SYM_FUNC_START(chacha_4block_xor_avx512vl) .Ldone4: vzeroupper - ret + RET .Lxorpart4: # xor remaining bytes from partial register into output mov %rcx,%rax and $0xf,%rcx - jz .Ldone8 + jz .Ldone4 mov %rax,%r9 and $~0xf,%r9 @@ -812,7 +812,7 @@ SYM_FUNC_START(chacha_8block_xor_avx512vl) .Ldone8: vzeroupper - ret + RET .Lxorpart8: # xor remaining bytes from partial register into output diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S index ca1788bfee16..7111949cd5b9 100644 --- a/arch/x86/crypto/chacha-ssse3-x86_64.S +++ b/arch/x86/crypto/chacha-ssse3-x86_64.S @@ -108,7 +108,7 @@ SYM_FUNC_START_LOCAL(chacha_permute) sub $2,%r8d jnz .Ldoubleround - ret + RET SYM_FUNC_END(chacha_permute) SYM_FUNC_START(chacha_block_xor_ssse3) @@ -166,7 +166,7 @@ SYM_FUNC_START(chacha_block_xor_ssse3) .Ldone: FRAME_END - ret + RET .Lxorpart: # xor remaining bytes from partial register into output @@ -217,7 +217,7 @@ SYM_FUNC_START(hchacha_block_ssse3) movdqu %xmm3,0x10(%rsi) FRAME_END - ret + RET SYM_FUNC_END(hchacha_block_ssse3) SYM_FUNC_START(chacha_4block_xor_ssse3) @@ -762,7 +762,7 @@ SYM_FUNC_START(chacha_4block_xor_ssse3) .Ldone4: lea -8(%r10),%rsp - ret + RET .Lxorpart4: # xor remaining bytes from partial register into output diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S index 6e7d4c4d3208..c392a6edbfff 100644 --- a/arch/x86/crypto/crc32-pclmul_asm.S +++ b/arch/x86/crypto/crc32-pclmul_asm.S @@ -236,5 +236,5 @@ fold_64: pxor %xmm2, %xmm1 pextrd $0x01, %xmm1, %eax - ret + RET SYM_FUNC_END(crc32_pclmul_le_16) diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S 
b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index 884dc767b051..f6e3568fbd63 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -309,7 +309,7 @@ do_return: popq %rsi popq %rdi popq %rbx - ret + RET SYM_FUNC_END(crc_pcl) .section .rodata, "a", @progbits diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S index b2533d63030e..721474abfb71 100644 --- a/arch/x86/crypto/crct10dif-pcl-asm_64.S +++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S @@ -257,7 +257,7 @@ SYM_FUNC_START(crc_t10dif_pcl) # Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0. pextrw $0, %xmm0, %eax - ret + RET .align 16 .Lless_than_256_bytes: diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S index fac0fdc3f25d..f4c760f4cade 100644 --- a/arch/x86/crypto/des3_ede-asm_64.S +++ b/arch/x86/crypto/des3_ede-asm_64.S @@ -243,7 +243,7 @@ SYM_FUNC_START(des3_ede_x86_64_crypt_blk) popq %r12; popq %rbx; - ret; + RET; SYM_FUNC_END(des3_ede_x86_64_crypt_blk) /*********************************************************************** @@ -528,7 +528,7 @@ SYM_FUNC_START(des3_ede_x86_64_crypt_blk_3way) popq %r12; popq %rbx; - ret; + RET; SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way) .section .rodata, "a", @progbits diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S index 99ac25e18e09..2bf871899920 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S @@ -85,7 +85,7 @@ SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble) psrlq $1, T2 pxor T2, T1 pxor T1, DATA - ret + RET SYM_FUNC_END(__clmul_gf128mul_ble) /* void clmul_ghash_mul(char *dst, const u128 *shash) */ @@ -99,7 +99,7 @@ SYM_FUNC_START(clmul_ghash_mul) pshufb BSWAP, DATA movups DATA, (%rdi) FRAME_END - ret + RET SYM_FUNC_END(clmul_ghash_mul) /* @@ -128,5 +128,5 @@ SYM_FUNC_START(clmul_ghash_update) movups DATA, (%rdi) .Lupdate_just_ret: FRAME_END - ret + RET SYM_FUNC_END(clmul_ghash_update) diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S index b22c7b936272..6a0b15e7196a 100644 --- a/arch/x86/crypto/nh-avx2-x86_64.S +++ b/arch/x86/crypto/nh-avx2-x86_64.S @@ -153,5 +153,5 @@ SYM_FUNC_START(nh_avx2) vpaddq T1, T0, T0 vpaddq T4, T0, T0 vmovdqu T0, (HASH) - ret + RET SYM_FUNC_END(nh_avx2) diff --git a/arch/x86/crypto/nh-sse2-x86_64.S b/arch/x86/crypto/nh-sse2-x86_64.S index d7ae22dd6683..34c567bbcb4f 100644 --- a/arch/x86/crypto/nh-sse2-x86_64.S +++ b/arch/x86/crypto/nh-sse2-x86_64.S @@ -119,5 +119,5 @@ SYM_FUNC_START(nh_sse2) paddq PASS2_SUMS, T1 movdqu T0, 0x00(HASH) movdqu T1, 0x10(HASH) - ret + RET SYM_FUNC_END(nh_sse2) diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl index 7d568012cc15..58eaec958c86 100644 --- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl +++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl @@ -297,7 +297,7 @@ ___ $code.=<<___; mov \$1,%eax .Lno_key: - ret + RET ___ &end_function("poly1305_init_x86_64"); @@ -373,7 +373,7 @@ $code.=<<___; .cfi_adjust_cfa_offset -48 .Lno_data: .Lblocks_epilogue: - ret + RET .cfi_endproc ___ &end_function("poly1305_blocks_x86_64"); @@ -399,7 +399,7 @@ $code.=<<___; mov %rax,0($mac) # write result mov %rcx,8($mac) - ret + RET ___ &end_function("poly1305_emit_x86_64"); if ($avx) { @@ -429,7 +429,7 @@ ___ &poly1305_iteration(); $code.=<<___; pop $ctx - ret + RET .size __poly1305_block,.-__poly1305_block .type 
__poly1305_init_avx,\@abi-omnipotent @@ -594,7 +594,7 @@ __poly1305_init_avx: lea -48-64($ctx),$ctx # size [de-]optimization pop %rbp - ret + RET .size __poly1305_init_avx,.-__poly1305_init_avx ___ @@ -747,7 +747,7 @@ $code.=<<___; .cfi_restore %rbp .Lno_data_avx: .Lblocks_avx_epilogue: - ret + RET .cfi_endproc .align 32 @@ -1452,7 +1452,7 @@ $code.=<<___ if (!$win64); ___ $code.=<<___; vzeroupper - ret + RET .cfi_endproc ___ &end_function("poly1305_blocks_avx"); @@ -1508,7 +1508,7 @@ $code.=<<___; mov %rax,0($mac) # write result mov %rcx,8($mac) - ret + RET ___ &end_function("poly1305_emit_avx"); @@ -1675,7 +1675,7 @@ $code.=<<___; .cfi_restore %rbp .Lno_data_avx2$suffix: .Lblocks_avx2_epilogue$suffix: - ret + RET .cfi_endproc .align 32 @@ -2201,7 +2201,7 @@ $code.=<<___ if (!$win64); ___ $code.=<<___; vzeroupper - ret + RET .cfi_endproc ___ if($avx > 2 && $avx512) { @@ -2792,7 +2792,7 @@ $code.=<<___ if (!$win64); .cfi_def_cfa_register %rsp ___ $code.=<<___; - ret + RET .cfi_endproc ___ @@ -2893,7 +2893,7 @@ $code.=<<___ if ($flavour =~ /elf32/); ___ $code.=<<___; mov \$1,%eax - ret + RET .size poly1305_init_base2_44,.-poly1305_init_base2_44 ___ { @@ -3010,7 +3010,7 @@ poly1305_blocks_vpmadd52: jnz .Lblocks_vpmadd52_4x .Lno_data_vpmadd52: - ret + RET .size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52 ___ } @@ -3451,7 +3451,7 @@ poly1305_blocks_vpmadd52_4x: vzeroall .Lno_data_vpmadd52_4x: - ret + RET .size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x ___ } @@ -3824,7 +3824,7 @@ $code.=<<___; vzeroall .Lno_data_vpmadd52_8x: - ret + RET .size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x ___ } @@ -3861,7 +3861,7 @@ poly1305_emit_base2_44: mov %rax,0($mac) # write result mov %rcx,8($mac) - ret + RET .size poly1305_emit_base2_44,.-poly1305_emit_base2_44 ___ } } } @@ -3916,7 +3916,7 @@ xor128_encrypt_n_pad: .Ldone_enc: mov $otp,%rax - ret + RET .size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad .globl xor128_decrypt_n_pad @@ -3967,7 +3967,7 @@ xor128_decrypt_n_pad: .Ldone_dec: mov $otp,%rax - ret + RET .size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad ___ } @@ -4109,7 +4109,7 @@ avx_handler: pop %rbx pop %rdi pop %rsi - ret + RET .size avx_handler,.-avx_handler .section .pdata diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S index ba9e4c1e7f5c..c985bc15304b 100644 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S @@ -605,7 +605,7 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx) write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); - ret; + RET; SYM_FUNC_END(__serpent_enc_blk8_avx) .align 8 @@ -659,7 +659,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx) write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); - ret; + RET; SYM_FUNC_END(__serpent_dec_blk8_avx) SYM_FUNC_START(serpent_ecb_enc_8way_avx) @@ -677,7 +677,7 @@ SYM_FUNC_START(serpent_ecb_enc_8way_avx) store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + RET; SYM_FUNC_END(serpent_ecb_enc_8way_avx) SYM_FUNC_START(serpent_ecb_dec_8way_avx) @@ -695,7 +695,7 @@ SYM_FUNC_START(serpent_ecb_dec_8way_avx) store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); FRAME_END - ret; + RET; SYM_FUNC_END(serpent_ecb_dec_8way_avx) SYM_FUNC_START(serpent_cbc_dec_8way_avx) @@ -713,7 +713,7 @@ SYM_FUNC_START(serpent_cbc_dec_8way_avx) store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); FRAME_END - 
ret; + RET; SYM_FUNC_END(serpent_cbc_dec_8way_avx) SYM_FUNC_START(serpent_ctr_8way_avx) @@ -733,7 +733,7 @@ SYM_FUNC_START(serpent_ctr_8way_avx) store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + RET; SYM_FUNC_END(serpent_ctr_8way_avx) SYM_FUNC_START(serpent_xts_enc_8way_avx) @@ -755,7 +755,7 @@ SYM_FUNC_START(serpent_xts_enc_8way_avx) store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + RET; SYM_FUNC_END(serpent_xts_enc_8way_avx) SYM_FUNC_START(serpent_xts_dec_8way_avx) @@ -777,5 +777,5 @@ SYM_FUNC_START(serpent_xts_dec_8way_avx) store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); FRAME_END - ret; + RET; SYM_FUNC_END(serpent_xts_dec_8way_avx) diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S index c9648aeae705..ca18948964f4 100644 --- a/arch/x86/crypto/serpent-avx2-asm_64.S +++ b/arch/x86/crypto/serpent-avx2-asm_64.S @@ -611,7 +611,7 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk16) write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); - ret; + RET; SYM_FUNC_END(__serpent_enc_blk16) .align 8 @@ -665,7 +665,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk16) write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); - ret; + RET; SYM_FUNC_END(__serpent_dec_blk16) SYM_FUNC_START(serpent_ecb_enc_16way) @@ -687,7 +687,7 @@ SYM_FUNC_START(serpent_ecb_enc_16way) vzeroupper; FRAME_END - ret; + RET; SYM_FUNC_END(serpent_ecb_enc_16way) SYM_FUNC_START(serpent_ecb_dec_16way) @@ -709,7 +709,7 @@ SYM_FUNC_START(serpent_ecb_dec_16way) vzeroupper; FRAME_END - ret; + RET; SYM_FUNC_END(serpent_ecb_dec_16way) SYM_FUNC_START(serpent_cbc_dec_16way) @@ -732,7 +732,7 @@ SYM_FUNC_START(serpent_cbc_dec_16way) vzeroupper; FRAME_END - ret; + RET; SYM_FUNC_END(serpent_cbc_dec_16way) SYM_FUNC_START(serpent_ctr_16way) @@ -757,7 +757,7 @@ SYM_FUNC_START(serpent_ctr_16way) vzeroupper; FRAME_END - ret; + RET; SYM_FUNC_END(serpent_ctr_16way) SYM_FUNC_START(serpent_xts_enc_16way) @@ -783,7 +783,7 @@ SYM_FUNC_START(serpent_xts_enc_16way) vzeroupper; FRAME_END - ret; + RET; SYM_FUNC_END(serpent_xts_enc_16way) SYM_FUNC_START(serpent_xts_dec_16way) @@ -809,5 +809,5 @@ SYM_FUNC_START(serpent_xts_dec_16way) vzeroupper; FRAME_END - ret; + RET; SYM_FUNC_END(serpent_xts_dec_16way) diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S b/arch/x86/crypto/serpent-sse2-i586-asm_32.S index 6379b99cb722..8ccb03ad7cef 100644 --- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S +++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S @@ -553,12 +553,12 @@ SYM_FUNC_START(__serpent_enc_blk_4way) write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); - ret; + RET; .L__enc_xor4: xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); - ret; + RET; SYM_FUNC_END(__serpent_enc_blk_4way) SYM_FUNC_START(serpent_dec_blk_4way) @@ -612,5 +612,5 @@ SYM_FUNC_START(serpent_dec_blk_4way) movl arg_dst(%esp), %eax; write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA); - ret; + RET; SYM_FUNC_END(serpent_dec_blk_4way) diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S index efb6dc17dc90..e0998a011d1d 100644 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S @@ -675,13 +675,13 @@ SYM_FUNC_START(__serpent_enc_blk_8way) write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); - ret; + RET; .L__enc_xor8: xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, 
RK2); xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); - ret; + RET; SYM_FUNC_END(__serpent_enc_blk_8way) SYM_FUNC_START(serpent_dec_blk_8way) @@ -735,5 +735,5 @@ SYM_FUNC_START(serpent_dec_blk_8way) write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2); write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); - ret; + RET; SYM_FUNC_END(serpent_dec_blk_8way) diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S index 1e594d60afa5..6fa622652bfc 100644 --- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S +++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S @@ -674,7 +674,7 @@ _loop3: pop %r12 pop %rbx - ret + RET SYM_FUNC_END(\name) .endm diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S index 11efe3a45a1f..b59f3ca62837 100644 --- a/arch/x86/crypto/sha1_ni_asm.S +++ b/arch/x86/crypto/sha1_ni_asm.S @@ -290,7 +290,7 @@ SYM_FUNC_START(sha1_ni_transform) .Ldone_hash: mov RSPSAVE, %rsp - ret + RET SYM_FUNC_END(sha1_ni_transform) .section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S index d25668d2a1e9..263f916362e0 100644 --- a/arch/x86/crypto/sha1_ssse3_asm.S +++ b/arch/x86/crypto/sha1_ssse3_asm.S @@ -99,7 +99,7 @@ pop %rbp pop %r12 pop %rbx - ret + RET SYM_FUNC_END(\name) .endm diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S index 4739cd31b9db..3baa1ec39097 100644 --- a/arch/x86/crypto/sha256-avx-asm.S +++ b/arch/x86/crypto/sha256-avx-asm.S @@ -458,7 +458,7 @@ done_hash: popq %r13 popq %r12 popq %rbx - ret + RET SYM_FUNC_END(sha256_transform_avx) .section .rodata.cst256.K256, "aM", @progbits, 256 diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 11ff60c29c8b..3439aaf4295d 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/crypto/sha256-avx2-asm.S @@ -711,7 +711,7 @@ done_hash: popq %r13 popq %r12 popq %rbx - ret + RET SYM_FUNC_END(sha256_transform_rorx) .section .rodata.cst512.K256, "aM", @progbits, 512 diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S index ddfa863b4ee3..c4a5db612c32 100644 --- a/arch/x86/crypto/sha256-ssse3-asm.S +++ b/arch/x86/crypto/sha256-ssse3-asm.S @@ -472,7 +472,7 @@ done_hash: popq %r12 popq %rbx - ret + RET SYM_FUNC_END(sha256_transform_ssse3) .section .rodata.cst256.K256, "aM", @progbits, 256 diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S index 7abade04a3a3..94d50dd27cb5 100644 --- a/arch/x86/crypto/sha256_ni_asm.S +++ b/arch/x86/crypto/sha256_ni_asm.S @@ -326,7 +326,7 @@ SYM_FUNC_START(sha256_ni_transform) .Ldone_hash: - ret + RET SYM_FUNC_END(sha256_ni_transform) .section .rodata.cst256.K256, "aM", @progbits, 256 diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S index 63470fd6ae32..34fc71c3524e 100644 --- a/arch/x86/crypto/sha512-avx-asm.S +++ b/arch/x86/crypto/sha512-avx-asm.S @@ -364,7 +364,7 @@ updateblock: mov frame_RSPSAVE(%rsp), %rsp nowork: - ret + RET SYM_FUNC_END(sha512_transform_avx) ######################################################################## diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S index 3a44bdcfd583..399fa74ab3d3 100644 --- a/arch/x86/crypto/sha512-avx2-asm.S +++ b/arch/x86/crypto/sha512-avx2-asm.S @@ -681,7 +681,7 @@ done_hash: # Restore Stack Pointer mov frame_RSPSAVE(%rsp), %rsp - ret + RET SYM_FUNC_END(sha512_transform_rorx) 
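# RET here is a macro, not the bare instruction: with CONFIG_RETHUNK it can
# be rewritten at boot into a jump to a return thunk (__x86_return_thunk),
# which is why every open-coded `ret` in these crypto routines is converted;
# see the X86_FEATURE_RETHUNK / X86_FEATURE_UNRET bits in cpufeatures.h
# further below.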
######################################################################## diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S index 7946a1bee85b..e9b460a54503 100644 --- a/arch/x86/crypto/sha512-ssse3-asm.S +++ b/arch/x86/crypto/sha512-ssse3-asm.S @@ -366,7 +366,7 @@ updateblock: mov frame_RSPSAVE(%rsp), %rsp nowork: - ret + RET SYM_FUNC_END(sha512_transform_ssse3) ######################################################################## diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S index a5151393bb2f..c3707a71ef72 100644 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S @@ -272,7 +272,7 @@ SYM_FUNC_START_LOCAL(__twofish_enc_blk8) outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); - ret; + RET; SYM_FUNC_END(__twofish_enc_blk8) .align 8 @@ -312,7 +312,7 @@ SYM_FUNC_START_LOCAL(__twofish_dec_blk8) outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); - ret; + RET; SYM_FUNC_END(__twofish_dec_blk8) SYM_FUNC_START(twofish_ecb_enc_8way) @@ -332,7 +332,7 @@ SYM_FUNC_START(twofish_ecb_enc_8way) store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); FRAME_END - ret; + RET; SYM_FUNC_END(twofish_ecb_enc_8way) SYM_FUNC_START(twofish_ecb_dec_8way) @@ -352,7 +352,7 @@ SYM_FUNC_START(twofish_ecb_dec_8way) store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + RET; SYM_FUNC_END(twofish_ecb_dec_8way) SYM_FUNC_START(twofish_cbc_dec_8way) @@ -377,7 +377,7 @@ SYM_FUNC_START(twofish_cbc_dec_8way) popq %r12; FRAME_END - ret; + RET; SYM_FUNC_END(twofish_cbc_dec_8way) SYM_FUNC_START(twofish_ctr_8way) @@ -404,7 +404,7 @@ SYM_FUNC_START(twofish_ctr_8way) popq %r12; FRAME_END - ret; + RET; SYM_FUNC_END(twofish_ctr_8way) SYM_FUNC_START(twofish_xts_enc_8way) @@ -428,7 +428,7 @@ SYM_FUNC_START(twofish_xts_enc_8way) store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); FRAME_END - ret; + RET; SYM_FUNC_END(twofish_xts_enc_8way) SYM_FUNC_START(twofish_xts_dec_8way) @@ -452,5 +452,5 @@ SYM_FUNC_START(twofish_xts_dec_8way) store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); FRAME_END - ret; + RET; SYM_FUNC_END(twofish_xts_dec_8way) diff --git a/arch/x86/crypto/twofish-i586-asm_32.S b/arch/x86/crypto/twofish-i586-asm_32.S index a6f09e4f2e46..3abcad661884 100644 --- a/arch/x86/crypto/twofish-i586-asm_32.S +++ b/arch/x86/crypto/twofish-i586-asm_32.S @@ -260,7 +260,7 @@ SYM_FUNC_START(twofish_enc_blk) pop %ebx pop %ebp mov $1, %eax - ret + RET SYM_FUNC_END(twofish_enc_blk) SYM_FUNC_START(twofish_dec_blk) @@ -317,5 +317,5 @@ SYM_FUNC_START(twofish_dec_blk) pop %ebx pop %ebp mov $1, %eax - ret + RET SYM_FUNC_END(twofish_dec_blk) diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S index fc23552afe37..b12d916f08d6 100644 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S @@ -258,7 +258,7 @@ SYM_FUNC_START(__twofish_enc_blk_3way) popq %rbx; popq %r12; popq %r13; - ret; + RET; .L__enc_xor3: outunpack_enc3(xor); @@ -266,7 +266,7 @@ SYM_FUNC_START(__twofish_enc_blk_3way) popq %rbx; popq %r12; popq %r13; - ret; + RET; SYM_FUNC_END(__twofish_enc_blk_3way) SYM_FUNC_START(twofish_dec_blk_3way) @@ -301,5 +301,5 @@ SYM_FUNC_START(twofish_dec_blk_3way) popq %rbx; popq %r12; popq %r13; - ret; + RET; SYM_FUNC_END(twofish_dec_blk_3way) diff 
--git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S index d2e56232494a..775af290cd19 100644 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S @@ -252,7 +252,7 @@ SYM_FUNC_START(twofish_enc_blk) popq R1 movl $1,%eax - ret + RET SYM_FUNC_END(twofish_enc_blk) SYM_FUNC_START(twofish_dec_blk) @@ -304,5 +304,5 @@ SYM_FUNC_START(twofish_dec_blk) popq R1 movl $1,%eax - ret + RET SYM_FUNC_END(twofish_dec_blk) diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile index 08bf95dbc911..63dc4b1dfc92 100644 --- a/arch/x86/entry/Makefile +++ b/arch/x86/entry/Makefile @@ -21,12 +21,13 @@ CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,) CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,) CFLAGS_syscall_x32.o += $(call cc-option,-Wno-override-init,) -obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o +obj-y := entry.o entry_$(BITS).o syscall_$(BITS).o obj-y += common.o obj-y += vdso/ obj-y += vsyscall/ +obj-$(CONFIG_PREEMPTION) += thunk_$(BITS).o obj-$(CONFIG_IA32_EMULATION) += entry_64_compat.o syscall_32.o obj-$(CONFIG_X86_X32_ABI) += syscall_x32.o diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 4bec2597626b..8da971b5e2aa 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -6,6 +6,8 @@ #include #include #include +#include +#include /* @@ -146,27 +148,19 @@ For 32-bit we have the following conventions - kernel is built with .endm -.macro POP_REGS pop_rdi=1 skip_r11rcx=0 +.macro POP_REGS pop_rdi=1 popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx - .if \skip_r11rcx - popq %rsi - .else popq %r11 - .endif popq %r10 popq %r9 popq %r8 popq %rax - .if \skip_r11rcx - popq %rsi - .else popq %rcx - .endif popq %rdx popq %rsi .if \pop_rdi @@ -316,6 +310,66 @@ For 32-bit we have the following conventions - kernel is built with #endif +/* + * IBRS kernel mitigation for Spectre_v2. + * + * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers + * the regs it uses (AX, CX, DX). Must be called before the first RET + * instruction (NOTE! UNTRAIN_RET includes a RET instruction) + * + * The optional argument is used to save/restore the current value, + * which is used on the paranoid paths. + * + * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set. + */ +.macro IBRS_ENTER save_reg +#ifdef CONFIG_CPU_IBRS_ENTRY + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS + movl $MSR_IA32_SPEC_CTRL, %ecx + +.ifnb \save_reg + rdmsr + shl $32, %rdx + or %rdx, %rax + mov %rax, \save_reg + test $SPEC_CTRL_IBRS, %eax + jz .Ldo_wrmsr_\@ + lfence + jmp .Lend_\@ +.Ldo_wrmsr_\@: +.endif + + movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx + movl %edx, %eax + shr $32, %rdx + wrmsr +.Lend_\@: +#endif +.endm + +/* + * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX) + * regs. Must be called after the last RET. + */ +.macro IBRS_EXIT save_reg +#ifdef CONFIG_CPU_IBRS_ENTRY + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS + movl $MSR_IA32_SPEC_CTRL, %ecx + +.ifnb \save_reg + mov \save_reg, %rdx +.else + movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx + andl $(~SPEC_CTRL_IBRS), %edx +.endif + + movl %edx, %eax + shr $32, %rdx + wrmsr +.Lend_\@: +#endif +.endm + /* * Mitigate Spectre v1 for conditional swapgs code paths. 
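 * The conditional SWAPGS may be speculatively executed even when entry came
 * from kernel space, or speculatively skipped even when entry came from
 * user space; either way the wrong GS base is briefly live under
 * speculation. The FENCE_SWAPGS_* macros used in entry_64.S place an LFENCE
 * on the affected paths so dependent loads cannot consume the wrong base.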
* diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S new file mode 100644 index 000000000000..bfb7bcb362bc --- /dev/null +++ b/arch/x86/entry/entry.S @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common place for both 32- and 64-bit entry routines. + */ + +#include +#include +#include + +.pushsection .noinstr.text, "ax" + +SYM_FUNC_START(entry_ibpb) + movl $MSR_IA32_PRED_CMD, %ecx + movl $PRED_CMD_IBPB, %eax + xorl %edx, %edx + wrmsr + RET +SYM_FUNC_END(entry_ibpb) +/* For KVM */ +EXPORT_SYMBOL_GPL(entry_ibpb); + +.popsection diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index df8c017e6161..8fcd6a42b3a1 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -40,7 +40,7 @@ #include #include #include -#include +#include #include #include #include @@ -782,7 +782,6 @@ SYM_CODE_START(__switch_to_asm) movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset #endif -#ifdef CONFIG_RETPOLINE /* * When switching from a shallower to a deeper call stack * the RSB may either underflow or use entries populated @@ -791,7 +790,6 @@ SYM_CODE_START(__switch_to_asm) * speculative execution to prevent attack. */ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW -#endif /* Restore flags or the incoming task to restore AC state. */ popfl @@ -821,7 +819,7 @@ SYM_FUNC_START(schedule_tail_wrapper) popl %eax FRAME_END - ret + RET SYM_FUNC_END(schedule_tail_wrapper) .popsection diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index a24ce5905ab8..559c82b83475 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -93,7 +93,7 @@ SYM_CODE_END(native_usergs_sysret64) */ SYM_CODE_START(entry_SYSCALL_64) - UNWIND_HINT_EMPTY + UNWIND_HINT_ENTRY swapgs /* tss.sp2 is scratch space. */ @@ -117,6 +117,11 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL) /* IRQs are off. */ movq %rax, %rdi movq %rsp, %rsi + + /* clobbers %rax, make sure it is after saving the syscall nr */ + IBRS_ENTER + UNTRAIN_RET + call do_syscall_64 /* returns with IRQs disabled */ /* @@ -191,8 +196,8 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL) * perf profiles. Nothing jumps here. */ syscall_return_via_sysret: - /* rcx and r11 are already restored (see code above) */ - POP_REGS pop_rdi=0 skip_r11rcx=1 + IBRS_EXIT + POP_REGS pop_rdi=0 /* * Now all regs are restored except RSP and RDI. @@ -244,7 +249,6 @@ SYM_FUNC_START(__switch_to_asm) movq %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset #endif -#ifdef CONFIG_RETPOLINE /* * When switching from a shallower to a deeper call stack * the RSB may either underflow or use entries populated @@ -253,7 +257,6 @@ SYM_FUNC_START(__switch_to_asm) * speculative execution to prevent attack. */ FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW -#endif /* restore callee-saved registers */ popq %r15 @@ -500,6 +503,7 @@ SYM_CODE_START(\asmsym) call vc_switch_off_ist movq %rax, %rsp /* Switch to new stack */ + ENCODE_FRAME_POINTER UNWIND_HINT_REGS /* Update pt_regs */ @@ -568,6 +572,7 @@ __irqentry_text_end: SYM_CODE_START_LOCAL(common_interrupt_return) SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL) + IBRS_EXIT #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates user mode. 
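 * The low two bits of the saved CS selector hold the privilege level, so
 * `testb $3, CS(%rsp)` is non-zero exactly for a userspace frame.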
*/ testb $3, CS(%rsp) @@ -675,6 +680,7 @@ native_irq_return_ldt: pushq %rdi /* Stash user RDI */ swapgs /* to kernel GS */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */ + UNTRAIN_RET movq PER_CPU_VAR(espfix_waddr), %rdi movq %rax, (0*8)(%rdi) /* user RAX */ @@ -739,7 +745,7 @@ SYM_FUNC_START(asm_load_gs_index) 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE swapgs FRAME_END - ret + RET SYM_FUNC_END(asm_load_gs_index) EXPORT_SYMBOL(asm_load_gs_index) @@ -798,7 +804,7 @@ SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL) /* Restore the previous stack pointer from RBP. */ leaveq - ret + RET SYM_FUNC_END(asm_call_on_stack) #ifdef CONFIG_XEN_PV @@ -887,6 +893,9 @@ SYM_CODE_END(xen_failsafe_callback) * 1 -> no SWAPGS on exit * * Y GSBASE value at entry, must be restored in paranoid_exit + * + * R14 - old CR3 + * R15 - old SPEC_CTRL */ SYM_CODE_START_LOCAL(paranoid_entry) UNWIND_HINT_FUNC @@ -931,7 +940,7 @@ SYM_CODE_START_LOCAL(paranoid_entry) * is needed here. */ SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx - ret + jmp .Lparanoid_gsbase_done .Lparanoid_entry_checkgs: /* EBX = 1 -> kernel GSBASE active, no restore required */ @@ -950,9 +959,17 @@ SYM_CODE_START_LOCAL(paranoid_entry) xorl %ebx, %ebx swapgs .Lparanoid_kernel_gsbase: - FENCE_SWAPGS_KERNEL_ENTRY - ret +.Lparanoid_gsbase_done: + + /* + * Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like + * CR3 above, keep the old value in a callee saved register. + */ + IBRS_ENTER save_reg=%r15 + UNTRAIN_RET + + RET SYM_CODE_END(paranoid_entry) /* @@ -973,9 +990,19 @@ SYM_CODE_END(paranoid_entry) * 1 -> no SWAPGS on exit * * Y User space GSBASE, must be restored unconditionally + * + * R14 - old CR3 + * R15 - old SPEC_CTRL */ SYM_CODE_START_LOCAL(paranoid_exit) UNWIND_HINT_REGS + + /* + * Must restore IBRS state before both CR3 and %GS since we need access + * to the per-CPU x86_spec_ctrl_shadow variable. + */ + IBRS_EXIT save_reg=%r15 + /* * The order of operations is important. RESTORE_CR3 requires * kernel GSBASE. @@ -1022,8 +1049,11 @@ SYM_CODE_START_LOCAL(error_entry) FENCE_SWAPGS_USER_ENTRY /* We have user CR3. Change to kernel CR3. */ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax + IBRS_ENTER + UNTRAIN_RET .Lerror_entry_from_usermode_after_swapgs: + /* Put us onto the real thread stack. 
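 * The return address is popped into %r12 so that sync_regs() can copy
 * pt_regs to the thread stack and hand back the new stack pointer; the
 * address is pushed again before returning.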
*/ popq %r12 /* save return addr in %12 */ movq %rsp, %rdi /* arg0 = pt_regs pointer */ @@ -1031,7 +1061,7 @@ SYM_CODE_START_LOCAL(error_entry) movq %rax, %rsp /* switch stack */ ENCODE_FRAME_POINTER pushq %r12 - ret + RET /* * There are two places in the kernel that can potentially fault with @@ -1062,7 +1092,8 @@ SYM_CODE_START_LOCAL(error_entry) */ .Lerror_entry_done_lfence: FENCE_SWAPGS_KERNEL_ENTRY - ret + ANNOTATE_UNRET_END + RET .Lbstep_iret: /* Fix truncated RIP */ @@ -1077,6 +1108,8 @@ SYM_CODE_START_LOCAL(error_entry) SWAPGS FENCE_SWAPGS_USER_ENTRY SWITCH_TO_KERNEL_CR3 scratch_reg=%rax + IBRS_ENTER + UNTRAIN_RET /* * Pretend that the exception came from user mode: set up pt_regs @@ -1181,6 +1214,9 @@ SYM_CODE_START(asm_exc_nmi) PUSH_AND_CLEAR_REGS rdx=(%rdx) ENCODE_FRAME_POINTER + IBRS_ENTER + UNTRAIN_RET + /* * At this point we no longer need to worry about stack damage * due to nesting -- we're on the normal thread stack and we're @@ -1403,6 +1439,9 @@ end_repeat_nmi: movq $-1, %rsi call exc_nmi + /* Always restore stashed SPEC_CTRL value (see paranoid_entry) */ + IBRS_EXIT save_reg=%r15 + /* Always restore stashed CR3 value (see paranoid_entry) */ RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 0051cf5c792d..4d637a965efb 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -4,7 +4,6 @@ * * Copyright 2000-2002 Andi Kleen, SuSE Labs. */ -#include "calling.h" #include #include #include @@ -14,9 +13,12 @@ #include #include #include +#include #include #include +#include "calling.h" + .section .entry.text, "ax" /* @@ -47,7 +49,7 @@ * 0(%ebp) arg6 */ SYM_CODE_START(entry_SYSENTER_compat) - UNWIND_HINT_EMPTY + UNWIND_HINT_ENTRY /* Interrupts are off on entry. */ SWAPGS @@ -112,6 +114,9 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL) cld + IBRS_ENTER + UNTRAIN_RET + /* * SYSENTER doesn't filter flags, so we need to clear NT and AC * ourselves. To save a few cycles, we can check whether @@ -197,7 +202,7 @@ SYM_CODE_END(entry_SYSENTER_compat) * 0(%esp) arg6 */ SYM_CODE_START(entry_SYSCALL_compat) - UNWIND_HINT_EMPTY + UNWIND_HINT_ENTRY /* Interrupts are off on entry. */ swapgs @@ -252,6 +257,9 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL) UNWIND_HINT_REGS + IBRS_ENTER + UNTRAIN_RET + movq %rsp, %rdi call do_fast_syscall_32 /* XEN PV guests always use IRET path */ @@ -266,6 +274,8 @@ sysret32_from_system_call: */ STACKLEAK_ERASE + IBRS_EXIT + movq RBX(%rsp), %rbx /* pt_regs->rbx */ movq RBP(%rsp), %rbp /* pt_regs->rbp */ movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */ @@ -339,7 +349,7 @@ SYM_CODE_END(entry_SYSCALL_compat) * ebp arg6 */ SYM_CODE_START(entry_INT80_compat) - UNWIND_HINT_EMPTY + UNWIND_HINT_ENTRY /* * Interrupts are off on entry. 
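 * Like the SYSENTER and SYSCALL paths above, this legacy int $0x80 path
 * gains IBRS_ENTER and UNTRAIN_RET below, before the first call into C
 * code.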
*/ @@ -409,6 +419,9 @@ SYM_CODE_START(entry_INT80_compat) cld + IBRS_ENTER + UNTRAIN_RET + movq %rsp, %rdi call do_int80_syscall_32 jmp swapgs_restore_regs_and_return_to_usermode diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S index f1f96d4d8cd6..ff6e7003da97 100644 --- a/arch/x86/entry/thunk_32.S +++ b/arch/x86/entry/thunk_32.S @@ -24,15 +24,13 @@ SYM_CODE_START_NOALIGN(\name) popl %edx popl %ecx popl %eax - ret + RET _ASM_NOKPROBE(\name) SYM_CODE_END(\name) .endm -#ifdef CONFIG_PREEMPTION THUNK preempt_schedule_thunk, preempt_schedule THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace EXPORT_SYMBOL(preempt_schedule_thunk) EXPORT_SYMBOL(preempt_schedule_notrace_thunk) -#endif diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S index c9a9fbf1655f..14776163fbff 100644 --- a/arch/x86/entry/thunk_64.S +++ b/arch/x86/entry/thunk_64.S @@ -36,14 +36,11 @@ SYM_FUNC_END(\name) _ASM_NOKPROBE(\name) .endm -#ifdef CONFIG_PREEMPTION THUNK preempt_schedule_thunk, preempt_schedule THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace EXPORT_SYMBOL(preempt_schedule_thunk) EXPORT_SYMBOL(preempt_schedule_notrace_thunk) -#endif -#ifdef CONFIG_PREEMPTION SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore) popq %r11 popq %r10 @@ -55,7 +52,6 @@ SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore) popq %rsi popq %rdi popq %rbp - ret + RET _ASM_NOKPROBE(__thunk_restore) SYM_CODE_END(__thunk_restore) -#endif diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index b36f42965b62..be60773ef946 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -91,6 +91,7 @@ endif endif $(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) +$(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO # # vDSO code runs in userspace and -pg doesn't help with profiling anyway. 
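# Assembling the vobjs with -DBUILD_VDSO (added above) lets shared asm
# headers tell they are building userspace vDSO code and leave kernel-only
# constructs (such as return-thunk annotations) out; the define's consumers
# sit outside this hunk, so treat that as the intended reading rather than
# a guarantee.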
@@ -176,7 +177,7 @@ quiet_cmd_vdso = VDSO $@ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 \ - $(call ld-option, --eh-frame-hdr) -Bsymbolic + $(call ld-option, --eh-frame-hdr) -Bsymbolic -z noexecstack GCOV_PROFILE := n quiet_cmd_vdso_and_check = VDSO $@ diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S index de1fff7188aa..c92a8fef2ed2 100644 --- a/arch/x86/entry/vdso/vdso32/system_call.S +++ b/arch/x86/entry/vdso/vdso32/system_call.S @@ -6,7 +6,7 @@ #include #include #include -#include +#include .text .globl __kernel_vsyscall @@ -78,7 +78,7 @@ SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL) popl %ecx CFI_RESTORE ecx CFI_ADJUST_CFA_OFFSET -4 - ret + RET CFI_ENDPROC .size __kernel_vsyscall,.-__kernel_vsyscall diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 9185cb1d13b9..5876289e48d8 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -440,7 +440,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) static __init int vdso_setup(char *s) { vdso64_enabled = simple_strtoul(s, NULL, 0); - return 0; + return 1; } __setup("vdso=", vdso_setup); diff --git a/arch/x86/entry/vsyscall/vsyscall_emu_64.S b/arch/x86/entry/vsyscall/vsyscall_emu_64.S index 2e203f3a25a7..ef2dd1827243 100644 --- a/arch/x86/entry/vsyscall/vsyscall_emu_64.S +++ b/arch/x86/entry/vsyscall/vsyscall_emu_64.S @@ -20,16 +20,19 @@ __vsyscall_page: mov $__NR_gettimeofday, %rax syscall ret + int3 .balign 1024, 0xcc mov $__NR_time, %rax syscall ret + int3 .balign 1024, 0xcc mov $__NR_getcpu, %rax syscall ret + int3 .balign 4096, 0xcc diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index ccc9ee1971e8..8a85658a24cc 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -312,6 +312,16 @@ static int perf_ibs_init(struct perf_event *event) hwc->config_base = perf_ibs->msr; hwc->config = config; + /* + * rip recorded by IbsOpRip will not be consistent with rsp and rbp + * recorded as part of interrupt regs. Thus we need to use rip from + * interrupt regs while unwinding call stack. Setting _EARLY flag + * makes sure we unwind call-stack before perf sample rip is set to + * IbsOpRip. + */ + if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) + event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY; + return 0; } @@ -692,6 +702,14 @@ fail: data.raw = &raw; } + /* + * rip recorded by IbsOpRip will not be consistent with rsp and rbp + * recorded as part of interrupt regs. Thus we need to use rip from + * interrupt regs while unwinding call stack. 
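+ * (IbsOpRip holds the address of the sampled micro-op, which retired
+ * before the IBS NMI was delivered, so it can be arbitrarily far from
+ * the rsp/rbp captured in the NMI frame.)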
+ */ + if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) + data.callchain = perf_callchain(event, iregs); + throttle = perf_event_overflow(event, &data, ®s); out: if (throttle) { @@ -764,9 +782,10 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name) return ret; } -static __init void perf_event_ibs_init(void) +static __init int perf_event_ibs_init(void) { struct attribute **attr = ibs_op_format_attrs; + int ret; /* * Some chips fail to reset the fetch count when it is written; instead @@ -778,7 +797,9 @@ static __init void perf_event_ibs_init(void) if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10) perf_ibs_fetch.fetch_ignore_if_zero_rip = 1; - perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch"); + ret = perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch"); + if (ret) + return ret; if (ibs_caps & IBS_CAPS_OPCNT) { perf_ibs_op.config_mask |= IBS_OP_CNT_CTL; @@ -791,15 +812,35 @@ static __init void perf_event_ibs_init(void) perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK; } - perf_ibs_pmu_init(&perf_ibs_op, "ibs_op"); + ret = perf_ibs_pmu_init(&perf_ibs_op, "ibs_op"); + if (ret) + goto err_op; + + ret = register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs"); + if (ret) + goto err_nmi; - register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs"); pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps); + return 0; + +err_nmi: + perf_pmu_unregister(&perf_ibs_op.pmu); + free_percpu(perf_ibs_op.pcpu); + perf_ibs_op.pcpu = NULL; +err_op: + perf_pmu_unregister(&perf_ibs_fetch.pmu); + free_percpu(perf_ibs_fetch.pcpu); + perf_ibs_fetch.pcpu = NULL; + + return ret; } #else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */ -static __init void perf_event_ibs_init(void) { } +static __init int perf_event_ibs_init(void) +{ + return 0; +} #endif @@ -1069,9 +1110,7 @@ static __init int amd_ibs_init(void) x86_pmu_amd_ibs_starting_cpu, x86_pmu_amd_ibs_dying_cpu); - perf_event_ibs_init(); - - return 0; + return perf_event_ibs_init(); } /* Since we need the pci subsystem to init ibs we can't do this earlier: */ diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 5ba13b00e3a7..990d5543e3bf 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -254,7 +254,7 @@ static struct event_constraint intel_icl_event_constraints[] = { INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf), INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf), INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */ - INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf), INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf), INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */ INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */ @@ -4412,6 +4412,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = { INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c), INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c), INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e), diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 945d470f62d0..48f30ffef1f4 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -855,8 +855,13 @@ struct event_constraint intel_icl_pebs_event_constraints[] = { 
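/* In each entry below, the first argument is the event/umask code and the
 * second the bitmask of counters the event is allowed to run on. */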
INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */ INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ - INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */ - INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), /* MEM_INST_RETIRED.STORE */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */ diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index bd8516e6c353..4b6c39c5facb 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1114,6 +1114,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event) if (static_cpu_has(X86_FEATURE_ARCH_LBR)) { reg->config = mask; + + /* + * The Arch LBR HW can retrieve the common branch types + * from the LBR_INFO. It doesn't require the high overhead + * SW disassemble. + * Enable the branch type by default for the Arch LBR. + */ + reg->reg |= X86_BR_TYPE_SAVE; return 0; } @@ -1839,7 +1847,7 @@ void __init intel_pmu_arch_lbr_init(void) return; clear_arch_lbr: - clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR); + setup_clear_cpu_cap(X86_FEATURE_ARCH_LBR); } /** diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index cc3b79c06685..d87421acddc3 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -13,6 +13,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include +#include #include #include @@ -1245,6 +1247,15 @@ static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages) if (1 << order != nr_pages) goto out; + /* + * Some processors cannot always support single range for more than + * 4KB - refer errata TGL052, ADL037 and RPL017. Future processors might + * also be affected, so for now rather than trying to keep track of + * which ones, just disable it for all. + */ + if (nr_pages > 1) + goto out; + buf->single = true; buf->nr_pages = nr_pages; ret = 0; @@ -1348,11 +1359,37 @@ static void pt_addr_filters_fini(struct perf_event *event) event->hw.addr_filters = NULL; } -static inline bool valid_kernel_ip(unsigned long ip) +#ifdef CONFIG_X86_64 +static u64 canonical_address(u64 vaddr, u8 vaddr_bits) { - return virt_addr_valid(ip) && kernel_ip(ip); + return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits); } +static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits) +{ + return canonical_address(vaddr, vaddr_bits) == vaddr; +} + +/* Clamp to a canonical address greater-than-or-equal-to the address given */ +static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits) +{ + return is_canonical_address(vaddr, vaddr_bits) ? + vaddr : + -BIT_ULL(vaddr_bits - 1); +} + +/* Clamp to a canonical address less-than-or-equal-to the address given */ +static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits) +{ + return is_canonical_address(vaddr, vaddr_bits) ? 
+ vaddr : + BIT_ULL(vaddr_bits - 1) - 1; +} +#else +#define clamp_to_ge_canonical_addr(x, y) (x) +#define clamp_to_le_canonical_addr(x, y) (x) +#endif + static int pt_event_addr_filters_validate(struct list_head *filters) { struct perf_addr_filter *filter; @@ -1367,14 +1404,6 @@ static int pt_event_addr_filters_validate(struct list_head *filters) filter->action == PERF_ADDR_FILTER_ACTION_START) return -EOPNOTSUPP; - if (!filter->path.dentry) { - if (!valid_kernel_ip(filter->offset)) - return -EINVAL; - - if (!valid_kernel_ip(filter->offset + filter->size)) - return -EINVAL; - } - if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges)) return -EOPNOTSUPP; } @@ -1398,9 +1427,26 @@ static void pt_event_addr_filters_sync(struct perf_event *event) if (filter->path.dentry && !fr[range].start) { msr_a = msr_b = 0; } else { - /* apply the offset */ - msr_a = fr[range].start; - msr_b = msr_a + fr[range].size - 1; + unsigned long n = fr[range].size - 1; + unsigned long a = fr[range].start; + unsigned long b; + + if (a > ULONG_MAX - n) + b = ULONG_MAX; + else + b = a + n; + /* + * Apply the offset. 64-bit addresses written to the + * MSRs must be canonical, but the range can encompass + * non-canonical addresses. Since software cannot + * execute at non-canonical addresses, adjusting to + * canonical addresses does not affect the result of the + * address filter. + */ + msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits); + msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits); + if (msr_b < msr_a) + msr_a = msr_b = 0; } filters->filter[range].msr_a = msr_a; diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index bbd1120ae161..fa9289718147 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -657,6 +657,22 @@ int snb_pci2phy_map_init(int devid) return 0; } +static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* + * SNB IMC counters are 32-bit and are laid out back to back + * in MMIO space. Therefore we must use a 32-bit accessor function + * using readq() from uncore_mmio_read_counter() causes problems + * because it is reading 64-bit at a time. This is okay for the + * uncore_perf_event_update() function because it drops the upper + * 32-bits but not okay for plain uncore_read_counter() as invoked + * in uncore_pmu_event_start(). + */ + return (u64)readl(box->io_addr + hwc->event_base); +} + static struct pmu snb_uncore_imc_pmu = { .task_ctx_nr = perf_invalid_context, .event_init = snb_uncore_imc_event_init, @@ -676,7 +692,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = { .disable_event = snb_uncore_imc_disable_event, .enable_event = snb_uncore_imc_enable_event, .hw_config = snb_uncore_imc_hw_config, - .read_counter = uncore_mmio_read_counter, + .read_counter = snb_uncore_imc_read_counter, }; static struct intel_uncore_type snb_uncore_imc = { diff --git a/arch/x86/include/asm/GEN-for-each-reg.h b/arch/x86/include/asm/GEN-for-each-reg.h index 1b07fb102c4e..07949102a08d 100644 --- a/arch/x86/include/asm/GEN-for-each-reg.h +++ b/arch/x86/include/asm/GEN-for-each-reg.h @@ -1,11 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * These are in machine order; things rely on that. 
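+ * "Machine order" is the x86 register-encoding order: (r/e)ax, cx, dx,
+ * bx, sp, bp, si, di, then r8..r15 on 64-bit. The indirect thunks
+ * generated from this list form an array indexed by exactly that
+ * encoding.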
+ */ #ifdef CONFIG_64BIT GEN(rax) -GEN(rbx) GEN(rcx) GEN(rdx) +GEN(rbx) +GEN(rsp) +GEN(rbp) GEN(rsi) GEN(rdi) -GEN(rbp) GEN(r8) GEN(r9) GEN(r10) @@ -16,10 +21,11 @@ GEN(r14) GEN(r15) #else GEN(eax) -GEN(ebx) GEN(ecx) GEN(edx) +GEN(ebx) +GEN(esp) +GEN(ebp) GEN(esi) GEN(edi) -GEN(ebp) #endif diff --git a/arch/x86/include/asm/acenv.h b/arch/x86/include/asm/acenv.h index 9aff97f0de7f..d937c55e717e 100644 --- a/arch/x86/include/asm/acenv.h +++ b/arch/x86/include/asm/acenv.h @@ -13,7 +13,19 @@ /* Asm macros */ -#define ACPI_FLUSH_CPU_CACHE() wbinvd() +/* + * ACPI_FLUSH_CPU_CACHE() flushes caches on entering sleep states. + * It is required to prevent data loss. + * + * While running inside virtual machine, the kernel can bypass cache flushing. + * Changing sleep state in a virtual machine doesn't affect the host system + * sleep state and cannot lead to data loss. + */ +#define ACPI_FLUSH_CPU_CACHE() \ +do { \ + if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) \ + wbinvd(); \ +} while (0) int __acpi_acquire_global_lock(unsigned int *lock); int __acpi_release_global_lock(unsigned int *lock); diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h deleted file mode 100644 index 464034db299f..000000000000 --- a/arch/x86/include/asm/alternative-asm.h +++ /dev/null @@ -1,114 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_ALTERNATIVE_ASM_H -#define _ASM_X86_ALTERNATIVE_ASM_H - -#ifdef __ASSEMBLY__ - -#include - -#ifdef CONFIG_SMP - .macro LOCK_PREFIX -672: lock - .pushsection .smp_locks,"a" - .balign 4 - .long 672b - . - .popsection - .endm -#else - .macro LOCK_PREFIX - .endm -#endif - -/* - * objtool annotation to ignore the alternatives and only consider the original - * instruction(s). - */ -.macro ANNOTATE_IGNORE_ALTERNATIVE - .Lannotate_\@: - .pushsection .discard.ignore_alts - .long .Lannotate_\@ - . - .popsection -.endm - -/* - * Issue one struct alt_instr descriptor entry (need to put it into - * the section .altinstructions, see below). This entry contains - * enough information for the alternatives patching code to patch an - * instruction. See apply_alternatives(). - */ -.macro altinstruction_entry orig alt feature orig_len alt_len pad_len - .long \orig - . - .long \alt - . - .word \feature - .byte \orig_len - .byte \alt_len - .byte \pad_len -.endm - -/* - * Define an alternative between two instructions. If @feature is - * present, early code in apply_alternatives() replaces @oldinstr with - * @newinstr. ".skip" directive takes care of proper instruction padding - * in case @newinstr is longer than @oldinstr. - */ -.macro ALTERNATIVE oldinstr, newinstr, feature -140: - \oldinstr -141: - .skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90 -142: - - .pushsection .altinstructions,"a" - altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b - .popsection - - .pushsection .altinstr_replacement,"ax" -143: - \newinstr -144: - .popsection -.endm - -#define old_len 141b-140b -#define new_len1 144f-143f -#define new_len2 145f-144f - -/* - * gas compatible max based on the idea from: - * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax - * - * The additional "-" is needed because gas uses a "true" value of -1. - */ -#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) - - -/* - * Same as ALTERNATIVE macro above but for two alternatives. If CPU - * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has - * @feature2, it replaces @oldinstr with @feature2. 
- */ -.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 -140: - \oldinstr -141: - .skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \ - (alt_max_short(new_len1, new_len2) - (old_len)),0x90 -142: - - .pushsection .altinstructions,"a" - altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f,142b-141b - altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b - .popsection - - .pushsection .altinstr_replacement,"ax" -143: - \newinstr1 -144: - \newinstr2 -145: - .popsection -.endm - -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_X86_ALTERNATIVE_ASM_H */ diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 13adca37c99a..0e777b27972b 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -2,13 +2,17 @@ #ifndef _ASM_X86_ALTERNATIVE_H #define _ASM_X86_ALTERNATIVE_H -#ifndef __ASSEMBLY__ - #include -#include #include #include +#define ALTINSTR_FLAG_INV (1 << 15) +#define ALT_NOT(feat) ((feat) | ALTINSTR_FLAG_INV) + +#ifndef __ASSEMBLY__ + +#include + /* * Alternative inline assembly for SMP. * @@ -61,7 +65,6 @@ struct alt_instr { u16 cpuid; /* cpuid bit set for replacement */ u8 instrlen; /* length of original instruction */ u8 replacementlen; /* length of new instruction */ - u8 padlen; /* length of build-time padding */ } __packed; /* @@ -72,6 +75,8 @@ extern int alternatives_patched; extern void alternative_instructions(void); extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); +extern void apply_retpolines(s32 *start, s32 *end); +extern void apply_returns(s32 *start, s32 *end); struct module; @@ -100,7 +105,6 @@ static inline int alternatives_text_reserved(void *start, void *end) #define alt_end_marker "663" #define alt_slen "662b-661b" -#define alt_pad_len alt_end_marker"b-662b" #define alt_total_slen alt_end_marker"b-661b" #define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f" @@ -147,8 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end) " .long " b_replacement(num)"f - .\n" /* new instruction */ \ " .word " __stringify(feature) "\n" /* feature bit */ \ " .byte " alt_total_slen "\n" /* source len */ \ - " .byte " alt_rlen(num) "\n" /* replacement len */ \ - " .byte " alt_pad_len "\n" /* pad len */ + " .byte " alt_rlen(num) "\n" /* replacement len */ #define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \ "# ALT: replacement " #num "\n" \ @@ -175,6 +178,11 @@ static inline int alternatives_text_reserved(void *start, void *end) ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ ".popsection\n" +/* If @feature is set, patch in @newinstr_yes, otherwise @newinstr_no. */ +#define ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) \ + ALTERNATIVE_2(oldinstr, newinstr_no, X86_FEATURE_ALWAYS, \ + newinstr_yes, feature) + #define ALTERNATIVE_3(oldinsn, newinsn1, feat1, newinsn2, feat2, newinsn3, feat3) \ OLDINSTR_3(oldinsn, 1, 2, 3) \ ".pushsection .altinstructions,\"a\"\n" \ @@ -206,15 +214,15 @@ static inline int alternatives_text_reserved(void *start, void *end) #define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \ asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory") +#define alternative_ternary(oldinstr, feature, newinstr_yes, newinstr_no) \ + asm_inline volatile(ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) ::: "memory") + /* * Alternative inline assembly with input. 
* * Peculiarities: * No memory clobber here. * Argument numbers start with 1. - * Best is to use constraints that are fixed size (like (%1) ... "r") - * If you use variable sized constraints like "m" or "g" in the - * replacement make sure to pad to the worst case length. * Leaving an unused argument 0 to keep API compatibility. */ #define alternative_input(oldinstr, newinstr, feature, input...) \ @@ -271,6 +279,115 @@ static inline int alternatives_text_reserved(void *start, void *end) */ #define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr +#else /* __ASSEMBLY__ */ + +#ifdef CONFIG_SMP + .macro LOCK_PREFIX +672: lock + .pushsection .smp_locks,"a" + .balign 4 + .long 672b - . + .popsection + .endm +#else + .macro LOCK_PREFIX + .endm +#endif + +/* + * objtool annotation to ignore the alternatives and only consider the original + * instruction(s). + */ +.macro ANNOTATE_IGNORE_ALTERNATIVE + .Lannotate_\@: + .pushsection .discard.ignore_alts + .long .Lannotate_\@ - . + .popsection +.endm + +/* + * Issue one struct alt_instr descriptor entry (need to put it into + * the section .altinstructions, see below). This entry contains + * enough information for the alternatives patching code to patch an + * instruction. See apply_alternatives(). + */ +.macro altinstruction_entry orig alt feature orig_len alt_len + .long \orig - . + .long \alt - . + .word \feature + .byte \orig_len + .byte \alt_len +.endm + +/* + * Define an alternative between two instructions. If @feature is + * present, early code in apply_alternatives() replaces @oldinstr with + * @newinstr. ".skip" directive takes care of proper instruction padding + * in case @newinstr is longer than @oldinstr. + */ +.macro ALTERNATIVE oldinstr, newinstr, feature +140: + \oldinstr +141: + .skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90 +142: + + .pushsection .altinstructions,"a" + altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f + .popsection + + .pushsection .altinstr_replacement,"ax" +143: + \newinstr +144: + .popsection +.endm + +#define old_len 141b-140b +#define new_len1 144f-143f +#define new_len2 145f-144f + +/* + * gas compatible max based on the idea from: + * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax + * + * The additional "-" is needed because gas uses a "true" value of -1. + */ +#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) + + +/* + * Same as ALTERNATIVE macro above but for two alternatives. If CPU + * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has + * @feature2, it replaces @oldinstr with @feature2. + */ +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 +140: + \oldinstr +141: + .skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \ + (alt_max_short(new_len1, new_len2) - (old_len)),0x90 +142: + + .pushsection .altinstructions,"a" + altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f + altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f + .popsection + + .pushsection .altinstr_replacement,"ax" +143: + \newinstr1 +144: + \newinstr2 +145: + .popsection +.endm + +/* If @feature is set, patch in @newinstr_yes, otherwise @newinstr_no. 
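+ * For example (X86_FEATURE_XYZ is a made-up bit for illustration):
+ *	ALTERNATIVE_TERNARY insn, X86_FEATURE_XYZ, insn_yes, insn_no
+ * expands to
+ *	ALTERNATIVE_2 insn, insn_no, X86_FEATURE_ALWAYS, insn_yes, X86_FEATURE_XYZ
+ * i.e. the "no" variant rides the X86_FEATURE_ALWAYS alternative and the
+ * "yes" variant is patched in when @feature is set; the reworked
+ * _static_cpu_has() in cpufeature.h below uses the C twin of this macro.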
*/ +#define ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) \ + ALTERNATIVE_2 oldinstr, newinstr_no, X86_FEATURE_ALWAYS, \ + newinstr_yes, feature + #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_ALTERNATIVE_H */ diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index 51e2bf27cc9b..8f80de627c60 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h @@ -17,20 +17,3 @@ extern void cmpxchg8b_emu(void); #endif -#ifdef CONFIG_RETPOLINE - -#define DECL_INDIRECT_THUNK(reg) \ - extern asmlinkage void __x86_indirect_thunk_ ## reg (void); - -#define DECL_RETPOLINE(reg) \ - extern asmlinkage void __x86_retpoline_ ## reg (void); - -#undef GEN -#define GEN(reg) DECL_INDIRECT_THUNK(reg) -#include - -#undef GEN -#define GEN(reg) DECL_RETPOLINE(reg) -#include - -#endif /* CONFIG_RETPOLINE */ diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index 0e327a01f50f..46a067bd7e0b 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -29,15 +29,13 @@ typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; struct compat_stat { - compat_dev_t st_dev; - u16 __pad1; + u32 st_dev; compat_ino_t st_ino; compat_mode_t st_mode; compat_nlink_t st_nlink; __compat_uid_t st_uid; __compat_gid_t st_gid; - compat_dev_t st_rdev; - u16 __pad2; + u32 st_rdev; u32 st_size; u32 st_blksize; u32 st_blocks; diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 59bf91c57aa8..f4cbc01c0bc4 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -8,6 +8,7 @@ #include #include +#include enum cpuid_leafs { @@ -49,7 +50,7 @@ extern const char * const x86_power_flags[32]; extern const char * const x86_bug_flags[NBUGINTS*32]; #define test_cpu_cap(c, bit) \ - test_bit(bit, (unsigned long *)((c)->x86_capability)) + arch_test_bit(bit, (unsigned long *)((c)->x86_capability)) /* * There are 32 bits/features in each mask word. 
The high bits @@ -172,39 +173,15 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); */ static __always_inline bool _static_cpu_has(u16 bit) { - asm_volatile_goto("1: jmp 6f\n" - "2:\n" - ".skip -(((5f-4f) - (2b-1b)) > 0) * " - "((5f-4f) - (2b-1b)),0x90\n" - "3:\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" /* src offset */ - " .long 4f - .\n" /* repl offset */ - " .word %P[always]\n" /* always replace */ - " .byte 3b - 1b\n" /* src len */ - " .byte 5f - 4f\n" /* repl len */ - " .byte 3b - 2b\n" /* pad len */ - ".previous\n" - ".section .altinstr_replacement,\"ax\"\n" - "4: jmp %l[t_no]\n" - "5:\n" - ".previous\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" /* src offset */ - " .long 0\n" /* no replacement */ - " .word %P[feature]\n" /* feature bit */ - " .byte 3b - 1b\n" /* src len */ - " .byte 0\n" /* repl len */ - " .byte 0\n" /* pad len */ - ".previous\n" - ".section .altinstr_aux,\"ax\"\n" - "6:\n" - " testb %[bitnum],%[cap_byte]\n" - " jnz %l[t_yes]\n" - " jmp %l[t_no]\n" - ".previous\n" + asm_volatile_goto( + ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]") + ".section .altinstr_aux,\"ax\"\n" + "6:\n" + " testb %[bitnum],%[cap_byte]\n" + " jnz %l[t_yes]\n" + " jmp %l[t_no]\n" + ".previous\n" : : [feature] "i" (bit), - [always] "i" (X86_FEATURE_ALWAYS), [bitnum] "i" (1 << (bit & 7)), [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) : : t_yes, t_no); diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 3b407f46f1a0..1fcda8263554 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -203,8 +203,8 @@ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ -#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ -#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */ +#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */ +#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ @@ -290,6 +290,17 @@ #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */ #define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */ #define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */ +/* FREE! (11*32+ 8) */ +/* FREE! 
(11*32+ 9) */ +#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */ +#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */ +#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */ +#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */ +#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */ +#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */ +#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */ +#define X86_FEATURE_MSR_TSX_CTRL (11*32+18) /* "" MSR IA32_TSX_CTRL (Intel) implemented */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ @@ -308,6 +319,7 @@ #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */ +#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ @@ -417,5 +429,9 @@ #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ +#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ +#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */ +#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */ +#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 09db5b8f1444..16c24915210d 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -56,6 +56,25 @@ # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) #endif +#ifdef CONFIG_RETPOLINE +# define DISABLE_RETPOLINE 0 +#else +# define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \ + (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31))) +#endif + +#ifdef CONFIG_RETHUNK +# define DISABLE_RETHUNK 0 +#else +# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31)) +#endif + +#ifdef CONFIG_CPU_UNRET_ENTRY +# define DISABLE_UNRET 0 +#else +# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31)) +#endif + /* Force disable because it's broken beyond repair */ #define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31)) @@ -73,7 +92,7 @@ #define DISABLED_MASK8 0 #define DISABLED_MASK9 (DISABLE_SMAP) #define DISABLED_MASK10 0 -#define DISABLED_MASK11 0 +#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET) #define DISABLED_MASK12 0 #define DISABLED_MASK13 0 #define DISABLED_MASK14 0 diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 0ed20e8bba9e..ae7192b75136 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -474,7 +474,7 @@ struct 
hv_enlightened_vmcs { u64 guest_rip; u32 hv_clean_fields; - u32 hv_padding_32; + u32 padding32_1; u32 hv_synthetic_controls; struct { u32 nested_flush_hypercall:1; @@ -482,7 +482,7 @@ struct hv_enlightened_vmcs { u32 reserved:30; } __packed hv_enlightenments_control; u32 hv_vp_id; - + u32 padding32_2; u64 hv_vm_id; u64 partition_assist_page; u64 padding64_4[4]; diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h index 4cf2ad521f65..b56c5741581a 100644 --- a/arch/x86/include/asm/inat.h +++ b/arch/x86/include/asm/inat.h @@ -6,7 +6,7 @@ * * Written by Masami Hiramatsu */ -#include +#include /* __ignore_sync_check__ */ /* * Internal bits. Don't use bitmasks directly, because these bits are diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h index c1438f9239e4..594ad2f57d23 100644 --- a/arch/x86/include/asm/insn-eval.h +++ b/arch/x86/include/asm/insn-eval.h @@ -26,7 +26,7 @@ int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE]); int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE]); -bool insn_decode(struct insn *insn, struct pt_regs *regs, - unsigned char buf[MAX_INSN_SIZE], int buf_size); +bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs, + unsigned char buf[MAX_INSN_SIZE], int buf_size); #endif /* _ASM_X86_INSN_EVAL_H */ diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index a8c3d284fa46..0da37756917a 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -8,7 +8,7 @@ */ /* insn_attr_t is defined in inat.h */ -#include +#include /* __ignore_sync_check__ */ struct insn_field { union { @@ -87,13 +87,25 @@ struct insn { #define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64); -extern void insn_get_prefixes(struct insn *insn); -extern void insn_get_opcode(struct insn *insn); -extern void insn_get_modrm(struct insn *insn); -extern void insn_get_sib(struct insn *insn); -extern void insn_get_displacement(struct insn *insn); -extern void insn_get_immediate(struct insn *insn); -extern void insn_get_length(struct insn *insn); +extern int insn_get_prefixes(struct insn *insn); +extern int insn_get_opcode(struct insn *insn); +extern int insn_get_modrm(struct insn *insn); +extern int insn_get_sib(struct insn *insn); +extern int insn_get_displacement(struct insn *insn); +extern int insn_get_immediate(struct insn *insn); +extern int insn_get_length(struct insn *insn); + +enum insn_mode { + INSN_MODE_32, + INSN_MODE_64, + /* Mode is determined by the current kernel build. 
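+	 * A minimal usage sketch (assuming @addr points at valid
+	 * kernel text; this is what the insn_decode_kernel() helper
+	 * below expands to):
+	 *
+	 *	struct insn insn;
+	 *
+	 *	if (insn_decode(&insn, addr, MAX_INSN_SIZE, INSN_MODE_KERN) < 0)
+	 *		return;		// decode failed
+	 *	// insn.length, insn.opcode etc. are now valid
+	 *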
*/ + INSN_MODE_KERN, + INSN_NUM_MODES, +}; + +extern int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m); + +#define insn_decode_kernel(_insn, _ptr) insn_decode((_insn), (_ptr), MAX_INSN_SIZE, INSN_MODE_KERN) /* Attribute will be determined after getting ModRM (for opcode groups) */ static inline void insn_get_attribute(struct insn *insn) diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index bf1ed2ddc74b..7a983119bc40 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -17,8 +17,10 @@ arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) { u64 start = rmrr->base_address; u64 end = rmrr->end_address + 1; + int entry_type; - if (e820__mapped_all(start, end, E820_TYPE_RESERVED)) + entry_type = e820__get_entry_type(start, end); + if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS) return 0; pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n", diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index 6802c59e8252..8e80c2655ca9 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -191,6 +191,14 @@ extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages); #define arch_kexec_pre_free_pages arch_kexec_pre_free_pages +#ifdef CONFIG_KEXEC_FILE +struct purgatory_info; +int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add +#endif #endif typedef void crash_vmclear_fn(void); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0eb41dce55da..660012ab7bfa 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -432,6 +432,7 @@ struct kvm_pmu { unsigned nr_arch_fixed_counters; unsigned available_event_types; u64 fixed_ctr_ctrl; + u64 fixed_ctr_ctrl_mask; u64 global_ctrl; u64 global_status; u64 global_ovf_ctrl; @@ -439,6 +440,7 @@ struct kvm_pmu { u64 global_ctrl_mask; u64 global_ovf_ctrl_mask; u64 reserved_bits; + u64 raw_event_mask; u8 version; struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC]; struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED]; @@ -1117,7 +1119,8 @@ struct kvm_x86_ops { struct kvm_segment *var, int seg); void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); - int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); + bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0); + void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer); void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); @@ -1272,6 +1275,7 @@ struct kvm_x86_ops { int (*mem_enc_op)(struct kvm *kvm, void __user *argp); int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); + void (*guest_memory_reclaimed)(struct kvm *kvm); int (*get_msr_feature)(struct kvm_msr_entry *entry); @@ -1340,8 +1344,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) return -ENOTSUPP; } -int kvm_mmu_module_init(void); -void kvm_mmu_module_exit(void); +void __init kvm_mmu_x86_module_init(void); +int kvm_mmu_vendor_module_init(void); 
+void kvm_mmu_vendor_module_exit(void); void kvm_mmu_destroy(struct kvm_vcpu *vcpu); int kvm_mmu_create(struct kvm_vcpu *vcpu); diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index 365111789cc6..5000cf59bdf5 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -18,6 +18,28 @@ #define __ALIGN_STR __stringify(__ALIGN) #endif +#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) +#define RET jmp __x86_return_thunk +#else /* CONFIG_RETPOLINE */ +#ifdef CONFIG_SLS +#define RET ret; int3 +#else +#define RET ret +#endif +#endif /* CONFIG_RETPOLINE */ + +#else /* __ASSEMBLY__ */ + +#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) +#define ASM_RET "jmp __x86_return_thunk\n\t" +#else /* CONFIG_RETPOLINE */ +#ifdef CONFIG_SLS +#define ASM_RET "ret; int3\n\t" +#else +#define ASM_RET "ret\n\t" +#endif +#endif /* CONFIG_RETPOLINE */ + #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_LINKAGE_H */ diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 2b7cc5397f80..f73327397b89 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -9,6 +9,7 @@ struct ucode_patch { struct list_head plist; void *data; /* Intel uses only this one */ + unsigned int size; u32 patch_id; u16 equiv_cpu; }; @@ -133,11 +134,13 @@ extern void load_ucode_ap(void); void reload_early_microcode(void); extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); extern bool initrd_gone; +void microcode_bsp_resume(void); #else static inline int __init microcode_init(void) { return 0; }; static inline void __init load_ucode_bsp(void) { } static inline void load_ucode_ap(void) { } static inline void reload_early_microcode(void) { } +static inline void microcode_bsp_resume(void) { } static inline bool get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; } #endif diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 30f76b966857..871a8b06d430 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -247,13 +247,6 @@ bool hv_vcpu_is_preempted(int vcpu); static inline void hv_apic_init(void) {} #endif -static inline void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, - struct msi_desc *msi_desc) -{ - msi_entry->address = msi_desc->msg.address_lo; - msi_entry->data = msi_desc->msg.data; -} - #else /* CONFIG_HYPERV */ static inline void hyperv_init(void) {} static inline void hyperv_setup_mmu_ops(void) {} diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 972a34d93505..5a8ee3b83af2 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -51,6 +51,8 @@ #define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */ #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ #define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ +#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */ +#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT) #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ @@ -91,6 +93,7 @@ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a #define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */ #define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */ +#define ARCH_CAP_RSBA BIT(2) /* RET may use alternative branch 
predictors */ #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */ #define ARCH_CAP_SSB_NO BIT(4) /* * Not susceptible to Speculative Store Bypass @@ -114,6 +117,41 @@ * Not susceptible to * TSX Async Abort (TAA) vulnerabilities. */ +#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /* + * Not susceptible to SBDR and SSDP + * variants of Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FBSDP_NO BIT(14) /* + * Not susceptible to FBSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_PSDP_NO BIT(15) /* + * Not susceptible to PSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FB_CLEAR BIT(17) /* + * VERW clears CPU fill buffer + * even on MDS_NO CPUs. + */ +#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /* + * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS] + * bit available to control VERW + * behavior. + */ +#define ARCH_CAP_RRSBA BIT(19) /* + * Indicates RET may use predictors + * other than the RSB. With eIBRS + * enabled predictions in kernel mode + * are restricted to targets in + * kernel. + */ +#define ARCH_CAP_PBRSB_NO BIT(24) /* + * Not susceptible to Post-Barrier + * Return Stack Buffer Predictions. + */ #define MSR_IA32_FLUSH_CMD 0x0000010b #define L1D_FLUSH BIT(0) /* @@ -131,6 +169,7 @@ /* SRBDS support */ #define MSR_IA32_MCU_OPT_CTRL 0x00000123 #define RNGDS_MITG_DIS BIT(0) +#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 @@ -450,6 +489,11 @@ #define MSR_AMD64_CPUID_FN_1 0xc0011004 #define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 + +#define MSR_AMD64_DE_CFG 0xc0011029 +#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 +#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) + #define MSR_AMD64_BU_CFG2 0xc001102a #define MSR_AMD64_IBSFETCHCTL 0xc0011030 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 @@ -482,6 +526,9 @@ /* Fam 17h MSRs */ #define MSR_F17H_IRPERF 0xc00000e9 +#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3 +#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1) + /* Fam 16h MSRs */ #define MSR_F16H_L2I_PERF_CTL 0xc0010230 #define MSR_F16H_L2I_PERF_CTR 0xc0010231 @@ -523,9 +570,6 @@ #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL #define FAM10H_MMIO_CONF_BASE_SHIFT 20 #define MSR_FAM10H_NODE_ID 0xc001100c -#define MSR_F10H_DECFG 0xc0011029 -#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 -#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT) /* K8 MSRs */ #define MSR_K8_TOP_MEM1 0xc001001a diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 4d0f5386e637..f14cdf951249 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -5,12 +5,15 @@ #include #include +#include #include -#include #include #include #include +#include + +#define RETPOLINE_THUNK_SIZE 32 /* * Fill the CPU return stack buffer. @@ -32,32 +35,57 @@ #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ /* + * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN. + */ +#define __FILL_RETURN_SLOT \ + ANNOTATE_INTRA_FUNCTION_CALL; \ + call 772f; \ + int3; \ +772: + +/* + * Stuff the entire RSB. + * * Google experimented with loop-unrolling and this turned out to be * the optimal version — two calls, each with their own speculation * trap should their return address end up getting used, in a loop. 
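+ *
+ * Per loop iteration, the x86_64 variant below therefore emits
+ * (a sketch of the expansion, not the literal bytes):
+ *
+ *	call 1f		// push a benign return address into the RSB
+ *	int3		// speculation trap, never reached architecturally
+ * 1:	call 2f
+ *	int3
+ * 2:	add $16, %rsp	// drop the two return addresses again
+ *	dec reg; jnz <loop>
+ *
+ * with a single trailing LFENCE after the loop as the barrier against
+ * jnz misprediction.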
*/ -#define __FILL_RETURN_BUFFER(reg, nr, sp) \ - mov $(nr/2), reg; \ -771: \ - ANNOTATE_INTRA_FUNCTION_CALL; \ - call 772f; \ -773: /* speculation trap */ \ - UNWIND_HINT_EMPTY; \ - pause; \ - lfence; \ - jmp 773b; \ -772: \ - ANNOTATE_INTRA_FUNCTION_CALL; \ - call 774f; \ -775: /* speculation trap */ \ - UNWIND_HINT_EMPTY; \ - pause; \ - lfence; \ - jmp 775b; \ -774: \ - add $(BITS_PER_LONG/8) * 2, sp; \ - dec reg; \ - jnz 771b; +#ifdef CONFIG_X86_64 +#define __FILL_RETURN_BUFFER(reg, nr) \ + mov $(nr/2), reg; \ +771: \ + __FILL_RETURN_SLOT \ + __FILL_RETURN_SLOT \ + add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \ + dec reg; \ + jnz 771b; \ + /* barrier for jnz misprediction */ \ + lfence; +#else +/* + * i386 doesn't unconditionally have LFENCE, as such it can't + * do a loop. + */ +#define __FILL_RETURN_BUFFER(reg, nr) \ + .rept nr; \ + __FILL_RETURN_SLOT; \ + .endr; \ + add $(BITS_PER_LONG/8) * nr, %_ASM_SP; +#endif + +/* + * Stuff a single RSB slot. + * + * To mitigate Post-Barrier RSB speculation, one CALL instruction must be + * forced to retire before letting a RET instruction execute. + * + * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed + * before this point. + */ +#define __FILL_ONE_RETURN \ + __FILL_RETURN_SLOT \ + add $(BITS_PER_LONG/8), %_ASM_SP; \ + lfence; #ifdef __ASSEMBLY__ @@ -73,6 +101,23 @@ .popsection .endm +/* + * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions + * vs RETBleed validation. + */ +#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE + +/* + * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should + * eventually turn into it's own annotation. + */ +.macro ANNOTATE_UNRET_END +#ifdef CONFIG_DEBUG_ENTRY + ANNOTATE_RETPOLINE_SAFE + nop +#endif +.endm + /* * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple * indirect jmp/call which may be susceptible to the Spectre variant 2 @@ -81,7 +126,7 @@ .macro JMP_NOSPEC reg:req #ifdef CONFIG_RETPOLINE ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \ - __stringify(jmp __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \ + __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE #else jmp *%\reg @@ -91,7 +136,7 @@ .macro CALL_NOSPEC reg:req #ifdef CONFIG_RETPOLINE ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \ - __stringify(call __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \ + __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE #else call *%\reg @@ -102,11 +147,37 @@ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP * monstrosity above, manually. */ -.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req -#ifdef CONFIG_RETPOLINE - ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr - __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP) +.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS) + ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \ + __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \ + __stringify(__FILL_ONE_RETURN), \ftr2 + .Lskip_rsb_\@: +.endm + +#ifdef CONFIG_CPU_UNRET_ENTRY +#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret" +#else +#define CALL_ZEN_UNTRAIN_RET "" +#endif + +/* + * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the + * return thunk isn't mapped into the userspace tables (then again, AMD + * typically has NO_MELTDOWN). 
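+ *
+ * A typical call site therefore looks roughly like this (a sketch
+ * only; the scratch register and surrounding context vary between
+ * entry paths):
+ *
+ *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+ *	...			// stack usable, no RET executed yet
+ *	UNTRAIN_RET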
+ * + * While zen_untrain_ret() doesn't clobber anything but requires stack, + * entry_ibpb() will clobber AX, CX, DX. + * + * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point + * where we have a stack but before any RET instruction. + */ +.macro UNTRAIN_RET +#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) + ANNOTATE_UNRET_END + ALTERNATIVE_2 "", \ + CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + "call entry_ibpb", X86_FEATURE_ENTRY_IBPB #endif .endm @@ -118,7 +189,21 @@ _ASM_PTR " 999b\n\t" \ ".popsection\n\t" +extern void __x86_return_thunk(void); +extern void zen_untrain_ret(void); +extern void entry_ibpb(void); + #ifdef CONFIG_RETPOLINE + +typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE]; + +#define GEN(reg) \ + extern retpoline_thunk_t __x86_indirect_thunk_ ## reg; +#include +#undef GEN + +extern retpoline_thunk_t __x86_indirect_thunk_array[]; + #ifdef CONFIG_X86_64 /* @@ -129,7 +214,7 @@ ALTERNATIVE_2( \ ANNOTATE_RETPOLINE_SAFE \ "call *%[thunk_target]\n", \ - "call __x86_retpoline_%V[thunk_target]\n", \ + "call __x86_indirect_thunk_%V[thunk_target]\n", \ X86_FEATURE_RETPOLINE, \ "lfence;\n" \ ANNOTATE_RETPOLINE_SAFE \ @@ -181,6 +266,7 @@ enum spectre_v2_mitigation { SPECTRE_V2_EIBRS, SPECTRE_V2_EIBRS_RETPOLINE, SPECTRE_V2_EIBRS_LFENCE, + SPECTRE_V2_IBRS, }; /* The indirect branch speculation control variants */ @@ -223,6 +309,9 @@ static inline void indirect_branch_prediction_barrier(void) /* The Intel SPEC CTRL MSR base value cache */ extern u64 x86_spec_ctrl_base; +DECLARE_PER_CPU(u64, x86_spec_ctrl_current); +extern void update_spec_ctrl_cond(u64 val); +extern u64 spec_ctrl_current(void); /* * With retpoline, we must use IBRS to restrict branch prediction @@ -232,18 +321,18 @@ extern u64 x86_spec_ctrl_base; */ #define firmware_restrict_branch_speculation_start() \ do { \ - u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \ - \ preempt_disable(); \ - alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ + alternative_msr_write(MSR_IA32_SPEC_CTRL, \ + spec_ctrl_current() | SPEC_CTRL_IBRS, \ X86_FEATURE_USE_IBRS_FW); \ + alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, \ + X86_FEATURE_USE_IBPB_FW); \ } while (0) #define firmware_restrict_branch_speculation_end() \ do { \ - u64 val = x86_spec_ctrl_base; \ - \ - alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ + alternative_msr_write(MSR_IA32_SPEC_CTRL, \ + spec_ctrl_current(), \ X86_FEATURE_USE_IBRS_FW); \ preempt_enable(); \ } while (0) @@ -255,6 +344,8 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); DECLARE_STATIC_KEY_FALSE(mds_user_clear); DECLARE_STATIC_KEY_FALSE(mds_idle_clear); +DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); + #include /** @@ -304,63 +395,4 @@ static inline void mds_idle_clear_cpu_buffers(void) #endif /* __ASSEMBLY__ */ -/* - * Below is used in the eBPF JIT compiler and emits the byte sequence - * for the following assembly: - * - * With retpolines configured: - * - * callq do_rop - * spec_trap: - * pause - * lfence - * jmp spec_trap - * do_rop: - * mov %rcx,(%rsp) for x86_64 - * mov %edx,(%esp) for x86_32 - * retq - * - * Without retpolines configured: - * - * jmp *%rcx for x86_64 - * jmp *%edx for x86_32 - */ -#ifdef CONFIG_RETPOLINE -# ifdef CONFIG_X86_64 -# define RETPOLINE_RCX_BPF_JIT_SIZE 17 -# define RETPOLINE_RCX_BPF_JIT() \ -do { \ - EMIT1_off32(0xE8, 7); /* callq do_rop */ \ - /* spec_trap: */ \ - EMIT2(0xF3, 0x90); /* pause */ \ - EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \ - EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \ - /* do_rop: */ \ - EMIT4(0x48, 
0x89, 0x0C, 0x24); /* mov %rcx,(%rsp) */ \ - EMIT1(0xC3); /* retq */ \ -} while (0) -# else /* !CONFIG_X86_64 */ -# define RETPOLINE_EDX_BPF_JIT() \ -do { \ - EMIT1_off32(0xE8, 7); /* call do_rop */ \ - /* spec_trap: */ \ - EMIT2(0xF3, 0x90); /* pause */ \ - EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \ - EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \ - /* do_rop: */ \ - EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */ \ - EMIT1(0xC3); /* ret */ \ -} while (0) -# endif -#else /* !CONFIG_RETPOLINE */ -# ifdef CONFIG_X86_64 -# define RETPOLINE_RCX_BPF_JIT_SIZE 2 -# define RETPOLINE_RCX_BPF_JIT() \ - EMIT2(0xFF, 0xE1); /* jmp *%rcx */ -# else /* !CONFIG_X86_64 */ -# define RETPOLINE_EDX_BPF_JIT() \ - EMIT2(0xFF, 0xE2) /* jmp *%edx */ -# endif -#endif - #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */ diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5647bcdba776..4a32b0d34376 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -630,7 +630,7 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu); "call " #func ";" \ PV_RESTORE_ALL_CALLER_REGS \ FRAME_END \ - "ret;" \ + ASM_RET \ ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \ ".popsection") diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h index 159622ee0674..1474cf96251d 100644 --- a/arch/x86/include/asm/qspinlock_paravirt.h +++ b/arch/x86/include/asm/qspinlock_paravirt.h @@ -48,7 +48,7 @@ asm (".pushsection .text;" "jne .slowpath;" "pop %rdx;" FRAME_END - "ret;" + ASM_RET ".slowpath: " "push %rsi;" "movzbl %al,%esi;" @@ -56,7 +56,7 @@ asm (".pushsection .text;" "pop %rsi;" "pop %rdx;" FRAME_END - "ret;" + ASM_RET ".size " PV_UNLOCK ", .-" PV_UNLOCK ";" ".popsection"); diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h index 8b58d6975d5d..ea1d8eb644cb 100644 --- a/arch/x86/include/asm/smap.h +++ b/arch/x86/include/asm/smap.h @@ -11,6 +11,7 @@ #include #include +#include /* "Raw" instruction opcodes */ #define __ASM_CLAC ".byte 0x0f,0x01,0xca" @@ -18,8 +19,6 @@ #ifdef __ASSEMBLY__ -#include - #ifdef CONFIG_X86_SMAP #define ASM_CLAC \ @@ -37,8 +36,6 @@ #else /* __ASSEMBLY__ */ -#include - #ifdef CONFIG_X86_SMAP static __always_inline void clac(void) diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h index cbb67b6030f9..491aadfac611 100644 --- a/arch/x86/include/asm/static_call.h +++ b/arch/x86/include/asm/static_call.h @@ -21,6 +21,16 @@ * relative displacement across sections. */ +/* + * The trampoline is 8 bytes and of the general form: + * + * jmp.d32 \func + * ud1 %esp, %ecx + * + * That trailing #UD provides both a speculation stop and serves as a unique + * 3 byte signature identifying static call trampolines. Also see tramp_ud[] + * and __static_call_fixup(). + */ #define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insns) \ asm(".pushsection .static_call.text, \"ax\" \n" \ ".align 4 \n" \ @@ -34,8 +44,13 @@ #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \ __ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)") +#ifdef CONFIG_RETHUNK #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \ - __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop") + __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "jmp __x86_return_thunk") +#else +#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \ + __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; int3; nop; nop; nop") +#endif #define ARCH_ADD_TRAMP_KEY(name) \ @@ -44,4 +59,6 @@ ".long " STATIC_CALL_KEY_STR(name) " - . 
\n" \ ".popsection \n") +extern bool __static_call_fixup(void *tramp, u8 op, void *dest); + #endif /* _ASM_STATIC_CALL_H */ diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h index fdbd9d7b7bca..3b97aa921543 100644 --- a/arch/x86/include/asm/suspend_32.h +++ b/arch/x86/include/asm/suspend_32.h @@ -21,7 +21,6 @@ struct saved_context { #endif unsigned long cr0, cr2, cr3, cr4; u64 misc_enable; - bool misc_enable_saved; struct saved_msrs saved_msrs; struct desc_ptr gdt_desc; struct desc_ptr idt; @@ -30,6 +29,7 @@ struct saved_context { unsigned long tr; unsigned long safety; unsigned long return_address; + bool misc_enable_saved; } __attribute__((packed)); /* routines for saving/restoring kernel state */ diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h index 35bb35d28733..54df06687d83 100644 --- a/arch/x86/include/asm/suspend_64.h +++ b/arch/x86/include/asm/suspend_64.h @@ -14,9 +14,13 @@ * Image of the saved processor state, used by the low level ACPI suspend to * RAM code and by the low level hibernation code. * - * If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that - * __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c, - * still work as required. + * If you modify it, check how it is used in arch/x86/kernel/acpi/wakeup_64.S + * and make sure that __save/__restore_processor_state(), defined in + * arch/x86/power/cpu.c, still work as required. + * + * Because the structure is packed, make sure to avoid unaligned members. For + * optimisation purposes but also because tools like kmemleak only search for + * pointers that are aligned. */ struct saved_context { struct pt_regs regs; @@ -36,7 +40,6 @@ struct saved_context { unsigned long cr0, cr2, cr3, cr4; u64 misc_enable; - bool misc_enable_saved; struct saved_msrs saved_msrs; unsigned long efer; u16 gdt_pad; /* Unused */ @@ -48,6 +51,7 @@ struct saved_context { unsigned long tr; unsigned long safety; unsigned long return_address; + bool misc_enable_saved; } __attribute__((packed)); #define loaddebug(thread,register) \ diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index e701f29b4881..012c8ee93b67 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -93,6 +93,7 @@ struct thread_info { #define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_IA32 17 /* IA32 compatibility process */ #define TIF_SLD 18 /* Restore split lock detection on context switch */ +#define TIF_NOTIFY_SIGNAL 19 /* signal notifications exist */ #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */ #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ @@ -121,6 +122,7 @@ struct thread_info { #define _TIF_NOCPUID (1 << TIF_NOCPUID) #define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_IA32 (1 << TIF_IA32) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_SLD (1 << TIF_SLD) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h index a4a8b1b16c0c..956e4145311b 100644 --- a/arch/x86/include/asm/timex.h +++ b/arch/x86/include/asm/timex.h @@ -5,6 +5,15 @@ #include #include +static inline unsigned long random_get_entropy(void) +{ + if (!IS_ENABLED(CONFIG_X86_TSC) && + !cpu_feature_enabled(X86_FEATURE_TSC)) + return random_get_entropy_fallback(); + return rdtsc(); +} +#define 
random_get_entropy random_get_entropy + /* Assume we use the PIT time source for the clock tick */ #define CLOCK_TICK_RATE PIT_TICK_RATE diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 01a300a9700b..fbdc3d951494 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -20,13 +20,12 @@ extern void disable_TSC(void); static inline cycles_t get_cycles(void) { -#ifndef CONFIG_X86_TSC - if (!boot_cpu_has(X86_FEATURE_TSC)) + if (!IS_ENABLED(CONFIG_X86_TSC) && + !cpu_feature_enabled(X86_FEATURE_TSC)) return 0; -#endif - return rdtsc(); } +#define get_cycles get_cycles extern struct system_counterval_t convert_art_to_tsc(u64 art); extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns); diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h index 664d4610d700..56664b31b6da 100644 --- a/arch/x86/include/asm/unwind_hints.h +++ b/arch/x86/include/asm/unwind_hints.h @@ -8,7 +8,11 @@ #ifdef __ASSEMBLY__ .macro UNWIND_HINT_EMPTY - UNWIND_HINT sp_reg=ORC_REG_UNDEFINED type=UNWIND_HINT_TYPE_CALL end=1 + UNWIND_HINT type=UNWIND_HINT_TYPE_CALL end=1 +.endm + +.macro UNWIND_HINT_ENTRY + UNWIND_HINT type=UNWIND_HINT_TYPE_ENTRY end=1 .endm .macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0 @@ -48,17 +52,16 @@ UNWIND_HINT_REGS base=\base offset=\offset partial=1 .endm -.macro UNWIND_HINT_FUNC sp_offset=8 - UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=\sp_offset type=UNWIND_HINT_TYPE_CALL +.macro UNWIND_HINT_FUNC + UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC .endm -/* - * RET_OFFSET: Used on instructions that terminate a function; mostly RETURN - * and sibling calls. On these, sp_offset denotes the expected offset from - * initial_func_cfi. - */ -.macro UNWIND_HINT_RET_OFFSET sp_offset=8 - UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_RET_OFFSET sp_offset=\sp_offset +.macro UNWIND_HINT_SAVE + UNWIND_HINT type=UNWIND_HINT_TYPE_SAVE +.endm + +.macro UNWIND_HINT_RESTORE + UNWIND_HINT type=UNWIND_HINT_TYPE_RESTORE .endm #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index daf88f8143c5..cf69081073b5 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -60,7 +60,7 @@ save_registers: popl saved_context_eflags movl $ret_point, saved_eip - ret + RET restore_registers: @@ -70,7 +70,7 @@ restore_registers: movl saved_context_edi, %edi pushl saved_context_eflags popfl - ret + RET SYM_CODE_START(do_suspend_lowlevel) call save_processor_state @@ -86,7 +86,7 @@ SYM_CODE_START(do_suspend_lowlevel) ret_point: call restore_registers call restore_processor_state - ret + RET SYM_CODE_END(do_suspend_lowlevel) .data diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index c5f58615f448..92f0a974516d 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -28,6 +28,7 @@ #include #include #include +#include int __read_mostly alternatives_patched; @@ -268,6 +269,8 @@ static void __init_or_module add_nops(void *insns, unsigned int len) } } +extern s32 __retpoline_sites[], __retpoline_sites_end[]; +extern s32 __return_sites[], __return_sites_end[]; extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; extern s32 __smp_locks[], __smp_locks_end[]; void text_poke_early(void *addr, const void *opcode, size_t len); @@ -337,26 +340,70 @@ done: n_dspl, (unsigned long)orig_insn + n_dspl + repl_len); } +/* + * optimize_nops_range() - Optimize a sequence 
of single byte NOPs (0x90) + * + * @instr: instruction byte stream + * @instrlen: length of the above + * @off: offset within @instr where the first NOP has been detected + * + * Return: number of NOPs found (and replaced). + */ +static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off) +{ + unsigned long flags; + int i = off, nnops; + + while (i < instrlen) { + if (instr[i] != 0x90) + break; + + i++; + } + + nnops = i - off; + + if (nnops <= 1) + return nnops; + + local_irq_save(flags); + add_nops(instr + off, nnops); + local_irq_restore(flags); + + DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i); + + return nnops; +} + /* * "noinline" to cause control flow change and thus invalidate I$ and * cause refetch after modification. */ -static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) +static void __init_or_module noinline optimize_nops(u8 *instr, size_t len) { - unsigned long flags; - int i; + struct insn insn; + int i = 0; - for (i = 0; i < a->padlen; i++) { - if (instr[i] != 0x90) + /* + * Jump over the non-NOP insns and optimize single-byte NOPs into bigger + * ones. + */ + for (;;) { + if (insn_decode_kernel(&insn, &instr[i])) + return; + + /* + * See if this and any potentially following NOPs can be + * optimized. + */ + if (insn.length == 1 && insn.opcode.bytes[0] == 0x90) + i += optimize_nops_range(instr, len, i); + else + i += insn.length; + + if (i >= len) return; } - - local_irq_save(flags); - add_nops(instr + (a->instrlen - a->padlen), a->padlen); - local_irq_restore(flags); - - DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ", - instr, a->instrlen - a->padlen, a->padlen); } /* @@ -388,23 +435,29 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, */ for (a = start; a < end; a++) { int insn_buff_sz = 0; + /* Mask away "NOT" flag bit for feature to test. */ + u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV; instr = (u8 *)&a->instr_offset + a->instr_offset; replacement = (u8 *)&a->repl_offset + a->repl_offset; BUG_ON(a->instrlen > sizeof(insn_buff)); - BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32); - if (!boot_cpu_has(a->cpuid)) { - if (a->padlen > 1) - optimize_nops(a, instr); + BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32); - continue; - } + /* + * Patch if either: + * - feature is present + * - feature not present but ALTINSTR_FLAG_INV is set to mean, + * patch if feature is *NOT* present. + */ + if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV)) + goto next; - DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d", - a->cpuid >> 5, - a->cpuid & 0x1f, + DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)", + (a->cpuid & ALTINSTR_FLAG_INV) ? "!" 
: "", + feature >> 5, + feature & 0x1f, instr, instr, a->instrlen, - replacement, a->replacementlen, a->padlen); + replacement, a->replacementlen); DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr); DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement); @@ -428,17 +481,262 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, if (a->replacementlen && is_jmp(replacement[0])) recompute_jump(a, instr, replacement, insn_buff); - if (a->instrlen > a->replacementlen) { - add_nops(insn_buff + a->replacementlen, - a->instrlen - a->replacementlen); - insn_buff_sz += a->instrlen - a->replacementlen; - } + for (; insn_buff_sz < a->instrlen; insn_buff_sz++) + insn_buff[insn_buff_sz] = 0x90; + DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr); text_poke_early(instr, insn_buff, insn_buff_sz); + +next: + optimize_nops(instr, a->instrlen); } } +#if defined(CONFIG_RETPOLINE) && defined(CONFIG_STACK_VALIDATION) + +/* + * CALL/JMP *%\reg + */ +static int emit_indirect(int op, int reg, u8 *bytes) +{ + int i = 0; + u8 modrm; + + switch (op) { + case CALL_INSN_OPCODE: + modrm = 0x10; /* Reg = 2; CALL r/m */ + break; + + case JMP32_INSN_OPCODE: + modrm = 0x20; /* Reg = 4; JMP r/m */ + break; + + default: + WARN_ON_ONCE(1); + return -1; + } + + if (reg >= 8) { + bytes[i++] = 0x41; /* REX.B prefix */ + reg -= 8; + } + + modrm |= 0xc0; /* Mod = 3 */ + modrm += reg; + + bytes[i++] = 0xff; /* opcode */ + bytes[i++] = modrm; + + return i; +} + +/* + * Rewrite the compiler generated retpoline thunk calls. + * + * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate + * indirect instructions, avoiding the extra indirection. + * + * For example, convert: + * + * CALL __x86_indirect_thunk_\reg + * + * into: + * + * CALL *%\reg + * + * It also tries to inline spectre_v2=retpoline,amd when size permits. + */ +static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes) +{ + retpoline_thunk_t *target; + int reg, ret, i = 0; + u8 op, cc; + + target = addr + insn->length + insn->immediate.value; + reg = target - __x86_indirect_thunk_array; + + if (WARN_ON_ONCE(reg & ~0xf)) + return -1; + + /* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */ + BUG_ON(reg == 4); + + if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) && + !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) + return -1; + + op = insn->opcode.bytes[0]; + + /* + * Convert: + * + * Jcc.d32 __x86_indirect_thunk_\reg + * + * into: + * + * Jncc.d8 1f + * [ LFENCE ] + * JMP *%\reg + * [ NOP ] + * 1: + */ + /* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */ + if (op == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80) { + cc = insn->opcode.bytes[1] & 0xf; + cc ^= 1; /* invert condition */ + + bytes[i++] = 0x70 + cc; /* Jcc.d8 */ + bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */ + + /* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */ + op = JMP32_INSN_OPCODE; + } + + /* + * For RETPOLINE_AMD: prepend the indirect CALL/JMP with an LFENCE. + */ + if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { + bytes[i++] = 0x0f; + bytes[i++] = 0xae; + bytes[i++] = 0xe8; /* LFENCE */ + } + + ret = emit_indirect(op, reg, bytes + i); + if (ret < 0) + return ret; + i += ret; + + for (; i < insn->length;) + bytes[i++] = 0x90; + + return i; +} + +/* + * Generated by 'objtool --retpoline'. 
+ */ +void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) +{ + s32 *s; + + for (s = start; s < end; s++) { + void *addr = (void *)s + *s; + struct insn insn; + int len, ret; + u8 bytes[16]; + u8 op1, op2; + + ret = insn_decode_kernel(&insn, addr); + if (WARN_ON_ONCE(ret < 0)) + continue; + + op1 = insn.opcode.bytes[0]; + op2 = insn.opcode.bytes[1]; + + switch (op1) { + case CALL_INSN_OPCODE: + case JMP32_INSN_OPCODE: + break; + + case 0x0f: /* escape */ + if (op2 >= 0x80 && op2 <= 0x8f) + break; + fallthrough; + default: + WARN_ON_ONCE(1); + continue; + } + + DPRINTK("retpoline at: %pS (%px) len: %d to: %pS", + addr, addr, insn.length, + addr + insn.length + insn.immediate.value); + + len = patch_retpoline(addr, &insn, bytes); + if (len == insn.length) { + optimize_nops(bytes, len); + DUMP_BYTES(((u8*)addr), len, "%px: orig: ", addr); + DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr); + text_poke_early(addr, bytes, len); + } + } +} + +#ifdef CONFIG_RETHUNK +/* + * Rewrite the compiler generated return thunk tail-calls. + * + * For example, convert: + * + * JMP __x86_return_thunk + * + * into: + * + * RET + */ +static int patch_return(void *addr, struct insn *insn, u8 *bytes) +{ + int i = 0; + + if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) + return -1; + + bytes[i++] = RET_INSN_OPCODE; + + for (; i < insn->length;) + bytes[i++] = INT3_INSN_OPCODE; + + return i; +} + +void __init_or_module noinline apply_returns(s32 *start, s32 *end) +{ + s32 *s; + + for (s = start; s < end; s++) { + void *dest = NULL, *addr = (void *)s + *s; + struct insn insn; + int len, ret; + u8 bytes[16]; + u8 op; + + ret = insn_decode_kernel(&insn, addr); + if (WARN_ON_ONCE(ret < 0)) + continue; + + op = insn.opcode.bytes[0]; + if (op == JMP32_INSN_OPCODE) + dest = addr + insn.length + insn.immediate.value; + + if (__static_call_fixup(addr, op, dest) || + WARN_ONCE(dest != &__x86_return_thunk, + "missing return thunk: %pS-%pS: %*ph", + addr, dest, 5, addr)) + continue; + + DPRINTK("return thunk at: %pS (%px) len: %d to: %pS", + addr, addr, insn.length, + addr + insn.length + insn.immediate.value); + + len = patch_return(addr, &insn, bytes); + if (len == insn.length) { + DUMP_BYTES(((u8*)addr), len, "%px: orig: ", addr); + DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr); + text_poke_early(addr, bytes, len); + } + } +} +#else +void __init_or_module noinline apply_returns(s32 *start, s32 *end) { } +#endif /* CONFIG_RETHUNK */ + +#else /* !RETPOLINES || !CONFIG_STACK_VALIDATION */ + +void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { } +void __init_or_module noinline apply_returns(s32 *start, s32 *end) { } + +#endif /* CONFIG_RETPOLINE && CONFIG_STACK_VALIDATION */ + #ifdef CONFIG_SMP static void alternatives_smp_lock(const s32 *start, const s32 *end, u8 *text, u8 *text_end) @@ -710,6 +1008,13 @@ void __init alternative_instructions(void) * patching. */ + /* + * Rewrite the retpolines, must be done before alternatives since + * those can rewrite the retpoline thunks. 
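+	 *
+	 * apply_returns() likewise rewrites compiler-generated
+	 * 'JMP __x86_return_thunk' sites into a bare RET (padded with
+	 * INT3) whenever X86_FEATURE_RETHUNK is not enabled; see
+	 * patch_return() above.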
+ */ + apply_retpolines(__retpoline_sites, __retpoline_sites_end); + apply_returns(__return_sites, __return_sites_end); + apply_alternatives(__alt_instructions, __alt_instructions_end); #ifdef CONFIG_SMP @@ -996,10 +1301,13 @@ void text_poke_sync(void) } struct text_poke_loc { - s32 rel_addr; /* addr := _stext + rel_addr */ - s32 rel32; + /* addr := _stext + rel_addr */ + s32 rel_addr; + s32 disp; + u8 len; u8 opcode; const u8 text[POKE_MAX_OPCODE_SIZE]; + /* see text_poke_bp_batch() */ u8 old; }; @@ -1009,21 +1317,23 @@ struct bp_patching_desc { atomic_t refs; }; -static struct bp_patching_desc *bp_desc; +static struct bp_patching_desc bp_desc; static __always_inline -struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp) +struct bp_patching_desc *try_get_desc(void) { - struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */ + struct bp_patching_desc *desc = &bp_desc; - if (!desc || !arch_atomic_inc_not_zero(&desc->refs)) + if (!arch_atomic_inc_not_zero(&desc->refs)) return NULL; return desc; } -static __always_inline void put_desc(struct bp_patching_desc *desc) +static __always_inline void put_desc(void) { + struct bp_patching_desc *desc = &bp_desc; + smp_mb__before_atomic(); arch_atomic_dec(&desc->refs); } @@ -1048,7 +1358,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs) { struct bp_patching_desc *desc; struct text_poke_loc *tp; - int len, ret = 0; + int ret = 0; void *ip; if (user_mode(regs)) @@ -1056,15 +1366,15 @@ noinstr int poke_int3_handler(struct pt_regs *regs) /* * Having observed our INT3 instruction, we now must observe - * bp_desc: + * bp_desc with non-zero refcount: * - * bp_desc = desc INT3 + * bp_desc.refs = 1 INT3 * WMB RMB - * write INT3 if (desc) + * write INT3 if (bp_desc.refs != 0) */ smp_rmb(); - desc = try_get_desc(&bp_desc); + desc = try_get_desc(); if (!desc) return 0; @@ -1088,8 +1398,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs) goto out_put; } - len = text_opcode_size(tp->opcode); - ip += len; + ip += tp->len; switch (tp->opcode) { case INT3_INSN_OPCODE: @@ -1104,12 +1413,12 @@ noinstr int poke_int3_handler(struct pt_regs *regs) break; case CALL_INSN_OPCODE: - int3_emulate_call(regs, (long)ip + tp->rel32); + int3_emulate_call(regs, (long)ip + tp->disp); break; case JMP32_INSN_OPCODE: case JMP8_INSN_OPCODE: - int3_emulate_jmp(regs, (long)ip + tp->rel32); + int3_emulate_jmp(regs, (long)ip + tp->disp); break; default: @@ -1119,7 +1428,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs) ret = 1; out_put: - put_desc(desc); + put_desc(); return ret; } @@ -1150,18 +1459,20 @@ static int tp_vec_nr; */ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries) { - struct bp_patching_desc desc = { - .vec = tp, - .nr_entries = nr_entries, - .refs = ATOMIC_INIT(1), - }; unsigned char int3 = INT3_INSN_OPCODE; unsigned int i; int do_sync; lockdep_assert_held(&text_mutex); - smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */ + bp_desc.vec = tp; + bp_desc.nr_entries = nr_entries; + + /* + * Corresponds to the implicit memory barrier in try_get_desc() to + * ensure reading a non-zero refcount provides up to date bp_desc data. 
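+	 *
+	 * In release/acquire terms the pairing is:
+	 *
+	 *	text_poke_bp_batch()		poke_int3_handler()
+	 *
+	 *	bp_desc.vec = tp;
+	 *	atomic_set_release(&refs, 1);
+	 *					atomic_inc_not_zero(&refs)
+	 *					read bp_desc.vec, .nr_entries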
+ */ + atomic_set_release(&bp_desc.refs, 1); /* * Corresponding read barrier in int3 notifier for making sure the @@ -1184,7 +1495,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries */ for (do_sync = 0, i = 0; i < nr_entries; i++) { u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, }; - int len = text_opcode_size(tp[i].opcode); + int len = tp[i].len; if (len - INT3_INSN_SIZE > 0) { memcpy(old + INT3_INSN_SIZE, @@ -1249,32 +1560,46 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries text_poke_sync(); /* - * Remove and synchronize_rcu(), except we have a very primitive - * refcount based completion. + * Remove and wait for refs to be zero. */ - WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */ - if (!atomic_dec_and_test(&desc.refs)) - atomic_cond_read_acquire(&desc.refs, !VAL); + if (!atomic_dec_and_test(&bp_desc.refs)) + atomic_cond_read_acquire(&bp_desc.refs, !VAL); } static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, const void *opcode, size_t len, const void *emulate) { struct insn insn; + int ret, i; memcpy((void *)tp->text, opcode, len); if (!emulate) emulate = opcode; - kernel_insn_init(&insn, emulate, MAX_INSN_SIZE); - insn_get_length(&insn); - - BUG_ON(!insn_complete(&insn)); - BUG_ON(len != insn.length); + ret = insn_decode_kernel(&insn, emulate); + BUG_ON(ret < 0); tp->rel_addr = addr - (void *)_stext; + tp->len = len; tp->opcode = insn.opcode.bytes[0]; + switch (tp->opcode) { + case RET_INSN_OPCODE: + case JMP32_INSN_OPCODE: + case JMP8_INSN_OPCODE: + /* + * Control flow instructions without implied execution of the + * next instruction can be padded with INT3. + */ + for (i = insn.length; i < len; i++) + BUG_ON(tp->text[i] != INT3_INSN_OPCODE); + break; + + default: + BUG_ON(len != insn.length); + }; + + switch (tp->opcode) { case INT3_INSN_OPCODE: case RET_INSN_OPCODE: @@ -1283,7 +1608,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, case CALL_INSN_OPCODE: case JMP32_INSN_OPCODE: case JMP8_INSN_OPCODE: - tp->rel32 = insn.immediate.value; + tp->disp = insn.immediate.value; break; default: /* assume NOP */ @@ -1291,13 +1616,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, case 2: /* NOP2 -- emulate as JMP8+0 */ BUG_ON(memcmp(emulate, ideal_nops[len], len)); tp->opcode = JMP8_INSN_OPCODE; - tp->rel32 = 0; + tp->disp = 0; break; case 5: /* NOP5 -- emulate as JMP32+0 */ BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len)); tp->opcode = JMP32_INSN_OPCODE; - tp->rel32 = 0; + tp->disp = 0; break; default: /* unknown instruction */ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 24539a05c58c..1c96f2425eaf 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -168,7 +168,7 @@ static __init int setup_apicpmtimer(char *s) { apic_calibrate_pmtmr = 1; notsc_setup(NULL); - return 0; + return 1; } __setup("apicpmtimer", setup_apicpmtimer); #endif diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 40f466de8924..9c283562dfd4 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -199,7 +199,13 @@ static void __init uv_tsc_check_sync(void) int mmr_shift; char *state; - /* Different returns from different UV BIOS versions */ + /* UV5 guarantees synced TSCs; do not zero TSC_ADJUST */ + if (!is_uv(UV2|UV3|UV4)) { + mark_tsc_async_resets("UV5+"); + return; + } + + /* UV2,3,4, UV BIOS TSC sync state available */ mmr = 
uv_early_read_mmr(UVH_TSC_SYNC_MMR); mmr_shift = is_uv2_hub() ? UVH_TSC_SYNC_SHIFT_UV2K : UVH_TSC_SYNC_SHIFT; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index acea05eed27d..ec3fa4dc9031 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -822,8 +822,6 @@ static void init_amd_gh(struct cpuinfo_x86 *c) set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); } -#define MSR_AMD64_DE_CFG 0xC0011029 - static void init_amd_ln(struct cpuinfo_x86 *c) { /* @@ -914,6 +912,28 @@ static void init_amd_bd(struct cpuinfo_x86 *c) clear_rdrand_cpuid_bit(c); } +void init_spectral_chicken(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_CPU_UNRET_ENTRY + u64 value; + + /* + * On Zen2 we offer this chicken (bit) on the altar of Speculation. + * + * This suppresses speculation from the middle of a basic block, i.e. it + * suppresses non-branch predictions. + * + * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H + */ + if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) { + if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) { + value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT; + wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value); + } + } +#endif +} + static void init_amd_zn(struct cpuinfo_x86 *c) { set_cpu_cap(c, X86_FEATURE_ZEN); @@ -922,12 +942,21 @@ static void init_amd_zn(struct cpuinfo_x86 *c) node_reclaim_distance = 32; #endif - /* - * Fix erratum 1076: CPB feature bit not being set in CPUID. - * Always set it, except when running under a hypervisor. - */ - if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB)) - set_cpu_cap(c, X86_FEATURE_CPB); + /* Fix up CPUID bits, but only if not virtualised. */ + if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) { + + /* Erratum 1076: CPB feature bit not being set in CPUID. */ + if (!cpu_has(c, X86_FEATURE_CPB)) + set_cpu_cap(c, X86_FEATURE_CPB); + + /* + * Zen3 (Fam19 model < 0x10) parts are not susceptible to + * Branch Type Confusion, but predate the allocation of the + * BTC_NO bit. + */ + if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO)) + set_cpu_cap(c, X86_FEATURE_BTC_NO); + } } static void init_amd(struct cpuinfo_x86 *c) @@ -959,7 +988,8 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x12: init_amd_ln(c); break; case 0x15: init_amd_bd(c); break; case 0x16: init_amd_jg(c); break; - case 0x17: fallthrough; + case 0x17: init_spectral_chicken(c); + fallthrough; case 0x19: init_amd_zn(c); break; } @@ -986,8 +1016,8 @@ static void init_amd(struct cpuinfo_x86 *c) * msr_set_bit() uses the safe accessors, too, even if the MSR * is not present. 
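+	 *
+	 * (MSR_AMD64_DE_CFG used below is the renamed MSR_F10H_DECFG;
+	 * both name the same MSR, 0xc0011029.)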
*/ - msr_set_bit(MSR_F10H_DECFG, - MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); + msr_set_bit(MSR_AMD64_DE_CFG, + MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT); /* A serializing LFENCE stops RDTSC speculation */ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 78b9514a3844..e2e22a5740a4 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -38,23 +38,58 @@ static void __init spectre_v1_select_mitigation(void); static void __init spectre_v2_select_mitigation(void); +static void __init retbleed_select_mitigation(void); +static void __init spectre_v2_user_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); static void __init mds_select_mitigation(void); -static void __init mds_print_mitigation(void); +static void __init md_clear_update_mitigation(void); +static void __init md_clear_select_mitigation(void); static void __init taa_select_mitigation(void); +static void __init mmio_select_mitigation(void); static void __init srbds_select_mitigation(void); -/* The base value of the SPEC_CTRL MSR that always has to be preserved. */ +/* The base value of the SPEC_CTRL MSR without task-specific bits set */ u64 x86_spec_ctrl_base; EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); + +/* The current value of the SPEC_CTRL MSR with task-specific bits set */ +DEFINE_PER_CPU(u64, x86_spec_ctrl_current); +EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); + static DEFINE_MUTEX(spec_ctrl_mutex); +/* Update SPEC_CTRL MSR and its cached copy unconditionally */ +static void update_spec_ctrl(u64 val) +{ + this_cpu_write(x86_spec_ctrl_current, val); + wrmsrl(MSR_IA32_SPEC_CTRL, val); +} + /* - * The vendor and possibly platform specific bits which can be modified in - * x86_spec_ctrl_base. + * Keep track of the SPEC_CTRL MSR value for the current task, which may differ + * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). */ -static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; +void update_spec_ctrl_cond(u64 val) +{ + if (this_cpu_read(x86_spec_ctrl_current) == val) + return; + + this_cpu_write(x86_spec_ctrl_current, val); + + /* + * When KERNEL_IBRS this MSR is written on return-to-user, unless + * forced the update can be delayed until that time. + */ + if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) + wrmsrl(MSR_IA32_SPEC_CTRL, val); +} + +u64 spec_ctrl_current(void) +{ + return this_cpu_read(x86_spec_ctrl_current); +} +EXPORT_SYMBOL_GPL(spec_ctrl_current); /* * AMD specific MSR info for Speculative Store Bypass control. @@ -77,6 +112,10 @@ EXPORT_SYMBOL_GPL(mds_user_clear); DEFINE_STATIC_KEY_FALSE(mds_idle_clear); EXPORT_SYMBOL_GPL(mds_idle_clear); +/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */ +DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); +EXPORT_SYMBOL_GPL(mmio_stale_data_clear); + void __init check_bugs(void) { identify_boot_cpu(); @@ -100,25 +139,26 @@ void __init check_bugs(void) if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); - /* Allow STIBP in MSR_SPEC_CTRL if supported */ - if (boot_cpu_has(X86_FEATURE_STIBP)) - x86_spec_ctrl_mask |= SPEC_CTRL_STIBP; - /* Select the proper CPU mitigations before patching alternatives: */ spectre_v1_select_mitigation(); spectre_v2_select_mitigation(); + /* + * retbleed_select_mitigation() relies on the state set by + * spectre_v2_select_mitigation(); specifically it wants to know about + * spectre_v2=ibrs. 
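+	 *
+	 * The required ordering below is thus:
+	 *
+	 *	spectre_v2_select_mitigation()
+	 *	retbleed_select_mitigation()
+	 *	spectre_v2_user_select_mitigation()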
+ */ + retbleed_select_mitigation(); + /* + * spectre_v2_user_select_mitigation() relies on the state set by + * retbleed_select_mitigation(); specifically the STIBP selection is + * forced for UNRET or IBPB. + */ + spectre_v2_user_select_mitigation(); ssb_select_mitigation(); l1tf_select_mitigation(); - mds_select_mitigation(); - taa_select_mitigation(); + md_clear_select_mitigation(); srbds_select_mitigation(); - /* - * As MDS and TAA mitigations are inter-related, print MDS - * mitigation until after TAA mitigation selection is done. - */ - mds_print_mitigation(); - arch_smt_update(); #ifdef CONFIG_X86_32 @@ -153,31 +193,17 @@ void __init check_bugs(void) #endif } +/* + * NOTE: For VMX, this function is not called in the vmexit path. + * It uses vmx_spec_ctrl_restore_host() instead. + */ void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) { - u64 msrval, guestval, hostval = x86_spec_ctrl_base; + u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current(); struct thread_info *ti = current_thread_info(); - /* Is MSR_SPEC_CTRL implemented ? */ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { - /* - * Restrict guest_spec_ctrl to supported values. Clear the - * modifiable bits in the host base value and or the - * modifiable bits from the guest value. - */ - guestval = hostval & ~x86_spec_ctrl_mask; - guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; - - /* SSBD controlled in MSR_SPEC_CTRL */ - if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || - static_cpu_has(X86_FEATURE_AMD_SSBD)) - hostval |= ssbd_tif_to_spec_ctrl(ti->flags); - - /* Conditional STIBP enabled? */ - if (static_branch_unlikely(&switch_to_cond_stibp)) - hostval |= stibp_tif_to_spec_ctrl(ti->flags); - if (hostval != guestval) { msrval = setguest ? guestval : hostval; wrmsrl(MSR_IA32_SPEC_CTRL, msrval); @@ -258,14 +284,6 @@ static void __init mds_select_mitigation(void) } } -static void __init mds_print_mitigation(void) -{ - if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) - return; - - pr_info("%s\n", mds_strings[mds_mitigation]); -} - static int __init mds_cmdline(char *str) { if (!boot_cpu_has_bug(X86_BUG_MDS)) @@ -320,7 +338,7 @@ static void __init taa_select_mitigation(void) /* TSX previously disabled by tsx=off */ if (!boot_cpu_has(X86_FEATURE_RTM)) { taa_mitigation = TAA_MITIGATION_TSX_DISABLED; - goto out; + return; } if (cpu_mitigations_off()) { @@ -334,7 +352,7 @@ static void __init taa_select_mitigation(void) */ if (taa_mitigation == TAA_MITIGATION_OFF && mds_mitigation == MDS_MITIGATION_OFF) - goto out; + return; if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) taa_mitigation = TAA_MITIGATION_VERW; @@ -366,18 +384,6 @@ static void __init taa_select_mitigation(void) if (taa_nosmt || cpu_mitigations_auto_nosmt()) cpu_smt_disable(false); - - /* - * Update MDS mitigation, if necessary, as the mds_user_clear is - * now enabled for TAA mitigation. 
- */ - if (mds_mitigation == MDS_MITIGATION_OFF && - boot_cpu_has_bug(X86_BUG_MDS)) { - mds_mitigation = MDS_MITIGATION_FULL; - mds_select_mitigation(); - } -out: - pr_info("%s\n", taa_strings[taa_mitigation]); } static int __init tsx_async_abort_parse_cmdline(char *str) @@ -401,6 +407,154 @@ static int __init tsx_async_abort_parse_cmdline(char *str) } early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); +#undef pr_fmt +#define pr_fmt(fmt) "MMIO Stale Data: " fmt + +enum mmio_mitigations { + MMIO_MITIGATION_OFF, + MMIO_MITIGATION_UCODE_NEEDED, + MMIO_MITIGATION_VERW, +}; + +/* Default mitigation for Processor MMIO Stale Data vulnerabilities */ +static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW; +static bool mmio_nosmt __ro_after_init = false; + +static const char * const mmio_strings[] = { + [MMIO_MITIGATION_OFF] = "Vulnerable", + [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", + [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", +}; + +static void __init mmio_select_mitigation(void) +{ + u64 ia32_cap; + + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || + boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) || + cpu_mitigations_off()) { + mmio_mitigation = MMIO_MITIGATION_OFF; + return; + } + + if (mmio_mitigation == MMIO_MITIGATION_OFF) + return; + + ia32_cap = x86_read_arch_cap_msr(); + + /* + * Enable CPU buffer clear mitigation for host and VMM, if also affected + * by MDS or TAA. Otherwise, enable mitigation for VMM only. + */ + if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && + boot_cpu_has(X86_FEATURE_RTM))) + static_branch_enable(&mds_user_clear); + else + static_branch_enable(&mmio_stale_data_clear); + + /* + * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can + * be propagated to uncore buffers, clearing the Fill buffers on idle + * is required irrespective of SMT state. + */ + if (!(ia32_cap & ARCH_CAP_FBSDP_NO)) + static_branch_enable(&mds_idle_clear); + + /* + * Check if the system has the right microcode. + * + * CPU Fill buffer clear mitigation is enumerated by either an explicit + * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS + * affected systems. + */ + if ((ia32_cap & ARCH_CAP_FB_CLEAR) || + (boot_cpu_has(X86_FEATURE_MD_CLEAR) && + boot_cpu_has(X86_FEATURE_FLUSH_L1D) && + !(ia32_cap & ARCH_CAP_MDS_NO))) + mmio_mitigation = MMIO_MITIGATION_VERW; + else + mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; + + if (mmio_nosmt || cpu_mitigations_auto_nosmt()) + cpu_smt_disable(false); +} + +static int __init mmio_stale_data_parse_cmdline(char *str) +{ + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) + return 0; + + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) { + mmio_mitigation = MMIO_MITIGATION_OFF; + } else if (!strcmp(str, "full")) { + mmio_mitigation = MMIO_MITIGATION_VERW; + } else if (!strcmp(str, "full,nosmt")) { + mmio_mitigation = MMIO_MITIGATION_VERW; + mmio_nosmt = true; + } + + return 0; +} +early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); + +#undef pr_fmt +#define pr_fmt(fmt) "" fmt + +static void __init md_clear_update_mitigation(void) +{ + if (cpu_mitigations_off()) + return; + + if (!static_key_enabled(&mds_user_clear)) + goto out; + + /* + * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data + * mitigation, if necessary. 
+ */ + if (mds_mitigation == MDS_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MDS)) { + mds_mitigation = MDS_MITIGATION_FULL; + mds_select_mitigation(); + } + if (taa_mitigation == TAA_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_TAA)) { + taa_mitigation = TAA_MITIGATION_VERW; + taa_select_mitigation(); + } + if (mmio_mitigation == MMIO_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { + mmio_mitigation = MMIO_MITIGATION_VERW; + mmio_select_mitigation(); + } +out: + if (boot_cpu_has_bug(X86_BUG_MDS)) + pr_info("MDS: %s\n", mds_strings[mds_mitigation]); + if (boot_cpu_has_bug(X86_BUG_TAA)) + pr_info("TAA: %s\n", taa_strings[taa_mitigation]); + if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) + pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); + else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) + pr_info("MMIO Stale Data: Unknown: No mitigations\n"); +} + +static void __init md_clear_select_mitigation(void) +{ + mds_select_mitigation(); + taa_select_mitigation(); + mmio_select_mitigation(); + + /* + * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update + * and print their mitigation after MDS, TAA and MMIO Stale Data + * mitigation selection is done. + */ + md_clear_update_mitigation(); +} + #undef pr_fmt #define pr_fmt(fmt) "SRBDS: " fmt @@ -462,11 +616,13 @@ static void __init srbds_select_mitigation(void) return; /* - * Check to see if this is one of the MDS_NO systems supporting - * TSX that are only exposed to SRBDS when TSX is enabled. + * Check to see if this is one of the MDS_NO systems supporting TSX that + * are only exposed to SRBDS when TSX is enabled or when CPU is affected + * by Processor MMIO Stale Data vulnerability. */ ia32_cap = x86_read_arch_cap_msr(); - if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM)) + if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && + !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; @@ -582,12 +738,180 @@ static int __init nospectre_v1_cmdline(char *str) } early_param("nospectre_v1", nospectre_v1_cmdline); -#undef pr_fmt -#define pr_fmt(fmt) "Spectre V2 : " fmt - static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE; +#undef pr_fmt +#define pr_fmt(fmt) "RETBleed: " fmt + +enum retbleed_mitigation { + RETBLEED_MITIGATION_NONE, + RETBLEED_MITIGATION_UNRET, + RETBLEED_MITIGATION_IBPB, + RETBLEED_MITIGATION_IBRS, + RETBLEED_MITIGATION_EIBRS, +}; + +enum retbleed_mitigation_cmd { + RETBLEED_CMD_OFF, + RETBLEED_CMD_AUTO, + RETBLEED_CMD_UNRET, + RETBLEED_CMD_IBPB, +}; + +const char * const retbleed_strings[] = { + [RETBLEED_MITIGATION_NONE] = "Vulnerable", + [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", + [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB", + [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", + [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", +}; + +static enum retbleed_mitigation retbleed_mitigation __ro_after_init = + RETBLEED_MITIGATION_NONE; +static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init = + RETBLEED_CMD_AUTO; + +static int __ro_after_init retbleed_nosmt = false; + +static int __init retbleed_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + while (str) { + char *next = strchr(str, ','); + if (next) { + *next = 0; + next++; + } + + if (!strcmp(str, "off")) { + retbleed_cmd = RETBLEED_CMD_OFF; + } else if (!strcmp(str, "auto")) { + 
retbleed_cmd = RETBLEED_CMD_AUTO; + } else if (!strcmp(str, "unret")) { + retbleed_cmd = RETBLEED_CMD_UNRET; + } else if (!strcmp(str, "ibpb")) { + retbleed_cmd = RETBLEED_CMD_IBPB; + } else if (!strcmp(str, "nosmt")) { + retbleed_nosmt = true; + } else { + pr_err("Ignoring unknown retbleed option (%s).", str); + } + + str = next; + } + + return 0; +} +early_param("retbleed", retbleed_parse_cmdline); + +#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" +#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" + +static void __init retbleed_select_mitigation(void) +{ + bool mitigate_smt = false; + + if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) + return; + + switch (retbleed_cmd) { + case RETBLEED_CMD_OFF: + return; + + case RETBLEED_CMD_UNRET: + if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) { + retbleed_mitigation = RETBLEED_MITIGATION_UNRET; + } else { + pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n"); + goto do_cmd_auto; + } + break; + + case RETBLEED_CMD_IBPB: + if (!boot_cpu_has(X86_FEATURE_IBPB)) { + pr_err("WARNING: CPU does not support IBPB.\n"); + goto do_cmd_auto; + } else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { + retbleed_mitigation = RETBLEED_MITIGATION_IBPB; + } else { + pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); + goto do_cmd_auto; + } + break; + +do_cmd_auto: + case RETBLEED_CMD_AUTO: + default: + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) + retbleed_mitigation = RETBLEED_MITIGATION_UNRET; + else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB)) + retbleed_mitigation = RETBLEED_MITIGATION_IBPB; + } + + /* + * The Intel mitigation (IBRS or eIBRS) was already selected in + * spectre_v2_select_mitigation(). 'retbleed_mitigation' will + * be set accordingly below. + */ + + break; + } + + switch (retbleed_mitigation) { + case RETBLEED_MITIGATION_UNRET: + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET); + + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + pr_err(RETBLEED_UNTRAIN_MSG); + + mitigate_smt = true; + break; + + case RETBLEED_MITIGATION_IBPB: + setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + mitigate_smt = true; + break; + + default: + break; + } + + if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) && + (retbleed_nosmt || cpu_mitigations_auto_nosmt())) + cpu_smt_disable(false); + + /* + * Let IBRS trump all on Intel without affecting the effects of the + * retbleed= cmdline option. 
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { + switch (spectre_v2_enabled) { + case SPECTRE_V2_IBRS: + retbleed_mitigation = RETBLEED_MITIGATION_IBRS; + break; + case SPECTRE_V2_EIBRS: + case SPECTRE_V2_EIBRS_RETPOLINE: + case SPECTRE_V2_EIBRS_LFENCE: + retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; + break; + default: + pr_err(RETBLEED_INTEL_MSG); + } + } + + pr_info("%s\n", retbleed_strings[retbleed_mitigation]); +} + +#undef pr_fmt +#define pr_fmt(fmt) "Spectre V2 : " fmt + static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = SPECTRE_V2_USER_NONE; static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = @@ -617,6 +941,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; } #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" +#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" #ifdef CONFIG_BPF_SYSCALL void unpriv_ebpf_notify(int new_state) @@ -658,6 +983,7 @@ enum spectre_v2_mitigation_cmd { SPECTRE_V2_CMD_EIBRS, SPECTRE_V2_CMD_EIBRS_RETPOLINE, SPECTRE_V2_CMD_EIBRS_LFENCE, + SPECTRE_V2_CMD_IBRS, }; enum spectre_v2_user_cmd { @@ -698,13 +1024,15 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure) pr_info("spectre_v2_user=%s forced on command line.\n", reason); } +static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; + static enum spectre_v2_user_cmd __init -spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd) +spectre_v2_parse_user_cmdline(void) { char arg[20]; int ret, i; - switch (v2_cmd) { + switch (spectre_v2_cmd) { case SPECTRE_V2_CMD_NONE: return SPECTRE_V2_USER_CMD_NONE; case SPECTRE_V2_CMD_FORCE: @@ -730,15 +1058,16 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd) return SPECTRE_V2_USER_CMD_AUTO; } -static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode) +static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) { - return (mode == SPECTRE_V2_EIBRS || - mode == SPECTRE_V2_EIBRS_RETPOLINE || - mode == SPECTRE_V2_EIBRS_LFENCE); + return mode == SPECTRE_V2_IBRS || + mode == SPECTRE_V2_EIBRS || + mode == SPECTRE_V2_EIBRS_RETPOLINE || + mode == SPECTRE_V2_EIBRS_LFENCE; } static void __init -spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) +spectre_v2_user_select_mitigation(void) { enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; bool smt_possible = IS_ENABLED(CONFIG_SMP); @@ -751,7 +1080,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) cpu_smt_control == CPU_SMT_NOT_SUPPORTED) smt_possible = false; - cmd = spectre_v2_parse_user_cmdline(v2_cmd); + cmd = spectre_v2_parse_user_cmdline(); switch (cmd) { case SPECTRE_V2_USER_CMD_NONE: goto set_mode; @@ -799,12 +1128,12 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) } /* - * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not - * required. + * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible, + * STIBP is not required. 
*/ if (!boot_cpu_has(X86_FEATURE_STIBP) || !smt_possible || - spectre_v2_in_eibrs_mode(spectre_v2_enabled)) + spectre_v2_in_ibrs_mode(spectre_v2_enabled)) return; /* @@ -816,6 +1145,14 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) mode = SPECTRE_V2_USER_STRICT_PREFERRED; + if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || + retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { + if (mode != SPECTRE_V2_USER_STRICT && + mode != SPECTRE_V2_USER_STRICT_PREFERRED) + pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n"); + mode = SPECTRE_V2_USER_STRICT_PREFERRED; + } + spectre_v2_user_stibp = mode; set_mode: @@ -829,6 +1166,7 @@ static const char * const spectre_v2_strings[] = { [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS", [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE", [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines", + [SPECTRE_V2_IBRS] = "Mitigation: IBRS", }; static const struct { @@ -846,6 +1184,7 @@ static const struct { { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false }, { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, { "auto", SPECTRE_V2_CMD_AUTO, false }, + { "ibrs", SPECTRE_V2_CMD_IBRS, false }, }; static void __init spec_v2_print_cond(const char *reason, bool secure) @@ -908,6 +1247,30 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) return SPECTRE_V2_CMD_AUTO; } + if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) { + pr_err("%s selected but not compiled in. Switching to AUTO select\n", + mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + + if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { + pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", + mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + + if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { + pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", + mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + + if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) { + pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", + mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + spec_v2_print_cond(mitigation_options[i].option, mitigation_options[i].secure); return cmd; @@ -923,6 +1286,69 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) return SPECTRE_V2_RETPOLINE; } +/* Disable in-kernel use of non-RSB RET predictors */ +static void __init spec_ctrl_disable_kernel_rrsba(void) +{ + u64 ia32_cap; + + if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) + return; + + ia32_cap = x86_read_arch_cap_msr(); + + if (ia32_cap & ARCH_CAP_RRSBA) { + x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; + update_spec_ctrl(x86_spec_ctrl_base); + } +} + +static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode) +{ + /* + * Similar to context switches, there are two types of RSB attacks + * after VM exit: + * + * 1) RSB underflow + * + * 2) Poisoned RSB entry + * + * When retpoline is enabled, both are mitigated by filling/clearing + * the RSB. + * + * When IBRS is enabled, while #1 would be mitigated by the IBRS branch + * prediction isolation protections, RSB still needs to be cleared + * because of #2. Note that SMEP provides no protection here, unlike + * user-space-poisoned RSB entries. 
+ * + * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB + * bug is present then a LITE version of RSB protection is required: + * just a single CALL needs to retire before a RET is executed. + */ + switch (mode) { + case SPECTRE_V2_NONE: + return; + + case SPECTRE_V2_EIBRS_LFENCE: + case SPECTRE_V2_EIBRS: + if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { + setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); + pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); + } + return; + + case SPECTRE_V2_EIBRS_RETPOLINE: + case SPECTRE_V2_RETPOLINE: + case SPECTRE_V2_LFENCE: + case SPECTRE_V2_IBRS: + setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); + pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n"); + return; + } + + pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit"); + dump_stack(); +} + static void __init spectre_v2_select_mitigation(void) { enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); @@ -947,6 +1373,15 @@ static void __init spectre_v2_select_mitigation(void) break; } + if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) && + boot_cpu_has_bug(X86_BUG_RETBLEED) && + retbleed_cmd != RETBLEED_CMD_OFF && + boot_cpu_has(X86_FEATURE_IBRS) && + boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { + mode = SPECTRE_V2_IBRS; + break; + } + mode = spectre_v2_select_retpoline(); break; @@ -963,6 +1398,10 @@ static void __init spectre_v2_select_mitigation(void) mode = spectre_v2_select_retpoline(); break; + case SPECTRE_V2_CMD_IBRS: + mode = SPECTRE_V2_IBRS; + break; + case SPECTRE_V2_CMD_EIBRS: mode = SPECTRE_V2_EIBRS; break; @@ -979,10 +1418,9 @@ static void __init spectre_v2_select_mitigation(void) if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); - if (spectre_v2_in_eibrs_mode(mode)) { - /* Force it so VMEXIT will restore correctly */ + if (spectre_v2_in_ibrs_mode(mode)) { x86_spec_ctrl_base |= SPEC_CTRL_IBRS; - wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + update_spec_ctrl(x86_spec_ctrl_base); } switch (mode) { @@ -990,6 +1428,12 @@ static void __init spectre_v2_select_mitigation(void) case SPECTRE_V2_EIBRS: break; + case SPECTRE_V2_IBRS: + setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); + if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) + pr_warn(SPECTRE_V2_IBRS_PERF_MSG); + break; + case SPECTRE_V2_LFENCE: case SPECTRE_V2_EIBRS_LFENCE: setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); @@ -1001,43 +1445,96 @@ static void __init spectre_v2_select_mitigation(void) break; } + /* + * Disable alternate RSB predictions in kernel when indirect CALLs and + * JMPs get protection against BHI and Intramode-BTI, but RET + * prediction from a non-RSB predictor is still a risk. + */ + if (mode == SPECTRE_V2_EIBRS_LFENCE || + mode == SPECTRE_V2_EIBRS_RETPOLINE || + mode == SPECTRE_V2_RETPOLINE) + spec_ctrl_disable_kernel_rrsba(); + spectre_v2_enabled = mode; pr_info("%s\n", spectre_v2_strings[mode]); /* - * If spectre v2 protection has been enabled, unconditionally fill - * RSB during a context switch; this protects against two independent - * issues: + * If Spectre v2 protection has been enabled, fill the RSB during a + * context switch. In general there are two types of RSB attacks + * across context switches, for which the CALLs/RETs may be unbalanced. * - * - RSB underflow (and switch to BTB) on Skylake+ - * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs + * 1) RSB underflow + * + * Some Intel parts have "bottomless RSB".
When the RSB is empty, + * speculated return targets may come from the branch predictor, + * which could have a user-poisoned BTB or BHB entry. + * + * AMD has it even worse: *all* returns are speculated from the BTB, + * regardless of the state of the RSB. + * + * When IBRS or eIBRS is enabled, the "user -> kernel" attack + * scenario is mitigated by the IBRS branch prediction isolation + * properties, so the RSB buffer filling wouldn't be necessary to + * protect against this type of attack. + * + * The "user -> user" attack scenario is mitigated by RSB filling. + * + * 2) Poisoned RSB entry + * + * If the 'next' in-kernel return stack is shorter than 'prev', + * 'next' could be tricked into speculating with a user-poisoned RSB + * entry. + * + * The "user -> kernel" attack scenario is mitigated by SMEP and + * eIBRS. + * + * The "user -> user" scenario, also known as SpectreBHB, requires + * RSB clearing. + * + * So to mitigate all cases, unconditionally fill RSB on context + * switches. + * + * FIXME: Is this pointless for retbleed-affected AMD? */ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); + spectre_v2_determine_rsb_fill_type_at_vmexit(mode); + /* - * Retpoline means the kernel is safe because it has no indirect - * branches. Enhanced IBRS protects firmware too, so, enable restricted - * speculation around firmware calls only when Enhanced IBRS isn't - * supported. + * Retpoline protects the kernel, but doesn't protect firmware. IBRS + * and Enhanced IBRS protect firmware too, so enable IBRS around + * firmware calls only when IBRS / Enhanced IBRS aren't otherwise + * enabled. * * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because * the user might select retpoline on the kernel command line and if * the CPU supports Enhanced IBRS, kernel might un-intentionally not * enable IBRS around firmware calls. */ - if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) { + if (boot_cpu_has_bug(X86_BUG_RETBLEED) && + boot_cpu_has(X86_FEATURE_IBPB) && + (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { + + if (retbleed_cmd != RETBLEED_CMD_IBPB) { + setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); + pr_info("Enabling Speculation Barrier for firmware calls\n"); + } + + } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); pr_info("Enabling Restricted Speculation for firmware calls\n"); } /* Set up IBPB and STIBP depending on the general spectre V2 command */ - spectre_v2_user_select_mitigation(cmd); + spectre_v2_cmd = cmd; } static void update_stibp_msr(void * __unused) { - wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); + update_spec_ctrl(val); } /* Update x86_spec_ctrl_base in case SMT state changed. */ @@ -1072,6 +1569,8 @@ static void update_indir_branch_cond(void) /* Update the static key controlling the MDS CPU buffer clear in idle */ static void update_mds_branch_idle(void) { + u64 ia32_cap = x86_read_arch_cap_msr(); + /* * Enable the idle clearing if SMT is active on CPUs which are * affected only by MSBDS and not any other MDS variant. 
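The static keys wired up in the hunks above only decide when a CPU buffer clear runs (on idle, on return to userspace, or before guest MMIO in KVM). The clearing itself is the MD_CLEAR-enumerated side effect of VERW. A minimal sketch of that idiom follows, assuming x86-64 inline asm and the __KERNEL_DS selector; the helper name is hypothetical, and the in-tree implementation (mds_clear_cpu_buffers() in <asm/nospec-branch.h>) may differ in detail:

#include <asm/segment.h>	/* __KERNEL_DS */

/*
 * Sketch: on parts with the MD_CLEAR microcode update, VERW with a
 * writable data segment selector as a memory operand overwrites the
 * affected CPU buffers (fill buffers, load ports, store buffers) as
 * an architectural side effect. The "m" constraint is required; the
 * buffer clear is only triggered by the memory-operand form of VERW.
 */
static inline void clear_cpu_buffers_sketch(void)
{
	static const u16 ds = __KERNEL_DS;

	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

Gating the call sites behind mds_idle_clear, mds_user_clear and mmio_stale_data_clear keeps the VERW cost off the fast paths of unaffected CPUs.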
@@ -1083,14 +1582,17 @@ static void update_mds_branch_idle(void) if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) return; - if (sched_smt_active()) + if (sched_smt_active()) { static_branch_enable(&mds_idle_clear); - else + } else if (mmio_mitigation == MMIO_MITIGATION_OFF || + (ia32_cap & ARCH_CAP_FBSDP_NO)) { static_branch_disable(&mds_idle_clear); + } } #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" +#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" void cpu_bugs_smt_update(void) { @@ -1135,6 +1637,16 @@ void cpu_bugs_smt_update(void) break; } + switch (mmio_mitigation) { + case MMIO_MITIGATION_VERW: + case MMIO_MITIGATION_UCODE_NEEDED: + if (sched_smt_active()) + pr_warn_once(MMIO_MSG_SMT); + break; + case MMIO_MITIGATION_OFF: + break; + } + mutex_unlock(&spec_ctrl_mutex); } @@ -1238,16 +1750,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) break; } - /* - * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper - * bit in the mask to allow guests to use the mitigation even in the - * case where the host does not enable it. - */ - if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || - static_cpu_has(X86_FEATURE_AMD_SSBD)) { - x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; - } - /* * We have three CPU feature flags that are in play here: * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. @@ -1265,7 +1767,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) x86_amd_ssb_disable(); } else { x86_spec_ctrl_base |= SPEC_CTRL_SSBD; - wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + update_spec_ctrl(x86_spec_ctrl_base); } } @@ -1483,7 +1985,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) void x86_spec_ctrl_setup_ap(void) { if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) - wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + update_spec_ctrl(x86_spec_ctrl_base); if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) x86_amd_ssb_disable(); @@ -1704,9 +2206,26 @@ static ssize_t tsx_async_abort_show_state(char *buf) sched_smt_active() ? "vulnerable" : "disabled"); } +static ssize_t mmio_stale_data_show_state(char *buf) +{ + if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) + return sysfs_emit(buf, "Unknown: No mitigations\n"); + + if (mmio_mitigation == MMIO_MITIGATION_OFF) + return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + return sysfs_emit(buf, "%s; SMT Host state unknown\n", + mmio_strings[mmio_mitigation]); + } + + return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], + sched_smt_active() ? 
"vulnerable" : "disabled"); +} + static char *stibp_state(void) { - if (spectre_v2_in_eibrs_mode(spectre_v2_enabled)) + if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) return ""; switch (spectre_v2_user_stibp) { @@ -1736,6 +2255,19 @@ static char *ibpb_state(void) return ""; } +static char *pbrsb_eibrs_state(void) +{ + if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { + if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || + boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) + return ", PBRSB-eIBRS: SW sequence"; + else + return ", PBRSB-eIBRS: Vulnerable"; + } else { + return ", PBRSB-eIBRS: Not affected"; + } +} + static ssize_t spectre_v2_show_state(char *buf) { if (spectre_v2_enabled == SPECTRE_V2_LFENCE) @@ -1748,12 +2280,13 @@ static ssize_t spectre_v2_show_state(char *buf) spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); - return sprintf(buf, "%s%s%s%s%s%s\n", + return sprintf(buf, "%s%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], ibpb_state(), boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", stibp_state(), boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", + pbrsb_eibrs_state(), spectre_v2_module_string()); } @@ -1762,6 +2295,25 @@ static ssize_t srbds_show_state(char *buf) return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); } +static ssize_t retbleed_show_state(char *buf) +{ + if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || + retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); + + return sprintf(buf, "%s; SMT %s\n", + retbleed_strings[retbleed_mitigation], + !sched_smt_active() ? "disabled" : + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? 
+ "enabled with STIBP protection" : "vulnerable"); + } + + return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]); +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -1804,6 +2356,13 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_SRBDS: return srbds_show_state(buf); + case X86_BUG_MMIO_STALE_DATA: + case X86_BUG_MMIO_UNKNOWN: + return mmio_stale_data_show_state(buf); + + case X86_BUG_RETBLEED: + return retbleed_show_state(buf); + default: break; } @@ -1855,4 +2414,17 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char * { return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); } + +ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) +{ + if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN); + else + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); +} + +ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 9c8fc6f513ed..56573241d029 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1024,6 +1024,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) #define NO_SWAPGS BIT(6) #define NO_ITLB_MULTIHIT BIT(7) #define NO_SPECTRE_V2 BIT(8) +#define NO_MMIO BIT(9) +#define NO_EIBRS_PBRSB BIT(10) #define VULNWL(vendor, family, model, whitelist) \ X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist) @@ -1044,6 +1046,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION), /* Intel Family 6 */ + VULNWL_INTEL(TIGERLAKE, NO_MMIO), + VULNWL_INTEL(TIGERLAKE_L, NO_MMIO), + VULNWL_INTEL(ALDERLAKE, NO_MMIO), + VULNWL_INTEL(ALDERLAKE_L, NO_MMIO), + VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), @@ -1062,9 +1069,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB), /* * Technically, swapgs isn't serializing on AMD (despite it previously @@ -1074,42 +1081,80 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { * good enough for our purposes. 
*/ - VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT), + VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB), + VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB), + VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), /* AMD Family 0xf - 0x12 */ - VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ - VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), + VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), /* Zhaoxin Family 7 */ - VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS), - VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS), + VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO), + VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO), {} }; +#define VULNBL(vendor, family, model, blacklist) \ + X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist) + #define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ INTEL_FAM6_##model, steppings, \ X86_FEATURE_ANY, issues) +#define VULNBL_AMD(family, blacklist) \ + VULNBL(AMD, family, X86_MODEL_ANY, blacklist) + +#define VULNBL_HYGON(family, blacklist) \ + VULNBL(HYGON, family, X86_MODEL_ANY, blacklist) + #define SRBDS BIT(0) +/* CPU is affected by X86_BUG_MMIO_STALE_DATA */ +#define MMIO BIT(1) +/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */ +#define MMIO_SBDS BIT(2) +/* CPU is affected by RETbleed, speculating where you would not expect it */ +#define RETBLEED BIT(3) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), - VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS), - VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS), - VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS), - VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), + 
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED), + VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS), + + VULNBL_AMD(0x15, RETBLEED), + VULNBL_AMD(0x16, RETBLEED), + VULNBL_AMD(0x17, RETBLEED), + VULNBL_HYGON(0x18, RETBLEED), {} }; @@ -1130,6 +1175,13 @@ u64 x86_read_arch_cap_msr(void) return ia32_cap; } +static bool arch_cap_mmio_immune(u64 ia32_cap) +{ + return (ia32_cap & ARCH_CAP_FBSDP_NO && + ia32_cap & ARCH_CAP_PSDP_NO && + ia32_cap & ARCH_CAP_SBDR_SSDP_NO); +} + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) { u64 ia32_cap = x86_read_arch_cap_msr(); @@ -1183,12 +1235,43 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) /* * SRBDS affects CPUs which support RDRAND or RDSEED and are listed * in the vulnerability blacklist. + * + * Some of the implications and mitigation of Shared Buffers Data + * Sampling (SBDS) are similar to SRBDS. Give SBDS the same treatment as + * SRBDS. */ if ((cpu_has(c, X86_FEATURE_RDRAND) || cpu_has(c, X86_FEATURE_RDSEED)) && - cpu_matches(cpu_vuln_blacklist, SRBDS)) + cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS)) setup_force_cpu_bug(X86_BUG_SRBDS); + /* + * Processor MMIO Stale Data bug enumeration + * + * The affected CPU list is generally enough to enumerate the vulnerability, + * but in the virtualization case also check the ARCH_CAP MSR bits, as the + * VMM may not want the guest to enumerate the bug. + * + * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist + * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
+ */ + if (!arch_cap_mmio_immune(ia32_cap)) { + if (cpu_matches(cpu_vuln_blacklist, MMIO)) + setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); + else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO)) + setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN); + } + + if (!cpu_has(c, X86_FEATURE_BTC_NO)) { + if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA)) + setup_force_cpu_bug(X86_BUG_RETBLEED); + } + + if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) && + !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) && + !(ia32_cap & ARCH_CAP_PBRSB_NO)) + setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB); + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 093f5fc860e3..91df90abc1d9 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -60,6 +60,8 @@ extern void tsx_disable(void); static inline void tsx_init(void) { } #endif /* CONFIG_CPU_SUP_INTEL */ +extern void init_spectral_chicken(struct cpuinfo_x86 *c); + extern void get_cpu_cap(struct cpuinfo_x86 *c); extern void get_cpu_address_sizes(struct cpuinfo_x86 *c); extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c index 29a3bedabd06..d7541851288e 100644 --- a/arch/x86/kernel/cpu/feat_ctl.c +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include #include #include #include -#include "cpu.h" #undef pr_fmt #define pr_fmt(fmt) "x86/cpu: " fmt diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index b78c471ec344..205fa420ee7c 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -318,6 +318,12 @@ static void init_hygon(struct cpuinfo_x86 *c) /* get apicid instead of initial apic id from cpuid */ c->apicid = hard_smp_processor_id(); + /* + * XXX someone from Hygon needs to confirm this DTRT + * + init_spectral_chicken(c); + */ + set_cpu_cap(c, X86_FEATURE_ZEN); set_cpu_cap(c, X86_FEATURE_CPB); @@ -336,8 +342,8 @@ static void init_hygon(struct cpuinfo_x86 *c) * msr_set_bit() uses the safe accessors, too, even if the MSR * is not present. 
*/ - msr_set_bit(MSR_F10H_DECFG, - MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); + msr_set_bit(MSR_AMD64_DE_CFG, + MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT); /* A serializing LFENCE stops RDTSC speculation */ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 816fdbec795a..c6ad53e38f65 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -88,7 +88,7 @@ static bool ring3mwait_disabled __read_mostly; static int __init ring3mwait_disable(char *__unused) { ring3mwait_disabled = true; - return 0; + return 1; } __setup("ring3mwait=disable", ring3mwait_disable); diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index f73f1184b1c1..09f7c652346a 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -1457,10 +1457,23 @@ out_free: kfree(bank); } +static void __threshold_remove_device(struct threshold_bank **bp) +{ + unsigned int bank, numbanks = this_cpu_read(mce_num_banks); + + for (bank = 0; bank < numbanks; bank++) { + if (!bp[bank]) + continue; + + threshold_remove_bank(bp[bank]); + bp[bank] = NULL; + } + kfree(bp); +} + int mce_threshold_remove_device(unsigned int cpu) { struct threshold_bank **bp = this_cpu_read(threshold_banks); - unsigned int bank, numbanks = this_cpu_read(mce_num_banks); if (!bp) return 0; @@ -1471,13 +1484,7 @@ int mce_threshold_remove_device(unsigned int cpu) */ this_cpu_write(threshold_banks, NULL); - for (bank = 0; bank < numbanks; bank++) { - if (bp[bank]) { - threshold_remove_bank(bp[bank]); - bp[bank] = NULL; - } - } - kfree(bp); + __threshold_remove_device(bp); return 0; } @@ -1514,15 +1521,14 @@ int mce_threshold_create_device(unsigned int cpu) if (!(this_cpu_read(bank_map) & (1 << bank))) continue; err = threshold_create_bank(bp, cpu, bank); - if (err) - goto out_err; + if (err) { + __threshold_remove_device(bp); + return err; + } } this_cpu_write(threshold_banks, bp); if (thresholding_irq_en) mce_threshold_vector = amd_threshold_interrupt; return 0; -out_err: - mce_threshold_remove_device(cpu); - return err; } diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 3f6b137ef4e6..234a96f25248 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -441,7 +441,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p return ret; native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - if (rev >= mc->hdr.patch_id) + + /* + * Allow application of the same revision to pick up SMT-specific + * changes even if the revision of the other SMT thread is already + * up-to-date. + */ + if (rev > mc->hdr.patch_id) return ret; if (!__apply_microcode_amd(mc)) { @@ -523,8 +529,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax) native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - /* Check whether we have saved a new patch already: */ - if (*new_rev && rev < mc->hdr.patch_id) { + /* + * Check whether a new patch has been saved already. Also, allow application of + * the same revision in order to pick up SMT-thread-specific configuration even + * if the sibling SMT thread already has an up-to-date revision. 
+ */ + if (*new_rev && rev <= mc->hdr.patch_id) { if (!__apply_microcode_amd(mc)) { *new_rev = mc->hdr.patch_id; return; @@ -783,6 +793,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, kfree(patch); return -EINVAL; } + patch->size = *patch_size; mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE); proc_id = mc_hdr->processor_rev_id; @@ -864,7 +875,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) return ret; memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); - memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE)); + memcpy(amd_ucode_patch, p->data, min_t(u32, p->size, PATCH_MAX_SIZE)); return ret; } diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index bbbd248fe913..0b1732b98e71 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -775,9 +775,9 @@ static struct subsys_interface mc_cpu_interface = { }; /** - * mc_bp_resume - Update boot CPU microcode during resume. + * microcode_bsp_resume - Update boot CPU microcode during resume. */ -static void mc_bp_resume(void) +void microcode_bsp_resume(void) { int cpu = smp_processor_id(); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; @@ -789,7 +789,7 @@ static void mc_bp_resume(void) } static struct syscore_ops mc_syscore_ops = { - .resume = mc_bp_resume, + .resume = microcode_bsp_resume, }; static int mc_cpu_starting(unsigned int cpu) diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 65d11711cd7b..021cd067733e 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -84,7 +84,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0) inc_irq_stat(hyperv_stimer0_count); if (hv_stimer0_handler) hv_stimer0_handler(); - add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0); + add_interrupt_randomness(HYPERV_STIMER0_VECTOR); ack_APIC_irq(); set_irq_regs(old_regs); diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c index 6a80f36b5d59..5f436cb4f7c4 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.c +++ b/arch/x86/kernel/cpu/mtrr/mtrr.c @@ -794,8 +794,6 @@ void mtrr_ap_init(void) if (!use_intel() || mtrr_aps_delayed_init) return; - rcu_cpu_starting(smp_processor_id()); - /* * Ideally we should hold mtrr_mutex here to avoid mtrr entries * changed, but this routine will be called in cpu boot time, diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 0daf2f1cf7a8..465dce141bfc 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -416,6 +416,7 @@ static int pseudo_lock_fn(void *_rdtgrp) struct pseudo_lock_region *plr = rdtgrp->plr; u32 rmid_p, closid_p; unsigned long i; + u64 saved_msr; #ifdef CONFIG_KASAN /* * The registers used for local register variables are also used @@ -459,6 +460,7 @@ static int pseudo_lock_fn(void *_rdtgrp) * the buffer and evict pseudo-locked memory read earlier from the * cache. 
*/ + saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL); __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); closid_p = this_cpu_read(pqr_state.cur_closid); rmid_p = this_cpu_read(pqr_state.cur_rmid); @@ -510,7 +512,7 @@ static int pseudo_lock_fn(void *_rdtgrp) __wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p); /* Re-enable the hardware prefetcher(s) */ - wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0); + wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr); local_irq_enable(); plr->thread_done = 1; @@ -867,6 +869,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) static int measure_cycles_lat_fn(void *_plr) { struct pseudo_lock_region *plr = _plr; + u32 saved_low, saved_high; unsigned long i; u64 start, end; void *mem_r; @@ -875,6 +878,7 @@ static int measure_cycles_lat_fn(void *_plr) /* * Disable hardware prefetchers. */ + rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); mem_r = READ_ONCE(plr->kmem); /* @@ -891,7 +895,7 @@ static int measure_cycles_lat_fn(void *_plr) end = rdtsc_ordered(); trace_pseudo_lock_mem_latency((u32)(end - start)); } - wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0); + wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); local_irq_enable(); plr->thread_done = 1; wake_up_interruptible(&plr->lock_thread_wq); @@ -936,6 +940,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0; struct perf_event *miss_event, *hit_event; int hit_pmcnum, miss_pmcnum; + u32 saved_low, saved_high; unsigned int line_size; unsigned int size; unsigned long i; @@ -969,6 +974,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, /* * Disable hardware prefetchers. */ + rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); /* Initialize rest of local variables */ @@ -1027,7 +1033,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, */ rmb(); /* Re-enable hardware prefetchers */ - wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0); + wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); local_irq_enable(); out_hit: perf_event_release_kernel(hit_event); diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 866c9a9bcdee..82fe492121bb 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -26,6 +26,7 @@ struct cpuid_bit { static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, + { X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 }, { X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 }, { X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 }, { X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 }, diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 91288da29599..37d48ab3d077 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -96,6 +96,8 @@ int detect_extended_topology(struct cpuinfo_x86 *c) unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width; unsigned int core_select_mask, core_level_siblings; unsigned int die_select_mask, die_level_siblings; + unsigned int pkg_mask_width; + bool die_level_present = false; int leaf; leaf = detect_extended_topology_leaf(c); @@ -110,10 +112,10 @@ int detect_extended_topology(struct cpuinfo_x86 *c) core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); 
core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); die_level_siblings = LEVEL_MAX_SIBLINGS(ebx); - die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); + pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); sub_index = 1; - do { + while (true) { cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx); /* @@ -126,23 +128,33 @@ int detect_extended_topology(struct cpuinfo_x86 *c) die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); } if (LEAFB_SUBTYPE(ecx) == DIE_TYPE) { + die_level_present = true; die_level_siblings = LEVEL_MAX_SIBLINGS(ebx); die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); } - sub_index++; - } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE); + if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE) + pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); + else + break; - core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; + sub_index++; + } + + core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width; die_select_mask = (~(-1 << die_plus_mask_width)) >> core_plus_mask_width; c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width) & core_select_mask; - c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid, - core_plus_mask_width) & die_select_mask; + + if (die_level_present) { + c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid, + core_plus_mask_width) & die_select_mask; + } + c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, - die_plus_mask_width); + pkg_mask_width); /* * Reinit the apicid, now that we have extended initial_apicid. */ diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c index e2ad30e474f8..da06bbb5d68e 100644 --- a/arch/x86/kernel/cpu/tsx.c +++ b/arch/x86/kernel/cpu/tsx.c @@ -58,24 +58,6 @@ void tsx_enable(void) wrmsrl(MSR_IA32_TSX_CTRL, tsx); } -static bool __init tsx_ctrl_is_supported(void) -{ - u64 ia32_cap = x86_read_arch_cap_msr(); - - /* - * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this - * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES. - * - * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a - * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES - * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get - * MSR_IA32_TSX_CTRL support even after a microcode update. Thus, - * tsx= cmdline requests will do nothing on CPUs without - * MSR_IA32_TSX_CTRL support. - */ - return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR); -} - static enum tsx_ctrl_states x86_get_tsx_auto_mode(void) { if (boot_cpu_has_bug(X86_BUG_TAA)) @@ -89,9 +71,22 @@ void __init tsx_init(void) char arg[5] = {}; int ret; - if (!tsx_ctrl_is_supported()) + /* + * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this + * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES. + * + * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a + * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES + * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get + * MSR_IA32_TSX_CTRL support even after a microcode update. Thus, + * tsx= cmdline requests will do nothing on CPUs without + * MSR_IA32_TSX_CTRL support. 
+ */ + if (!(x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR)) return; + setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL); + ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg)); if (ret >= 0) { if (!strcmp(arg, "on")) { diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 7edbd5ee5ed4..d096b5a1dbeb 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -93,6 +93,7 @@ static int ftrace_verify_code(unsigned long ip, const char *old_code) /* Make sure it is what we expect it to be */ if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) { + ftrace_expected = old_code; WARN_ON(1); return -EINVAL; } @@ -308,7 +309,7 @@ union ftrace_op_code_union { } __attribute__((packed)); }; -#define RET_SIZE 1 +#define RET_SIZE (IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS)) static unsigned long create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) @@ -321,12 +322,12 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) unsigned long offset; unsigned long npages; unsigned long size; - unsigned long retq; unsigned long *ptr; void *trampoline; void *ip; /* 48 8b 15 is movq (%rip), %rdx */ unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 }; + unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE }; union ftrace_op_code_union op_ptr; int ret; @@ -366,10 +367,10 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) ip = trampoline + size; /* The trampoline ends with ret(q) */ - retq = (unsigned long)ftrace_stub; - ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE); - if (WARN_ON(ret < 0)) - goto fail; + if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) + memcpy(ip, text_gen_insn(JMP32_INSN_OPCODE, ip, &__x86_return_thunk), JMP32_INSN_SIZE); + else + memcpy(ip, retq, sizeof(retq)); /* No need to test direct calls on created trampolines */ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S index e405fe1a8bf4..a0ed0e4a2c0c 100644 --- a/arch/x86/kernel/ftrace_32.S +++ b/arch/x86/kernel/ftrace_32.S @@ -19,7 +19,7 @@ #endif SYM_FUNC_START(__fentry__) - ret + RET SYM_FUNC_END(__fentry__) EXPORT_SYMBOL(__fentry__) @@ -84,7 +84,7 @@ ftrace_graph_call: /* This is weak to keep gas from relaxing the jumps */ SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK) - ret + RET SYM_CODE_END(ftrace_caller) SYM_CODE_START(ftrace_regs_caller) @@ -177,7 +177,7 @@ SYM_CODE_START(ftrace_graph_caller) popl %edx popl %ecx popl %eax - ret + RET SYM_CODE_END(ftrace_graph_caller) .globl return_to_handler diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index ac3d5f22fe64..5b2dabedcf66 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -132,7 +132,7 @@ #ifdef CONFIG_DYNAMIC_FTRACE SYM_FUNC_START(__fentry__) - retq + RET SYM_FUNC_END(__fentry__) EXPORT_SYMBOL(__fentry__) @@ -170,10 +170,10 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) /* * This is weak to keep gas from relaxing the jumps. - * It is also used to copy the retq for trampolines. 
*/ SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK) - retq + UNWIND_HINT_FUNC + RET SYM_FUNC_END(ftrace_epilogue) SYM_FUNC_START(ftrace_regs_caller) @@ -265,7 +265,7 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL) restore_mcount_regs 8 /* Restore flags */ popfq - UNWIND_HINT_RET_OFFSET + UNWIND_HINT_FUNC jmp ftrace_epilogue SYM_FUNC_END(ftrace_regs_caller) @@ -287,7 +287,7 @@ fgraph_trace: #endif SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL) - retq + RET trace: /* save_mcount_regs fills in first two parameters */ @@ -319,12 +319,12 @@ SYM_FUNC_START(ftrace_graph_caller) restore_mcount_regs - retq + RET SYM_FUNC_END(ftrace_graph_caller) SYM_CODE_START(return_to_handler) UNWIND_HINT_EMPTY - subq $24, %rsp + subq $16, %rsp /* Save the return values */ movq %rax, (%rsp) @@ -336,7 +336,19 @@ SYM_CODE_START(return_to_handler) movq %rax, %rdi movq 8(%rsp), %rdx movq (%rsp), %rax - addq $24, %rsp - JMP_NOSPEC rdi + + addq $16, %rsp + /* + * Jump back to the old return address. This cannot be JMP_NOSPEC rdi + * since IBT would demand that contain ENDBR, which simply isn't so for + * return addresses. Use a retpoline here to keep the RSB balanced. + */ + ANNOTATE_INTRA_FUNCTION_CALL + call .Ldo_rop + int3 +.Ldo_rop: + mov %rdi, (%rsp) + UNWIND_HINT_FUNC + RET SYM_CODE_END(return_to_handler) #endif diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 05e117137b45..efe13ab366f4 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -419,6 +419,8 @@ static void __init clear_bss(void) { memset(__bss_start, 0, (unsigned long) __bss_stop - (unsigned long) __bss_start); + memset(__brk_base, 0, + (unsigned long) __brk_limit - (unsigned long) __brk_base); } static unsigned long get_cmd_line_ptr(void) diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 7ed84c282233..3f1691b89231 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -354,7 +355,7 @@ setup_once: #endif andl $0,setup_once_ref /* Once is enough, thanks */ - ret + RET SYM_FUNC_START(early_idt_handler_array) # 36(%esp) %eflags diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 3c417734790f..0424c2a6c15b 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -321,6 +321,8 @@ SYM_CODE_END(start_cpu0) SYM_CODE_START_NOALIGN(vc_boot_ghcb) UNWIND_HINT_IRET_REGS offset=8 + ANNOTATE_UNRET_END + /* Build pt_regs */ PUSH_AND_CLEAR_REGS @@ -378,6 +380,7 @@ SYM_CODE_START(early_idt_handler_array) SYM_CODE_END(early_idt_handler_array) SYM_CODE_START_LOCAL(early_idt_handler_common) + ANNOTATE_UNRET_END /* * The stack is the hardware frame, an error code or zero, and the * vector number. 
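The return_to_handler rewrite above replaces an indirect JMP_NOSPEC with a CALL/RET pair: under IBT an indirect JMP would require an ENDBR at the destination, which return addresses don't carry, and the CALL keeps the RSB balanced. A hedged stand-alone illustration of the same trick follows; jump_via_ret is a hypothetical name, this sketch omits the unwind hints the real assembly needs, and it assumes the SysV x86-64 ABI (first argument in %rdi):

/* Hypothetical: tail-jumps to 'target' via RET instead of an indirect JMP. */
void jump_via_ret(void *target);

asm(
".globl jump_via_ret\n"
"jump_via_ret:\n"
"	call 1f\n"		/* pushes &int3, creating a matching RSB entry */
"	int3\n"			/* speculation trap: a predicted RET lands here and stops */
"1:	mov %rdi, (%rsp)\n"	/* overwrite the pushed slot with 'target' */
"	ret\n"			/* architecturally jumps to 'target' */
);

Because the CALL and the RET pair up, the RSB stays balanced, and the only speculative RET target is the int3; when 'target' eventually returns, control goes back to jump_via_ret's caller, so this behaves as a tail call.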
@@ -424,6 +427,8 @@ SYM_CODE_END(early_idt_handler_common) SYM_CODE_START_NOALIGN(vc_no_ghcb) UNWIND_HINT_IRET_REGS offset=8 + ANNOTATE_UNRET_END + /* Build pt_regs */ PUSH_AND_CLEAR_REGS diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 4ab7a9757e52..574df24a8e5a 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -1325,8 +1325,12 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) hpet_rtc_timer_reinit(); memset(&curr_time, 0, sizeof(struct rtc_time)); - if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) - mc146818_get_time(&curr_time); + if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) { + if (unlikely(mc146818_get_time(&curr_time) < 0)) { + pr_err_ratelimited("unable to read current time from RTC\n"); + return IRQ_HANDLED; + } + } if (hpet_rtc_flags & RTC_UIE && curr_time.tm_sec != hpet_prev_update_sec) { diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c index 7dfb1e808928..bd218470d145 100644 --- a/arch/x86/kernel/ima_arch.c +++ b/arch/x86/kernel/ima_arch.c @@ -88,6 +88,8 @@ const char * const *arch_get_ima_policy(void) if (IS_ENABLED(CONFIG_IMA_ARCH_POLICY) && arch_ima_get_secureboot()) { if (IS_ENABLED(CONFIG_MODULE_SIG)) set_module_sig_enforced(); + if (IS_ENABLED(CONFIG_KEXEC_SIG)) + set_kexec_sig_enforced(); return sb_arch_rules; } return NULL; } diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S index 0db0375235b4..a9c36400bdf8 100644 --- a/arch/x86/kernel/irqflags.S +++ b/arch/x86/kernel/irqflags.S @@ -10,7 +10,7 @@ SYM_FUNC_START(native_save_fl) pushf pop %_ASM_AX - ret + RET SYM_FUNC_END(native_save_fl) EXPORT_SYMBOL(native_save_fl) @@ -21,6 +21,6 @@ EXPORT_SYMBOL(native_save_fl) SYM_FUNC_START(native_restore_fl) push %_ASM_ARG1 popf - ret + RET SYM_FUNC_END(native_restore_fl) EXPORT_SYMBOL(native_restore_fl) diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 535da74c124e..ee85f1b258d0 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -768,7 +768,7 @@ asm( RESTORE_REGS_STRING " popfl\n" #endif - " ret\n" + ASM_RET ".size kretprobe_trampoline, .-kretprobe_trampoline\n" ); NOKPROBE_SYMBOL(kretprobe_trampoline); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 18e952fed021..fe9babe94861 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -66,6 +66,7 @@ static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __align DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible; static int has_steal_clock = 0; +static int has_guest_poll = 0; /* * No need for any "IO delay" on KVM */ @@ -187,7 +188,7 @@ void kvm_async_pf_task_wake(u32 token) { u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; - struct kvm_task_sleep_node *n; + struct kvm_task_sleep_node *n, *dummy = NULL; if (token == ~0) { apf_task_wake_all(); @@ -199,28 +200,41 @@ again: n = _find_apf_task(b, token); if (!n) { /* - * async PF was not yet handled. - * Add dummy entry for the token. + * Async #PF not yet handled, add a dummy entry for the token. + * Allocating the token must be done outside of the raw lock + * as the allocator is preemptible on PREEMPT_RT kernels. */ - n = kzalloc(sizeof(*n), GFP_ATOMIC); - if (!n) { - /* - * Allocation failed! Busy wait while other cpu - * handles async PF.
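The rewritten kvm_async_pf_task_wake() that this hunk builds up to follows a standard pattern: drop the raw lock, allocate, retake the lock, recheck, and free the node if the race was lost. The constraint is that on PREEMPT_RT even a GFP_ATOMIC allocation can take sleeping locks, so no allocation may happen under a raw spinlock. A generic sketch of the pattern (struct entry and find_entry() are made-up names for illustration, not the kvm.c code):

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct entry { u32 token; struct list_head link; };

	static LIST_HEAD(entries);
	static DEFINE_RAW_SPINLOCK(entries_lock);

	/* find_entry() is an assumed helper that walks 'entries' under the lock */
	static void add_token_once(u32 token)
	{
		struct entry *new = NULL;

		for (;;) {
			raw_spin_lock(&entries_lock);
			if (find_entry(token)) {	/* raced: already queued */
				raw_spin_unlock(&entries_lock);
				break;
			}
			if (new) {			/* publish preallocated node */
				new->token = token;
				list_add(&new->link, &entries);
				new = NULL;
				raw_spin_unlock(&entries_lock);
				break;
			}
			raw_spin_unlock(&entries_lock);
			/* allocate outside the raw lock; NULL simply retries,
			 * like the cpu_relax() loop in the hunk below */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
		}
		kfree(new);	/* no-op when published or never allocated */
	}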
- */ + if (!dummy) { raw_spin_unlock(&b->lock); - cpu_relax(); + dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC); + + /* + * Continue looping on allocation failure, eventually + * the async #PF will be handled and allocating a new + * node will be unnecessary. + */ + if (!dummy) + cpu_relax(); + + /* + * Recheck for async #PF completion before enqueueing + * the dummy token to avoid duplicate list entries. + */ goto again; } - n->token = token; - n->cpu = smp_processor_id(); - init_swait_queue_head(&n->wq); - hlist_add_head(&n->link, &b->list); + dummy->token = token; + dummy->cpu = smp_processor_id(); + init_swait_queue_head(&dummy->wq); + hlist_add_head(&dummy->link, &b->list); + dummy = NULL; } else { apf_task_wake_one(n); } raw_spin_unlock(&b->lock); - return; + + /* A dummy token might be allocated and ultimately not used. */ + if (dummy) + kfree(dummy); } EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake); @@ -624,14 +638,26 @@ static int kvm_cpu_down_prepare(unsigned int cpu) static int kvm_suspend(void) { + u64 val = 0; + kvm_guest_cpu_offline(false); +#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL + if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) + rdmsrl(MSR_KVM_POLL_CONTROL, val); + has_guest_poll = !(val & 1); +#endif return 0; } static void kvm_resume(void) { kvm_cpu_online(raw_smp_processor_id()); + +#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL + if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll) + wrmsrl(MSR_KVM_POLL_CONTROL, 0); +#endif } static struct syscore_ops kvm_syscore_ops = { @@ -927,7 +953,7 @@ asm( "movq __per_cpu_offset(,%rdi,8), %rax;" "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);" "setne %al;" -"ret;" +ASM_RET ".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;" ".popsection"); diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index c98c220d39a9..eeb6992f3307 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -255,7 +255,8 @@ int module_finalize(const Elf_Ehdr *hdr, struct module *me) { const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL, - *para = NULL, *orc = NULL, *orc_ip = NULL; + *para = NULL, *orc = NULL, *orc_ip = NULL, + *retpolines = NULL, *returns = NULL; char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { @@ -271,8 +272,20 @@ int module_finalize(const Elf_Ehdr *hdr, orc = s; if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name)) orc_ip = s; + if (!strcmp(".retpoline_sites", secstrings + s->sh_name)) + retpolines = s; + if (!strcmp(".return_sites", secstrings + s->sh_name)) + returns = s; } + if (retpolines) { + void *rseg = (void *)retpolines->sh_addr; + apply_retpolines(rseg, rseg + retpolines->sh_size); + } + if (returns) { + void *rseg = (void *)returns->sh_addr; + apply_returns(rseg, rseg + returns->sh_size); + } if (alt) { /* patch .altinstructions */ void *aseg = (void *)alt->sh_addr; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 5e5fcf5c376d..e21937680d1f 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -40,7 +40,7 @@ extern void _paravirt_nop(void); asm (".pushsection .entry.text, \"ax\"\n" ".global _paravirt_nop\n" "_paravirt_nop:\n\t" - "ret\n\t" + ASM_RET ".size _paravirt_nop, . 
- _paravirt_nop\n\t" ".type _paravirt_nop, @function\n\t" ".popsection"); diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c index 6b07faaa1579..23154d24b117 100644 --- a/arch/x86/kernel/pmem.c +++ b/arch/x86/kernel/pmem.c @@ -27,6 +27,11 @@ static __init int register_e820_pmem(void) * simply here to trigger the module to load on demand. */ pdev = platform_device_alloc("e820_pmem", -1); - return platform_device_add(pdev); + + rc = platform_device_add(pdev); + if (rc) + platform_device_put(pdev); + + return rc; } device_initcall(register_e820_pmem); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 0aa1baf9a3af..5e17c3939dd1 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -178,6 +178,23 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, task_user_gs(p) = get_user_gs(current_pt_regs()); #endif + if (unlikely(p->flags & PF_IO_WORKER)) { + /* + * An IO thread is a user space thread, but it doesn't + * return to ret_after_fork(). + * + * In order to indicate that to tools like gdb, + * we reset the stack and instruction pointers. + * + * It does the same kernel frame setup to return to a kernel + * function that a kernel thread does. + */ + childregs->sp = 0; + childregs->ip = 0; + kthread_frame_init(frame, sp, arg); + return 0; + } + /* Set a new TLS for the child thread? */ if (clone_flags & CLONE_SETTLS) ret = set_new_tls(p, tls); @@ -556,7 +573,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, } if (updmsr) - wrmsrl(MSR_IA32_SPEC_CTRL, msr); + update_spec_ctrl_cond(msr); } static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) @@ -777,6 +794,10 @@ static void amd_e400_idle(void) */ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c) { + /* User has disallowed the use of MWAIT. Fallback to HALT */ + if (boot_option_idle_override == IDLE_NOMWAIT) + return 0; + if (c->x86_vendor != X86_VENDOR_INTEL) return 0; @@ -885,9 +906,8 @@ static int __init idle_setup(char *str) } else if (!strcmp(str, "nomwait")) { /* * If the boot option of "idle=nomwait" is added, - * it means that mwait will be disabled for CPU C2/C3 - * states. In such case it won't touch the variable - * of boot_option_idle_override. + * it means that mwait will be disabled for CPU C1/C2/C3 + * states. */ boot_option_idle_override = IDLE_NOMWAIT; } else diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index 94b33885f8d2..82a839885a3c 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -7,10 +7,12 @@ #include #include #include +#include #include /* - * Must be relocatable PIC code callable as a C function + * Must be relocatable PIC code callable as a C function, in particular + * there must be a plain RET and not jump to return thunk. 
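The "plain RET" requirement above is positional: relocate_kernel() is copied to a control page and executed from there, and a compiled-in jmp to __x86_return_thunk is a rel32 branch whose displacement is only correct at the address the code was linked at (the thunk may not even be mapped once running on identity-mapped pages). A one-line illustration of why a rel32 target moves with the copy (sketch):

	/* Where a 5-byte rel32 jmp at 'ip' lands: the target tracks ip. */
	static unsigned long jmp32_target(unsigned long ip, int rel32)
	{
		return ip + 5 + rel32;	/* copy the insn elsewhere and this shifts */
	}

Hence the repeated ANNOTATE_UNRET_SAFE + ret + int3 sequence in the hunks below: a bare ret that objtool is told is intentional, followed by an int3 to stop straight-line speculation.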
*/ #define PTR(x) (x << 2) @@ -91,7 +93,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel) movl %edi, %eax addl $(identity_mapped - relocate_kernel), %eax pushl %eax + ANNOTATE_UNRET_SAFE ret + int3 SYM_CODE_END(relocate_kernel) SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) @@ -159,12 +163,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) xorl %edx, %edx xorl %esi, %esi xorl %ebp, %ebp + ANNOTATE_UNRET_SAFE ret + int3 1: popl %edx movl CP_PA_SWAP_PAGE(%edi), %esp addl $PAGE_SIZE, %esp 2: + ANNOTATE_RETPOLINE_SAFE call *%edx /* get the re-entry point of the peer system */ @@ -190,7 +197,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) movl %edi, %eax addl $(virtual_mapped - relocate_kernel), %eax pushl %eax + ANNOTATE_UNRET_SAFE ret + int3 SYM_CODE_END(identity_mapped) SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) @@ -208,7 +217,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) popl %edi popl %esi popl %ebx + ANNOTATE_UNRET_SAFE ret + int3 SYM_CODE_END(virtual_mapped) /* Do the copies */ @@ -271,7 +282,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages) popl %edi popl %ebx popl %ebp + ANNOTATE_UNRET_SAFE ret + int3 SYM_CODE_END(swap_pages) .globl kexec_control_code_size diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index a4d9a261425b..d2b7d212caa4 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -13,7 +13,8 @@ #include /* - * Must be relocatable PIC code callable as a C function + * Must be relocatable PIC code callable as a C function, in particular + * there must be a plain RET and not jump to return thunk. */ #define PTR(x) (x << 3) @@ -104,7 +105,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel) /* jump to identity mapped page */ addq $(identity_mapped - relocate_kernel), %r8 pushq %r8 + ANNOTATE_UNRET_SAFE ret + int3 SYM_CODE_END(relocate_kernel) SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) @@ -191,7 +194,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) xorl %r14d, %r14d xorl %r15d, %r15d + ANNOTATE_UNRET_SAFE ret + int3 1: popq %rdx @@ -210,7 +215,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) call swap_pages movq $virtual_mapped, %rax pushq %rax + ANNOTATE_UNRET_SAFE ret + int3 SYM_CODE_END(identity_mapped) SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) @@ -231,7 +238,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) popq %r12 popq %rbp popq %rbx + ANNOTATE_UNRET_SAFE ret + int3 SYM_CODE_END(virtual_mapped) /* Do the copies */ @@ -288,7 +297,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages) lea PAGE_SIZE(%rax), %rsi jmp 0b 3: + ANNOTATE_UNRET_SAFE ret + int3 SYM_CODE_END(swap_pages) .globl kexec_control_code_size diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c index c222fab112cb..f441002c2327 100644 --- a/arch/x86/kernel/sev-es.c +++ b/arch/x86/kernel/sev-es.c @@ -236,7 +236,7 @@ static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt) return ES_EXCEPTION; } - if (!insn_decode(&ctxt->insn, ctxt->regs, buffer, res)) + if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, res)) return ES_DECODE_FAILED; } else { res = vc_fetch_insn_kernel(ctxt, buffer); diff --git a/arch/x86/kernel/sev_verify_cbit.S b/arch/x86/kernel/sev_verify_cbit.S index ee04941a6546..3355e27c69eb 100644 --- a/arch/x86/kernel/sev_verify_cbit.S +++ b/arch/x86/kernel/sev_verify_cbit.S @@ -85,5 +85,5 @@ SYM_FUNC_START(sev_verify_cbit) #endif /* Return page-table pointer */ movq %rdi, %rax - ret + RET SYM_FUNC_END(sev_verify_cbit) diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 
b001ba811cab..9eff48171532 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -798,11 +798,11 @@ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ -void arch_do_signal(struct pt_regs *regs) +void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { struct ksignal ksig; - if (get_signal(&ksig)) { + if (has_signal && get_signal(&ksig)) { /* Whee! Actually deliver the signal. */ handle_signal(&ksig, regs); return; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 8baff500914e..e8e5515fb7e9 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -229,6 +229,7 @@ static void notrace start_secondary(void *unused) #endif cpu_init_exception_handling(); cpu_init(); + rcu_cpu_starting(raw_smp_processor_id()); x86_cpuinit.early_percpu_clock_init(); smp_callin(); diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c index ca9a380d9c0b..2973b3fb0ec1 100644 --- a/arch/x86/kernel/static_call.c +++ b/arch/x86/kernel/static_call.c @@ -11,7 +11,17 @@ enum insn_type { RET = 3, /* tramp / site cond-tail-call */ }; -static void __ref __static_call_transform(void *insn, enum insn_type type, void *func) +/* + * ud1 %esp, %ecx - a 3 byte #UD that is unique to trampolines, chosen such + * that there is no false-positive trampoline identification while also being a + * speculation stop. + */ +static const u8 tramp_ud[] = { 0x0f, 0xb9, 0xcc }; + +static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc }; + +static void __ref __static_call_transform(void *insn, enum insn_type type, + void *func, bool modinit) { int size = CALL_INSN_SIZE; const void *code; @@ -30,15 +40,17 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void break; case RET: - code = text_gen_insn(RET_INSN_OPCODE, insn, func); - size = RET_INSN_SIZE; + if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) + code = text_gen_insn(JMP32_INSN_OPCODE, insn, &__x86_return_thunk); + else + code = &retinsn; break; } if (memcmp(insn, code, size) == 0) return; - if (unlikely(system_state == SYSTEM_BOOTING)) + if (system_state == SYSTEM_BOOTING || modinit) return text_poke_early(insn, code, size); text_poke_bp(insn, code, size, NULL); @@ -85,14 +97,42 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail) if (tramp) { __static_call_validate(tramp, true); - __static_call_transform(tramp, __sc_insn(!func, true), func); + __static_call_transform(tramp, __sc_insn(!func, true), func, false); } if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site) { __static_call_validate(site, tail); - __static_call_transform(site, __sc_insn(!func, tail), func); + __static_call_transform(site, __sc_insn(!func, tail), func, false); } mutex_unlock(&text_mutex); } EXPORT_SYMBOL_GPL(arch_static_call_transform); + +#ifdef CONFIG_RETHUNK +/* + * This is called by apply_returns() to fix up static call trampolines, + * specifically ARCH_DEFINE_STATIC_CALL_NULL_TRAMP which is recorded as + * having a return trampoline. + * + * The problem is that static_call() is available before determining + * X86_FEATURE_RETHUNK and, by implication, running alternatives. + * + * This means that __static_call_transform() above can have overwritten the + * return trampoline and we now need to fix things up to be consistent. 
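Concretely, the trampoline bytes that the fixup below inspects look like this (sketch assembled from the opcodes used in this file: retinsn[], tramp_ud[] and text_gen_insn()):

	/*
	 * tramp + 0: e9 xx xx xx xx   jmp.d32 __x86_return_thunk   (RETHUNK), or
	 *            c3 cc cc cc cc   ret; int3 padding            (plain RET)
	 * tramp + 5: 0f b9 cc         ud1 %esp, %ecx               (signature)
	 *
	 * Checking the 3 signature bytes at tramp + 5 is what lets
	 * __static_call_fixup() reject call sites that merely look like a
	 * return trampoline.
	 */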
+ */ +bool __static_call_fixup(void *tramp, u8 op, void *dest) +{ + if (memcmp(tramp+5, tramp_ud, 3)) { + /* Not a trampoline site, not our problem. */ + return false; + } + + mutex_lock(&text_mutex); + if (op == RET_INSN_OPCODE || dest == &__x86_return_thunk) + __static_call_transform(tramp, RET, NULL, true); + mutex_unlock(&text_mutex); + + return true; +} +#endif diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 60d2c3798ba2..2f97d1a1032f 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -175,8 +175,7 @@ void set_task_blockstep(struct task_struct *task, bool on) * * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if * task is current or it can't be running, otherwise we can race - * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but - * PTRACE_KILL is not safe. + * with __switch_to_xtra(). We rely on ptrace_freeze_traced(). */ local_irq_disable(); debugctl = get_debugctlmsr(); diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 504fa5425bce..3fd1c81eb5e3 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -68,9 +68,6 @@ static int __init control_va_addr_alignment(char *str) if (*str == 0) return 1; - if (*str == '=') - str++; - if (!strcmp(str, "32")) va_align.flags = ALIGN_VA_32; else if (!strcmp(str, "64")) @@ -80,11 +77,11 @@ static int __init control_va_addr_alignment(char *str) else if (!strcmp(str, "on")) va_align.flags = ALIGN_VA_32 | ALIGN_VA_64; else - return 0; + pr_warn("invalid option value: 'align_va_addr=%s'\n", str); return 1; } -__setup("align_va_addr", control_va_addr_alignment); +__setup("align_va_addr=", control_va_addr_alignment); SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c index f6225bf22c02..8032f5f7eef9 100644 --- a/arch/x86/kernel/umip.c +++ b/arch/x86/kernel/umip.c @@ -356,7 +356,7 @@ bool fixup_umip_exception(struct pt_regs *regs) if (!nr_copied) return false; - if (!insn_decode(&insn, regs, buf, nr_copied)) + if (!insn_decode_from_regs(&insn, regs, buf, nr_copied)) return false; umip_inst = identify_insn(&insn); diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index c451d5f6422f..d557a545f4bc 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -93,22 +93,27 @@ static struct orc_entry *orc_find(unsigned long ip); static struct orc_entry *orc_ftrace_find(unsigned long ip) { struct ftrace_ops *ops; - unsigned long caller; + unsigned long tramp_addr, offset; ops = ftrace_ops_trampoline(ip); if (!ops) return NULL; + /* Set tramp_addr to the start of the code copied by the trampoline */ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) - caller = (unsigned long)ftrace_regs_call; + tramp_addr = (unsigned long)ftrace_regs_caller; else - caller = (unsigned long)ftrace_call; + tramp_addr = (unsigned long)ftrace_caller; + + /* Now advance tramp_addr to the location within the trampoline that ip is at */ + offset = ip - ops->trampoline; + tramp_addr += offset; /* Prevent unlikely recursion */ - if (ip == caller) + if (ip == tramp_addr) return NULL; - return orc_find(caller); + return orc_find(tramp_addr); } #else static struct orc_entry *orc_ftrace_find(unsigned long ip) @@ -692,7 +697,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, /* Otherwise, skip ahead to the user-specified starting frame: */ while (!unwind_done(state) && (!on_stack(&state->stack_info, first_frame,
sizeof(long)) || - state->sp < (unsigned long)first_frame)) + state->sp <= (unsigned long)first_frame)) unwind_next_frame(state); return; diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S index 641f0fe1e5b4..1258a5872d12 100644 --- a/arch/x86/kernel/verify_cpu.S +++ b/arch/x86/kernel/verify_cpu.S @@ -132,9 +132,9 @@ SYM_FUNC_START_LOCAL(verify_cpu) .Lverify_cpu_no_longmode: popf # Restore caller passed flags movl $1,%eax - ret + RET .Lverify_cpu_sse_ok: popf # Restore caller passed flags xorl %eax, %eax - ret + RET SYM_FUNC_END(verify_cpu) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 9fef0742de1d..b36f5f42a754 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -142,7 +142,7 @@ SECTIONS #ifdef CONFIG_RETPOLINE __indirect_thunk_start = .; - *(.text.__x86.indirect_thunk) + *(.text.__x86.*) __indirect_thunk_end = .; #endif } :text =0xcccc @@ -272,6 +272,27 @@ SECTIONS __parainstructions_end = .; } +#ifdef CONFIG_RETPOLINE + /* + * List of instructions that call/jmp/jcc to retpoline thunks + * __x86_indirect_thunk_*(). These instructions can be patched along + * with alternatives, after which the section can be freed. + */ + . = ALIGN(8); + .retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) { + __retpoline_sites = .; + *(.retpoline_sites) + __retpoline_sites_end = .; + } + + . = ALIGN(8); + .return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) { + __return_sites = .; + *(.return_sites) + __return_sites_end = .; + } +#endif + /* * struct alt_inst entries. From the header (alternative.h): * "Alternative instructions for different CPU types or capabilities" diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 41b0dc37720e..06a776fdb90c 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -661,13 +661,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->edx = 0; } break; - case 9: - break; case 0xa: { /* Architectural Performance Monitoring */ struct x86_pmu_capability cap; union cpuid10_eax eax; union cpuid10_edx edx; + if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) { + entry->eax = entry->ebx = entry->ecx = entry->edx = 0; + break; + } + perf_get_x86_pmu_capability(&cap); /* @@ -810,11 +813,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->eax = min(entry->eax, 0x8000001f); break; case 0x80000001: + entry->ebx &= ~GENMASK(27, 16); cpuid_entry_override(entry, CPUID_8000_0001_EDX); cpuid_entry_override(entry, CPUID_8000_0001_ECX); break; case 0x80000006: - /* L2 cache and TLB: pass through host info. */ + /* Drop reserved bits, pass host L2 cache and TLB info. 
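These CPUID hunks scrub reserved leaf bits with GENMASK(h, l), a mask with bits h..l set inclusive; GENMASK(17, 16) is 0x30000, so the statement just below clears exactly the two reserved EDX bits. A minimal expression of the same semantics (sketch, not the kernel's exact macro):

	/* bits h..l set, for 0 <= l <= h <= 63; e.g. (17, 16) -> 0x30000 */
	#define GENMASK_ULL_SKETCH(h, l) \
		((~0ULL >> (63 - (h))) & (~0ULL << (l)))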
*/ + entry->edx &= ~GENMASK(17, 16); break; case 0x80000007: /* Advanced power management */ /* invariant TSC is CPUID.80000007H:EDX[8] */ @@ -837,6 +842,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) g_phys_as = phys_as; entry->eax = g_phys_as | (virt_as << 8); + entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8)); entry->edx = 0; cpuid_entry_override(entry, CPUID_8000_0008_EBX); break; @@ -856,6 +862,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->ecx = entry->edx = 0; break; case 0x8000001a: + entry->eax &= GENMASK(2, 0); + entry->ebx = entry->ecx = entry->edx = 0; + break; case 0x8000001e: break; /* Support memory encryption cpuid if host supports it */ diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index a63df19ef4da..63efccc8f429 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -188,9 +188,6 @@ #define X8(x...) X4(x), X4(x) #define X16(x...) X8(x), X8(x) -#define NR_FASTOP (ilog2(sizeof(ulong)) + 1) -#define FASTOP_SIZE 8 - struct opcode { u64 flags : 56; u64 intercept : 8; @@ -304,9 +301,15 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for * different operand sizes can be reached by calculation, rather than a jump * table (which would be bigger than the code). + * + * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR + * and 1 for the straight line speculation INT3, leaves 7 bytes for the + * body of the function. Currently none is larger than 4. */ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop); +#define FASTOP_SIZE 16 + #define __FOP_FUNC(name) \ ".align " __stringify(FASTOP_SIZE) " \n\t" \ ".type " name ", @function \n\t" \ @@ -316,19 +319,21 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop); __FOP_FUNC(#name) #define __FOP_RET(name) \ - "ret \n\t" \ + ASM_RET \ ".size " name ", .-" name "\n\t" #define FOP_RET(name) \ __FOP_RET(#name) -#define FOP_START(op) \ +#define __FOP_START(op, align) \ extern void em_##op(struct fastop *fake); \ asm(".pushsection .text, \"ax\" \n\t" \ ".global em_" #op " \n\t" \ - ".align " __stringify(FASTOP_SIZE) " \n\t" \ + ".align " __stringify(align) " \n\t" \ "em_" #op ":\n\t" +#define FOP_START(op) __FOP_START(op, FASTOP_SIZE) + #define FOP_END \ ".popsection") @@ -428,19 +433,29 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop); FOP_END /* Special case for SETcc - 1 instruction per cc */ + +/* + * Depending on .config the SETcc functions look like: + * + * SETcc %al [3 bytes] + * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETHUNK] + * INT3 [1 byte; CONFIG_SLS] + */ +#define SETCC_ALIGN 16 + #define FOP_SETCC(op) \ - ".align 4 \n\t" \ + ".align " __stringify(SETCC_ALIGN) " \n\t" \ ".type " #op ", @function \n\t" \ #op ": \n\t" \ #op " %al \n\t" \ - __FOP_RET(#op) + __FOP_RET(#op) \ + ".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t" asm(".pushsection .fixup, \"ax\"\n" - ".global kvm_fastop_exception \n" - "kvm_fastop_exception: xor %esi, %esi; ret\n" + "kvm_fastop_exception: xor %esi, %esi; " ASM_RET ".popsection"); -FOP_START(setcc) +__FOP_START(setcc, SETCC_ALIGN) FOP_SETCC(seto) FOP_SETCC(setno) FOP_SETCC(setc) @@ -781,8 +796,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt, ctxt->mode, linear); } -static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst, - enum x86emul_mode mode) +static inline int assign_eip(struct x86_emulate_ctxt *ctxt, 
ulong dst) { ulong linear; int rc; @@ -792,41 +806,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst, if (ctxt->op_bytes != sizeof(unsigned long)) addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1); - rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear); + rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear); if (rc == X86EMUL_CONTINUE) ctxt->_eip = addr.ea; return rc; } -static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) +static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt) { - return assign_eip(ctxt, dst, ctxt->mode); + u64 efer; + struct desc_struct cs; + u16 selector; + u32 base3; + + ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); + + if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) { + /* Real mode. cpu must not have long mode active */ + if (efer & EFER_LMA) + return X86EMUL_UNHANDLEABLE; + ctxt->mode = X86EMUL_MODE_REAL; + return X86EMUL_CONTINUE; + } + + if (ctxt->eflags & X86_EFLAGS_VM) { + /* Protected/VM86 mode. cpu must not have long mode active */ + if (efer & EFER_LMA) + return X86EMUL_UNHANDLEABLE; + ctxt->mode = X86EMUL_MODE_VM86; + return X86EMUL_CONTINUE; + } + + if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS)) + return X86EMUL_UNHANDLEABLE; + + if (efer & EFER_LMA) { + if (cs.l) { + /* Proper long mode */ + ctxt->mode = X86EMUL_MODE_PROT64; + } else if (cs.d) { + /* 32 bit compatibility mode*/ + ctxt->mode = X86EMUL_MODE_PROT32; + } else { + ctxt->mode = X86EMUL_MODE_PROT16; + } + } else { + /* Legacy 32 bit / 16 bit mode */ + ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; + } + + return X86EMUL_CONTINUE; } -static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, - const struct desc_struct *cs_desc) +static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) { - enum x86emul_mode mode = ctxt->mode; - int rc; + return assign_eip(ctxt, dst); +} -#ifdef CONFIG_X86_64 - if (ctxt->mode >= X86EMUL_MODE_PROT16) { - if (cs_desc->l) { - u64 efer = 0; +static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst) +{ + int rc = emulator_recalc_and_set_mode(ctxt); - ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); - if (efer & EFER_LMA) - mode = X86EMUL_MODE_PROT64; - } else - mode = X86EMUL_MODE_PROT32; /* temporary value */ - } -#endif - if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32) - mode = cs_desc->d ? 
X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; - rc = assign_eip(ctxt, dst, mode); - if (rc == X86EMUL_CONTINUE) - ctxt->mode = mode; - return rc; + if (rc != X86EMUL_CONTINUE) + return rc; + + return assign_eip(ctxt, dst); } static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) @@ -1055,7 +1099,7 @@ static int em_bsr_c(struct x86_emulate_ctxt *ctxt) static __always_inline u8 test_cc(unsigned int condition, unsigned long flags) { u8 rc; - void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); + void (*fop)(void) = (void *)em_setcc + SETCC_ALIGN * (condition & 0xf); flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; asm("push %[flags]; popf; " CALL_NOSPEC @@ -1757,16 +1801,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; - if (!seg_desc.p) { - err_vec = NP_VECTOR; - goto exception; - } - old_desc = seg_desc; - seg_desc.type |= 2; /* busy */ - ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, - sizeof(seg_desc), &ctxt->exception); - if (ret != X86EMUL_CONTINUE) - return ret; break; case VCPU_SREG_LDTR: if (seg_desc.s || seg_desc.type != 2) @@ -1804,8 +1838,17 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (ret != X86EMUL_CONTINUE) return ret; if (emul_is_noncanonical_address(get_desc_base(&seg_desc) | - ((u64)base3 << 32), ctxt)) - return emulate_gp(ctxt, 0); + ((u64)base3 << 32), ctxt)) + return emulate_gp(ctxt, err_code); + } + + if (seg == VCPU_SREG_TR) { + old_desc = seg_desc; + seg_desc.type |= 2; /* busy */ + ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, + sizeof(seg_desc), &ctxt->exception); + if (ret != X86EMUL_CONTINUE) + return ret; } load: ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); @@ -2025,7 +2068,7 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; - if (ctxt->modrm_reg == VCPU_SREG_SS) + if (seg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; if (ctxt->op_bytes > 2) rsp_increment(ctxt, ctxt->op_bytes - 2); @@ -2242,7 +2285,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; - rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); + rc = assign_eip_far(ctxt, ctxt->src.val); /* Error handling is not implemented. */ if (rc != X86EMUL_CONTINUE) return X86EMUL_UNHANDLEABLE; @@ -2323,7 +2366,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) &new_desc); if (rc != X86EMUL_CONTINUE) return rc; - rc = assign_eip_far(ctxt, eip, &new_desc); + rc = assign_eip_far(ctxt, eip); /* Error handling is not implemented. 
*/ if (rc != X86EMUL_CONTINUE) return X86EMUL_UNHANDLEABLE; @@ -2943,6 +2986,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt->_eip = rdx; + ctxt->mode = usermode; *reg_write(ctxt, VCPU_REGS_RSP) = rcx; return X86EMUL_CONTINUE; @@ -3539,7 +3583,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; - rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); + rc = assign_eip_far(ctxt, ctxt->src.val); if (rc != X86EMUL_CONTINUE) goto fail; @@ -3611,8 +3655,10 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt) { u64 tsc_aux = 0; - if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux)) + if (!ctxt->ops->guest_has_rdpid(ctxt)) return emulate_ud(ctxt); + + ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux); ctxt->dst.val = tsc_aux; return X86EMUL_CONTINUE; } @@ -3679,11 +3725,25 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt) static int em_cr_write(struct x86_emulate_ctxt *ctxt) { - if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) + int cr_num = ctxt->modrm_reg; + int r; + + if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val)) return emulate_gp(ctxt, 0); /* Disable writeback. */ ctxt->dst.type = OP_NONE; + + if (cr_num == 0) { + /* + * CR0 write might have updated CR0.PE and/or CR0.PG + * which can affect the cpu's execution mode. + */ + r = emulator_recalc_and_set_mode(ctxt); + if (r != X86EMUL_CONTINUE) + return r; + } + return X86EMUL_CONTINUE; } diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index d806139377bc..09ec1cda2d68 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -428,6 +428,9 @@ static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint) struct kvm_lapic_irq irq; int ret, vector; + if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm)) + return -EINVAL; + if (sint >= ARRAY_SIZE(synic->sint)) return -EINVAL; diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 7d5be04dc661..aeed6da60e0c 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -225,6 +225,7 @@ struct x86_emulate_ops { bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt); bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt); bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt); + bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt); void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index de11149e28e0..260727eaa6b9 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -113,7 +113,8 @@ static inline u32 kvm_x2apic_id(struct kvm_lapic *apic) static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu) { - return pi_inject_timer && kvm_vcpu_apicv_active(vcpu); + return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) && + (kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm)); } bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu) @@ -296,6 +297,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); } + + /* Check if there are APF page ready requests pending */ + if (enabled) + kvm_make_request(KVM_REQ_APF_READY, apic->vcpu); } static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id) @@ -986,6 +991,10 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, *r = -1; if (irq->shorthand == APIC_DEST_SELF) { + if (KVM_BUG_ON(!src, kvm)) { + *r = 0; + return true; + } *r = kvm_apic_set_irq(src->vcpu, irq, dest_map); 
return true; } @@ -2106,10 +2115,9 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) break; case APIC_SELF_IPI: - if (apic_x2apic_mode(apic)) { - kvm_lapic_reg_write(apic, APIC_ICR, - APIC_DEST_SELF | (val & APIC_VECTOR_MASK)); - } else + if (apic_x2apic_mode(apic)) + kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0); + else ret = 1; break; default: @@ -2260,6 +2268,8 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) if (value & MSR_IA32_APICBASE_ENABLE) { kvm_apic_set_xapic_id(apic, vcpu->vcpu_id); static_key_slow_dec_deferred(&apic_hw_disabled); + /* Check if there are APF page ready requests pending */ + kvm_make_request(KVM_REQ_APF_READY, vcpu); } else { static_key_slow_inc(&apic_hw_disabled.key); atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 20d29ae8ed70..13bf3198d0ce 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3140,6 +3140,8 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, return; sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK); + if (WARN_ON(!sp)) + return; if (kvm_mmu_put_root(kvm, sp)) { if (sp->tdp_mmu_page) @@ -5176,14 +5178,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) uint i; if (pcid == kvm_get_active_pcid(vcpu)) { - mmu->invlpg(vcpu, gva, mmu->root_hpa); + if (mmu->invlpg) + mmu->invlpg(vcpu, gva, mmu->root_hpa); tlb_flush = true; } for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { if (VALID_PAGE(mmu->prev_roots[i].hpa) && pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) { - mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); + if (mmu->invlpg) + mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); tlb_flush = true; } } @@ -5373,6 +5377,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm) { struct kvm_mmu_page *sp, *node; int nr_zapped, batch = 0; + bool unstable; restart: list_for_each_entry_safe_reverse(sp, node, @@ -5404,11 +5409,12 @@ restart: goto restart; } - if (__kvm_mmu_prepare_zap_page(kvm, sp, - &kvm->arch.zapped_obsolete_pages, &nr_zapped)) { - batch += nr_zapped; + unstable = __kvm_mmu_prepare_zap_page(kvm, sp, + &kvm->arch.zapped_obsolete_pages, &nr_zapped); + batch += nr_zapped; + + if (unstable) goto restart; - } } /* @@ -5876,12 +5882,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) return 0; } -int kvm_mmu_module_init(void) +/* + * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as + * its default value of -1 is technically undefined behavior for a boolean. + */ +void __init kvm_mmu_x86_module_init(void) { - int ret = -ENOMEM; - if (nx_huge_pages == -1) __set_nx_huge_pages(get_nx_auto_mode()); +} + +/* + * The bulk of the MMU initialization is deferred until the vendor module is + * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need + * to be reset when a potentially different vendor module is loaded. 
+ */ +int kvm_mmu_vendor_module_init(void) +{ + int ret = -ENOMEM; /* * MMU roles use union aliasing which is, generally speaking, an @@ -5955,7 +5973,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu) mmu_free_memory_caches(vcpu); } -void kvm_mmu_module_exit(void) +void kvm_mmu_vendor_module_exit(void) { mmu_destroy_caches(); percpu_counter_destroy(&kvm_total_used_mmu_pages); diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 2f83b5d948b3..e5322a0dc5bb 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include #include "x86.h" #include "cpuid.h" @@ -168,13 +170,21 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc) return true; } +static int cmp_u64(const void *pa, const void *pb) +{ + u64 a = *(u64 *)pa; + u64 b = *(u64 *)pb; + + return (a > b) - (a < b); +} + void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) { u64 config; u32 type = PERF_TYPE_RAW; struct kvm *kvm = pmc->vcpu->kvm; struct kvm_pmu_event_filter *filter; - int i; + struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu); bool allow_event = true; if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL) @@ -189,16 +199,13 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu); if (filter) { - for (i = 0; i < filter->nevents; i++) - if (filter->events[i] == - (eventsel & AMD64_RAW_EVENT_MASK_NB)) - break; - if (filter->action == KVM_PMU_EVENT_ALLOW && - i == filter->nevents) - allow_event = false; - if (filter->action == KVM_PMU_EVENT_DENY && - i < filter->nevents) - allow_event = false; + __u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB; + + if (bsearch(&key, filter->events, filter->nevents, + sizeof(__u64), cmp_u64)) + allow_event = filter->action == KVM_PMU_EVENT_ALLOW; + else + allow_event = filter->action == KVM_PMU_EVENT_DENY; } if (!allow_event) return; @@ -214,7 +221,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) } if (type == PERF_TYPE_RAW) - config = eventsel & AMD64_RAW_EVENT_MASK; + config = eventsel & pmu->raw_event_mask; if (pmc->current_config == eventsel && pmc_resume_counter(pmc)) return; @@ -507,6 +514,11 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp) /* Ensure nevents can't be changed between the user copies. */ *filter = tmp; + /* + * Sort the in-kernel list so that we can search it with bsearch. + */ + sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL); + mutex_lock(&kvm->lock); filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter, mutex_is_locked(&kvm->lock)); diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 23910e6a3f01..e7feaa7910ab 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -1198,8 +1198,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, return -EINVAL; ret = -ENOMEM; - ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); - save = kzalloc(sizeof(*save), GFP_KERNEL); + ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT); + save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT); if (!ctl || !save) goto out_free; diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 4e7093bcb64b..35da84f63b20 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -44,6 +44,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = { [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, }; +/* duplicated from amd_f17h_perfmon_event_map. 
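cmp_u64() above deliberately returns (a > b) - (a < b) instead of the classic subtraction comparator: for 64-bit keys the difference truncated to int can flip sign, which would silently break both sort() and bsearch(). A stand-alone demonstration (userspace sketch):

	#include <stdint.h>
	#include <stdio.h>

	static int cmp_u64_bad(const void *pa, const void *pb)
	{
		/* truncates a 64-bit difference to int: wrong for large keys */
		return (int)(*(const uint64_t *)pa - *(const uint64_t *)pb);
	}

	static int cmp_u64_good(const void *pa, const void *pb)
	{
		uint64_t a = *(const uint64_t *)pa, b = *(const uint64_t *)pb;

		return (a > b) - (a < b);
	}

	int main(void)
	{
		uint64_t a = 1ULL << 32, b = 1;		/* a > b */

		/* a - b == 0xffffffff, truncated to int == -1: claims "a < b" */
		printf("bad=%d good=%d\n", cmp_u64_bad(&a, &b), cmp_u64_good(&a, &b));
		return 0;
	}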
*/ +static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = { + [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES }, + [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS }, + [2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES }, + [3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES }, + [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, + [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, + [6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, + [7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, +}; + +/* amd_pmc_perf_hw_id depends on these being the same size */ +static_assert(ARRAY_SIZE(amd_event_mapping) == + ARRAY_SIZE(amd_f17h_event_mapping)); + static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type) { struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); @@ -128,19 +144,25 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc) { + struct kvm_event_hw_type_mapping *event_mapping; u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT; u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; int i; + if (guest_cpuid_family(pmc->vcpu) >= 0x17) + event_mapping = amd_f17h_event_mapping; + else + event_mapping = amd_event_mapping; + for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++) - if (amd_event_mapping[i].eventsel == event_select - && amd_event_mapping[i].unit_mask == unit_mask) + if (event_mapping[i].eventsel == event_select + && event_mapping[i].unit_mask == unit_mask) break; if (i == ARRAY_SIZE(amd_event_mapping)) return PERF_COUNT_HW_MAX; - return amd_event_mapping[i].event_type; + return event_mapping[i].event_type; } /* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */ @@ -253,12 +275,10 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* MSR_EVNTSELn */ pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL); if (pmc) { - if (data == pmc->eventsel) - return 0; - if (!(data & pmu->reserved_bits)) { + data &= ~pmu->reserved_bits; + if (data != pmc->eventsel) reprogram_gp_counter(pmc, data); - return 0; - } + return 0; } return 1; @@ -275,6 +295,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu) pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1; pmu->reserved_bits = 0xfffffff000280000ull; + pmu->raw_event_mask = AMD64_RAW_EVENT_MASK; pmu->version = 1; /* not applicable to AMD; but clean them to prevent any fall out */ pmu->counter_bitmask[KVM_PMC_FIXED] = 0; diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 6c82ef22985d..c2b34998c27d 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -537,7 +537,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) } ret = -ENOMEM; - blob = kmalloc(params.len, GFP_KERNEL); + blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT); if (!blob) goto e_free; @@ -676,7 +676,7 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr, if (!IS_ALIGNED(dst_paddr, 16) || !IS_ALIGNED(paddr, 16) || !IS_ALIGNED(size, 16)) { - tpage = (void *)alloc_page(GFP_KERNEL); + tpage = (void *)alloc_page(GFP_KERNEL | __GFP_ZERO); if (!tpage) return -ENOMEM; @@ -1177,6 +1177,14 @@ void sev_hardware_teardown(void) sev_flush_asids(); } +void sev_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!sev_guest(kvm)) + return; + + wbinvd_on_all_cpus(); +} + void pre_sev_run(struct vcpu_svm *svm, int cpu) { struct svm_cpu_data *sd = per_cpu(svm_data, cpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7773a765f548..c34ba034ca11 
100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -305,12 +305,6 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) return 0; } -static int is_external_interrupt(u32 info) -{ - info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID; - return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); -} - static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -1357,6 +1351,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) */ svm_clear_current_vmcb(svm->vmcb); + svm_leave_nested(vcpu); svm_free_nested(svm); __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); @@ -1692,14 +1687,16 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) update_cr0_intercept(svm); } -int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +{ + return true; +} + +void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE; unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4; - if (cr4 & X86_CR4_VMXE) - return 1; - if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) svm_flush_tlb(vcpu); @@ -1709,7 +1706,6 @@ int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) cr4 |= host_cr4_mce; to_svm(vcpu)->vmcb->save.cr4 = cr4; vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); - return 0; } static void svm_set_segment(struct kvm_vcpu *vcpu, @@ -2474,9 +2470,9 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr) msr->data = 0; switch (msr->index) { - case MSR_F10H_DECFG: - if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) - msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; + case MSR_AMD64_DE_CFG: + if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC)) + msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE; break; case MSR_IA32_PERF_CAPABILITIES: return 0; @@ -2583,7 +2579,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 0x1E; } break; - case MSR_F10H_DECFG: + case MSR_AMD64_DE_CFG: msr_info->data = svm->msr_decfg; break; default: @@ -2763,7 +2759,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_VM_IGNNE: vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); break; - case MSR_F10H_DECFG: { + case MSR_AMD64_DE_CFG: { struct kvm_msr_entry msr_entry; msr_entry.index = msr->index; @@ -3114,15 +3110,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) return 0; } - if (is_external_interrupt(svm->vmcb->control.exit_int_info) && - exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR && - exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH && - exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI) - printk(KERN_ERR "%s: unexpected exit_int_info 0x%x " - "exit_code 0x%x\n", - __func__, svm->vmcb->control.exit_int_info, - exit_code); - if (exit_fastpath != EXIT_FASTPATH_NONE) return 1; @@ -3188,8 +3175,6 @@ static void svm_set_irq(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - BUG_ON(!(gif_set(svm))); - trace_kvm_inj_virq(vcpu->arch.interrupt.nr); ++vcpu->stat.irq_injections; @@ -4243,6 +4228,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .get_cpl = svm_get_cpl, .get_cs_db_l_bits = kvm_get_cs_db_l_bits, .set_cr0 = svm_set_cr0, + .is_valid_cr4 = svm_is_valid_cr4, .set_cr4 = svm_set_cr4, .set_efer = svm_set_efer, .get_idt = svm_get_idt, @@ -4325,6 +4311,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .mem_enc_op = svm_mem_enc_op, .mem_enc_reg_region = svm_register_enc_region, 
.mem_enc_unreg_region = svm_unregister_enc_region, + .guest_memory_reclaimed = sev_guest_memory_reclaimed, .can_emulate_instruction = svm_can_emulate_instruction, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 2c007241fbf5..f62d13fc6e01 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -355,7 +355,7 @@ void svm_vcpu_free_msrpm(u32 *msrpm); int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer); void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); -int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); +void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); void svm_flush_tlb(struct kvm_vcpu *vcpu); void disable_nmi_singlestep(struct vcpu_svm *svm); bool svm_smi_blocked(struct kvm_vcpu *vcpu); @@ -491,6 +491,8 @@ int svm_register_enc_region(struct kvm *kvm, struct kvm_enc_region *range); int svm_unregister_enc_region(struct kvm *kvm, struct kvm_enc_region *range); +void sev_guest_memory_reclaimed(struct kvm *kvm); + void pre_sev_run(struct vcpu_svm *svm, int cpu); int __init sev_hardware_setup(void); void sev_hardware_teardown(void); diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 1ec1ac40e328..c18d812d00cd 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -128,6 +128,15 @@ SYM_FUNC_START(__svm_vcpu_run) mov %r15, VCPU_R15(%_ASM_AX) #endif + /* + * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be + * untrained as soon as we exit the VM and are back to the + * kernel. This should be done before re-enabling interrupts + * because interrupt handlers won't sanitize 'ret' if the return is + * from the kernel. + */ + UNTRAIN_RET + /* * Clear all general purpose registers except RSP and RAX to prevent * speculative use of the guest's values, even those that are reloaded @@ -166,5 +175,5 @@ SYM_FUNC_START(__svm_vcpu_run) pop %edi #endif pop %_ASM_BP - ret + RET SYM_FUNC_END(__svm_vcpu_run) diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index a2835d784f4b..3d4988ea8b57 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -304,25 +304,29 @@ TRACE_EVENT(kvm_inj_virq, * Tracepoint for kvm interrupt injection: */ TRACE_EVENT(kvm_inj_exception, - TP_PROTO(unsigned exception, bool has_error, unsigned error_code), - TP_ARGS(exception, has_error, error_code), + TP_PROTO(unsigned exception, bool has_error, unsigned error_code, + bool reinjected), + TP_ARGS(exception, has_error, error_code, reinjected), TP_STRUCT__entry( __field( u8, exception ) __field( u8, has_error ) __field( u32, error_code ) + __field( bool, reinjected ) ), TP_fast_assign( __entry->exception = exception; __entry->has_error = has_error; __entry->error_code = error_code; + __entry->reinjected = reinjected; ), - TP_printk("%s (0x%x)", + TP_printk("%s (0x%x)%s", __print_symbolic(__entry->exception, kvm_trace_sym_exc), /* FIXME: don't print error_code if not present */ - __entry->has_error ? __entry->error_code : 0) + __entry->has_error ? __entry->error_code : 0, + __entry->reinjected ? 
" [reinjected]" : "") ); /* diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 0c2389d0fdaf..498fed0dda98 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -12,6 +12,7 @@ #include "nested.h" #include "pmu.h" #include "trace.h" +#include "vmx.h" #include "x86.h" static bool __read_mostly enable_shadow_vmcs = 1; @@ -1244,7 +1245,7 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) | /* reserved */ BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56); - u64 vmx_basic = vmx->nested.msrs.basic; + u64 vmx_basic = vmcs_config.nested.basic; if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved)) return -EINVAL; @@ -1267,36 +1268,42 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) return 0; } -static int -vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) +static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index, + u32 **low, u32 **high) { - u64 supported; - u32 *lowp, *highp; - switch (msr_index) { case MSR_IA32_VMX_TRUE_PINBASED_CTLS: - lowp = &vmx->nested.msrs.pinbased_ctls_low; - highp = &vmx->nested.msrs.pinbased_ctls_high; + *low = &msrs->pinbased_ctls_low; + *high = &msrs->pinbased_ctls_high; break; case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: - lowp = &vmx->nested.msrs.procbased_ctls_low; - highp = &vmx->nested.msrs.procbased_ctls_high; + *low = &msrs->procbased_ctls_low; + *high = &msrs->procbased_ctls_high; break; case MSR_IA32_VMX_TRUE_EXIT_CTLS: - lowp = &vmx->nested.msrs.exit_ctls_low; - highp = &vmx->nested.msrs.exit_ctls_high; + *low = &msrs->exit_ctls_low; + *high = &msrs->exit_ctls_high; break; case MSR_IA32_VMX_TRUE_ENTRY_CTLS: - lowp = &vmx->nested.msrs.entry_ctls_low; - highp = &vmx->nested.msrs.entry_ctls_high; + *low = &msrs->entry_ctls_low; + *high = &msrs->entry_ctls_high; break; case MSR_IA32_VMX_PROCBASED_CTLS2: - lowp = &vmx->nested.msrs.secondary_ctls_low; - highp = &vmx->nested.msrs.secondary_ctls_high; + *low = &msrs->secondary_ctls_low; + *high = &msrs->secondary_ctls_high; break; default: BUG(); } +} + +static int +vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) +{ + u32 *lowp, *highp; + u64 supported; + + vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp); supported = vmx_control_msr(*lowp, *highp); @@ -1308,6 +1315,7 @@ vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32))) return -EINVAL; + vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp); *lowp = data; *highp = data >> 32; return 0; @@ -1321,10 +1329,8 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) | /* reserved */ GENMASK_ULL(13, 9) | BIT_ULL(31); - u64 vmx_misc; - - vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, - vmx->nested.msrs.misc_high); + u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low, + vmcs_config.nested.misc_high); if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits)) return -EINVAL; @@ -1352,10 +1358,8 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) { - u64 vmx_ept_vpid_cap; - - vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps, - vmx->nested.msrs.vpid_caps); + u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps, + vmcs_config.nested.vpid_caps); /* Every bit is either reserved or a feature bit. 
*/ if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) @@ -1366,20 +1370,21 @@ static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) return 0; } -static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) +static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index) { - u64 *msr; - switch (msr_index) { case MSR_IA32_VMX_CR0_FIXED0: - msr = &vmx->nested.msrs.cr0_fixed0; - break; + return &msrs->cr0_fixed0; case MSR_IA32_VMX_CR4_FIXED0: - msr = &vmx->nested.msrs.cr4_fixed0; - break; + return &msrs->cr4_fixed0; default: BUG(); } +} + +static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) +{ + const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index); /* * 1 bits (which indicates bits which "must-be-1" during VMX operation) @@ -1388,7 +1393,7 @@ static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) if (!is_bitwise_subset(data, *msr, -1ULL)) return -EINVAL; - *msr = data; + *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data; return 0; } @@ -1449,7 +1454,7 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) vmx->nested.msrs.vmcs_enum = data; return 0; case MSR_IA32_VMX_VMFUNC: - if (data & ~vmx->nested.msrs.vmfunc_controls) + if (data & ~vmcs_config.nested.vmfunc_controls) return -EINVAL; vmx->nested.msrs.vmfunc_controls = data; return 0; @@ -2227,7 +2232,8 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, } } -static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) +static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01, + struct vmcs12 *vmcs12) { u32 exec_control, vmcs12_exec_ctrl; u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); @@ -2238,7 +2244,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) /* * PIN CONTROLS */ - exec_control = vmx_pin_based_exec_ctrl(vmx); + exec_control = __pin_controls_get(vmcs01); exec_control |= (vmcs12->pin_based_vm_exec_control & ~PIN_BASED_VMX_PREEMPTION_TIMER); @@ -2253,7 +2259,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) /* * EXEC CONTROLS */ - exec_control = vmx_exec_control(vmx); /* L0's desires */ + exec_control = __exec_controls_get(vmcs01); /* L0's desires */ exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING; exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING; exec_control &= ~CPU_BASED_TPR_SHADOW; @@ -2290,17 +2296,20 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) * SECONDARY EXEC CONTROLS */ if (cpu_has_secondary_exec_ctrls()) { - exec_control = vmx->secondary_exec_control; + exec_control = __secondary_exec_controls_get(vmcs01); /* Take the following fields only from vmcs12 */ exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_ENABLE_RDTSCP | SECONDARY_EXEC_XSAVES | SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_APIC_REGISTER_VIRT | - SECONDARY_EXEC_ENABLE_VMFUNC); + SECONDARY_EXEC_ENABLE_VMFUNC | + SECONDARY_EXEC_DESC); + if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & @@ -2336,9 +2345,15 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate * on the related bits (if supported by the CPU) in the hope that * we can avoid VMWrites during 
vmx_set_efer(). + * + * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is + * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to + * do the same for L2. */ - exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) & - ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; + exec_control = __vm_entry_controls_get(vmcs01); + exec_control |= (vmcs12->vm_entry_controls & + ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL); + exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER); if (cpu_has_load_ia32_efer()) { if (guest_efer & EFER_LMA) exec_control |= VM_ENTRY_IA32E_MODE; @@ -2354,9 +2369,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER * bits may be modified by vmx_set_efer() in prepare_vmcs02(). */ - exec_control = vmx_vmexit_ctrl(); + exec_control = __vm_exit_controls_get(vmcs01); if (cpu_has_load_ia32_efer() && guest_efer != host_efer) exec_control |= VM_EXIT_LOAD_IA32_EFER; + else + exec_control &= ~VM_EXIT_LOAD_IA32_EFER; vm_exit_controls_set(vmx, exec_control); /* @@ -3075,35 +3092,8 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) vmx->loaded_vmcs->host_state.cr4 = cr4; } - asm( - "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */ - "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t" - "je 1f \n\t" - __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t" - "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t" - "1: \n\t" - "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */ - - /* Check if vmlaunch or vmresume is needed */ - "cmpb $0, %c[launched](%[loaded_vmcs])\n\t" - - /* - * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set - * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail - * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the - * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail. 
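For readers unfamiliar with the CC_SET(be) in the asm being deleted here: VMLAUNCH/VMRESUME report VM-Fail-Invalid via RFLAGS.CF and VM-Fail-Valid via RFLAGS.ZF, so a single "below or equal" condition (CF=1 or ZF=1) catches both failure flavors. Roughly, in C terms (illustrative only; the flag bit positions are architectural):

#define X86_EFLAGS_CF	(1UL << 0)	/* set on VM-Fail-Invalid */
#define X86_EFLAGS_ZF	(1UL << 6)	/* set on VM-Fail-Valid */

static bool vm_enter_failed(unsigned long rflags)
{
	return rflags & (X86_EFLAGS_CF | X86_EFLAGS_ZF);
}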
- */ - "call vmx_vmenter\n\t" - - CC_SET(be) - : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail) - : [HOST_RSP]"r"((unsigned long)HOST_RSP), - [loaded_vmcs]"r"(vmx->loaded_vmcs), - [launched]"i"(offsetof(struct loaded_vmcs, launched)), - [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)), - [wordsize]"i"(sizeof(ulong)) - : "memory" - ); + vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, + __vmx_vcpu_run_flags(vmx)); if (vmx->msr_autoload.host.nr) vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); @@ -3363,10 +3353,12 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); - if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) + if (!vmx->nested.nested_run_pending || + !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); if (kvm_mpx_supported() && - !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) + (!vmx->nested.nested_run_pending || + !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); /* @@ -3390,7 +3382,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); - prepare_vmcs02_early(vmx, vmcs12); + prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12); if (from_vmentry) { if (unlikely(!nested_get_vmcs12_pages(vcpu))) { @@ -3668,12 +3660,34 @@ vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) } static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) + struct vmcs12 *vmcs12, + u32 vm_exit_reason, u32 exit_intr_info) { u32 idt_vectoring; unsigned int nr; - if (vcpu->arch.exception.injected) { + /* + * Per the SDM, VM-Exits due to double and triple faults are never + * considered to occur during event delivery, even if the double/triple + * fault is the result of an escalating vectoring issue. + * + * Note, the SDM qualifies the double fault behavior with "The original + * event results in a double-fault exception". It's unclear why the + * qualification exists since exits due to double fault can occur only + * while vectoring a different exception (injected events are never + * subject to interception), i.e. there's _always_ an original event. + * + * The SDM also uses NMI as a confusing example for the "original event + * causes the VM exit directly" clause. NMI isn't special in any way, + * the same rule applies to all events that cause an exit directly. + * NMI is an odd choice for the example because NMIs can only occur on + * instruction boundaries, i.e. they _can't_ occur during vectoring. 
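The is_double_fault() predicate relied on below decodes the VM-Exit interruption-information field, which packs a valid bit, an event type, and an 8-bit vector. A sketch of that decoding (is_exception_n() is the existing helper in vmcs.h; the constants are shown with their architectural values):

#define INTR_INFO_VECTOR_MASK		0xff		/* bits 7:0 */
#define INTR_INFO_INTR_TYPE_MASK	0x700		/* bits 10:8 */
#define INTR_TYPE_HARD_EXCEPTION	(3 << 8)
#define INTR_INFO_VALID_MASK		(1u << 31)
#define DF_VECTOR			8

static bool is_exception_n(u32 intr_info, u8 vector)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
	       (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
}

/* is_double_fault(info) is then just is_exception_n(info, DF_VECTOR) */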
+ */ + if ((u16)vm_exit_reason == EXIT_REASON_TRIPLE_FAULT || + ((u16)vm_exit_reason == EXIT_REASON_EXCEPTION_NMI && + is_double_fault(exit_intr_info))) { + vmcs12->idt_vectoring_info_field = 0; + } else if (vcpu->arch.exception.injected) { nr = vcpu->arch.exception.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; @@ -3706,6 +3720,8 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, idt_vectoring |= INTR_TYPE_EXT_INTR; vmcs12->idt_vectoring_info_field = idt_vectoring; + } else { + vmcs12->idt_vectoring_info_field = 0; } } @@ -3772,7 +3788,16 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, u32 intr_info = nr | INTR_INFO_VALID_MASK; if (vcpu->arch.exception.has_error_code) { - vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; + /* + * Intel CPUs do not generate error codes with bits 31:16 set, + * and more importantly VMX disallows setting bits 31:16 in the + * injected error code for VM-Entry. Drop the bits to mimic + * hardware and avoid inducing failure on nested VM-Entry if L1 + * chooses to inject the exception back to L2. AMD CPUs _do_ + * generate "full" 32-bit error codes, so KVM allows userspace + * to inject exception error codes with bits 31:16 set. + */ + vmcs12->vm_exit_intr_error_code = (u16)vcpu->arch.exception.error_code; intr_info |= INTR_INFO_DELIVER_CODE_MASK; } @@ -4143,12 +4168,12 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, /* update exit information fields: */ vmcs12->vm_exit_reason = vm_exit_reason; vmcs12->exit_qualification = exit_qualification; - vmcs12->vm_exit_intr_info = exit_intr_info; - - vmcs12->idt_vectoring_info_field = 0; - vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); - vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + /* + * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched + * and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other + * exit info fields are unmodified. + */ if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { vmcs12->launch_state = 1; @@ -4160,7 +4185,12 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * Transfer the event that L0 or L1 may wanted to inject into * L2 to IDT_VECTORING_INFO_FIELD. */ - vmcs12_save_pending_event(vcpu, vmcs12); + vmcs12_save_pending_event(vcpu, vmcs12, + vm_exit_reason, exit_intr_info); + + vmcs12->vm_exit_intr_info = exit_intr_info; + vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); /* * According to spec, there's no need to store the guest's @@ -4174,14 +4204,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); } - - /* - * Drop what we picked up for L2 via vmx_complete_interrupts. It is - * preserved above and would only end up incorrectly in L1. - */ - vcpu->arch.nmi_injected = false; - kvm_clear_exception_queue(vcpu); - kvm_clear_interrupt_queue(vcpu); } /* @@ -4521,6 +4543,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, WARN_ON_ONCE(nested_early_check); } + /* + * Drop events/exceptions that were queued for re-injection to L2 + * (picked up via vmx_complete_interrupts()), as well as exceptions + * that were pending for L2. Note, this must NOT be hoisted above + * prepare_vmcs12(), events/exceptions queued for re-injection need to + * be captured in vmcs12 (see vmcs12_save_pending_event()). 
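The comment above encodes an ordering dependency that is easy to miss: the queued events must be encoded into vmcs12 before they are dropped. A toy stand-in with invented types, just to make the ordering concrete:

struct toy_vcpu { int nmi_injected, queued_event; };
struct toy_vmcs12 { unsigned int idt_vectoring_info_field; };

static void prepare_vmcs12_sketch(struct toy_vcpu *v, struct toy_vmcs12 *vmcs12)
{
	/* consumes the queued event into vmcs12 for L1 to see */
	vmcs12->idt_vectoring_info_field = v->queued_event;
}

static void nested_vmexit_sketch(struct toy_vcpu *v, struct toy_vmcs12 *vmcs12)
{
	prepare_vmcs12_sketch(v, vmcs12);	/* must run first */
	v->nmi_injected = 0;			/* only now drop the queue */
	v->queued_event = 0;
}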
+ */ + vcpu->arch.nmi_injected = false; + kvm_clear_exception_queue(vcpu); + kvm_clear_interrupt_queue(vcpu); + vmx_switch_vmcs(vcpu, &vmx->vmcs01); /* Update any VMCS fields that might have changed while L2 ran */ @@ -4868,20 +4901,25 @@ static int handle_vmon(struct kvm_vcpu *vcpu) | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; /* - * The Intel VMX Instruction Reference lists a bunch of bits that are - * prerequisite to running VMXON, most notably cr4.VMXE must be set to - * 1 (see vmx_set_cr4() for when we allow the guest to set this). - * Otherwise, we should fail with #UD. But most faulting conditions - * have already been checked by hardware, prior to the VM-exit for - * VMXON. We do test guest cr4.VMXE because processor CR4 always has - * that bit set to 1 in non-root mode. + * Note, KVM cannot rely on hardware to perform the CR0/CR4 #UD checks + * that have higher priority than VM-Exit (see Intel SDM's pseudocode + * for VMXON), as KVM must load valid CR0/CR4 values into hardware while + * running the guest, i.e. KVM needs to check the _guest_ values. + * + * Rely on hardware for the other two pre-VM-Exit checks, !VM86 and + * !COMPATIBILITY modes. KVM may run the guest in VM86 to emulate Real + * Mode, but KVM will never take the guest out of those modes. */ - if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { + if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || + !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } - /* CPL=0 must be checked manually. */ + /* + * CPL=0 and all other checks that are lower priority than VM-Exit must + * be checked manually. + */ if (vmx_get_cpl(vcpu)) { kvm_inject_gp(vcpu, 0); return 1; diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index bd70c1d7f345..f938fc997766 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -104,6 +104,9 @@ static bool intel_pmc_is_enabled(struct kvm_pmc *pmc) { struct kvm_pmu *pmu = pmc_to_pmu(pmc); + if (pmu->version < 2) + return true; + return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl); } @@ -153,12 +156,17 @@ static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu, return &counters[array_index_nospec(idx, num_counters)]; } -static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu) +static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu) { if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) - return false; + return 0; - return vcpu->arch.perf_capabilities & PMU_CAP_FW_WRITES; + return vcpu->arch.perf_capabilities; +} + +static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu) +{ + return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0; } static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr) @@ -254,7 +262,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_CORE_PERF_FIXED_CTR_CTRL: if (pmu->fixed_ctr_ctrl == data) return 0; - if (!(data & 0xfffffffffffff444ull)) { + if (!(data & pmu->fixed_ctr_ctrl_mask)) { reprogram_fixed_counters(pmu, data); return 0; } @@ -321,6 +329,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) struct kvm_cpuid_entry2 *entry; union cpuid10_eax eax; union cpuid10_edx edx; + int i; pmu->nr_arch_gp_counters = 0; pmu->nr_arch_fixed_counters = 0; @@ -328,7 +337,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) pmu->counter_bitmask[KVM_PMC_FIXED] = 0; pmu->version = 0; pmu->reserved_bits = 0xffffffff00200000ull; - vcpu->arch.perf_capabilities = 0; + pmu->raw_event_mask = 
X86_RAW_EVENT_MASK; + pmu->global_ctrl_mask = ~0ull; + pmu->global_ovf_ctrl_mask = ~0ull; + pmu->fixed_ctr_ctrl_mask = ~0ull; entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); if (!entry) @@ -341,8 +353,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) return; perf_get_x86_pmu_capability(&x86_pmu); - if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) - vcpu->arch.perf_capabilities = vmx_get_perf_capabilities(); pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, x86_pmu.num_counters_gp); @@ -364,6 +374,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) ((u64)1 << edx.split.bit_width_fixed) - 1; } + for (i = 0; i < pmu->nr_arch_fixed_counters; i++) + pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4)); pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); pmu->global_ctrl_mask = ~pmu->global_ctrl; @@ -406,6 +418,8 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu) pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED; pmu->fixed_counters[i].current_config = 0; } + + vcpu->arch.perf_capabilities = vmx_get_perf_capabilities(); } static void intel_pmu_reset(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h new file mode 100644 index 000000000000..edc3f16cc189 --- /dev/null +++ b/arch/x86/kvm/vmx/run_flags.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_VMX_RUN_FLAGS_H +#define __KVM_X86_VMX_RUN_FLAGS_H + +#define VMX_RUN_VMRESUME (1 << 0) +#define VMX_RUN_SAVE_SPEC_CTRL (1 << 1) + +#endif /* __KVM_X86_VMX_RUN_FLAGS_H */ diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h index 571d9ad80a59..69c147df957f 100644 --- a/arch/x86/kvm/vmx/vmcs.h +++ b/arch/x86/kvm/vmx/vmcs.h @@ -102,6 +102,11 @@ static inline bool is_breakpoint(u32 intr_info) return is_exception_n(intr_info, BP_VECTOR); } +static inline bool is_double_fault(u32 intr_info) +{ + return is_exception_n(intr_info, DF_VECTOR); +} + static inline bool is_page_fault(u32 intr_info) { return is_exception_n(intr_info, PF_VECTOR); diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index 90ad7a6246e3..982138bebb70 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -5,6 +5,7 @@ #include #include #include +#include "run_flags.h" #define WORD_SIZE (BITS_PER_LONG / 8) @@ -30,73 +31,12 @@ .section .noinstr.text, "ax" -/** - * vmx_vmenter - VM-Enter the current loaded VMCS - * - * %RFLAGS.ZF: !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME - * - * Returns: - * %RFLAGS.CF is set on VM-Fail Invalid - * %RFLAGS.ZF is set on VM-Fail Valid - * %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit - * - * Note that VMRESUME/VMLAUNCH fall-through and return directly if - * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump - * to vmx_vmexit. - */ -SYM_FUNC_START(vmx_vmenter) - /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */ - je 2f - -1: vmresume - ret - -2: vmlaunch - ret - -3: cmpb $0, kvm_rebooting - je 4f - ret -4: ud2 - - _ASM_EXTABLE(1b, 3b) - _ASM_EXTABLE(2b, 3b) - -SYM_FUNC_END(vmx_vmenter) - -/** - * vmx_vmexit - Handle a VMX VM-Exit - * - * Returns: - * %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit - * - * This is vmx_vmenter's partner in crime. On a VM-Exit, control will jump - * here after hardware loads the host's state, i.e. this is the destination - * referred to by VMCS.HOST_RIP. 
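run_flags.h above replaces the old boolean @launched argument with a small bitmask, computed once per entry. Condensed from __vmx_vcpu_run_flags(), which this patch adds to vmx.c:

static unsigned int run_flags_sketch(struct vcpu_vmx *vmx)
{
	unsigned int flags = 0;

	if (vmx->loaded_vmcs->launched)
		flags |= VMX_RUN_VMRESUME;	/* VMRESUME vs. VMLAUNCH */

	/* if SPEC_CTRL writes aren't intercepted, read it back after exit */
	if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
		flags |= VMX_RUN_SAVE_SPEC_CTRL;

	return flags;
}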
- */ -SYM_FUNC_START(vmx_vmexit) -#ifdef CONFIG_RETPOLINE - ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE - /* Preserve guest's RAX, it's used to stuff the RSB. */ - push %_ASM_AX - - /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ - FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE - - /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */ - or $1, %_ASM_AX - - pop %_ASM_AX -.Lvmexit_skip_rsb: -#endif - ret -SYM_FUNC_END(vmx_vmexit) - /** * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode - * @vmx: struct vcpu_vmx * (forwarded to vmx_update_host_rsp) + * @vmx: struct vcpu_vmx * * @regs: unsigned long * (to guest registers) - * @launched: %true if the VMCS has been launched + * @flags: VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH + * VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl * * Returns: * 0 on VM-Exit, 1 on VM-Fail @@ -115,24 +55,29 @@ SYM_FUNC_START(__vmx_vcpu_run) #endif push %_ASM_BX + /* Save @vmx for SPEC_CTRL handling */ + push %_ASM_ARG1 + + /* Save @flags for SPEC_CTRL handling */ + push %_ASM_ARG3 + /* * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and * @regs is needed after VM-Exit to save the guest's register values. */ push %_ASM_ARG2 - /* Copy @launched to BL, _ASM_ARG3 is volatile. */ + /* Copy @flags to BL, _ASM_ARG3 is volatile. */ mov %_ASM_ARG3B, %bl - /* Adjust RSP to account for the CALL to vmx_vmenter(). */ - lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2 + lea (%_ASM_SP), %_ASM_ARG2 call vmx_update_host_rsp /* Load @regs to RAX. */ mov (%_ASM_SP), %_ASM_AX /* Check if vmlaunch or vmresume is needed */ - cmpb $0, %bl + testb $VMX_RUN_VMRESUME, %bl /* Load guest registers. Don't clobber flags. */ mov VCPU_RCX(%_ASM_AX), %_ASM_CX @@ -154,11 +99,36 @@ SYM_FUNC_START(__vmx_vcpu_run) /* Load guest RAX. This kills the @regs pointer! */ mov VCPU_RAX(%_ASM_AX), %_ASM_AX - /* Enter guest mode */ - call vmx_vmenter + /* Check EFLAGS.ZF from 'testb' above */ + jz .Lvmlaunch - /* Jump on VM-Fail. */ - jbe 2f + /* + * After a successful VMRESUME/VMLAUNCH, control flow "magically" + * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting. + * So this isn't a typical function and objtool needs to be told to + * save the unwind state here and restore it below. + */ + UNWIND_HINT_SAVE + +/* + * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at + * the 'vmx_vmexit' label below. + */ +.Lvmresume: + vmresume + jmp .Lvmfail + +.Lvmlaunch: + vmlaunch + jmp .Lvmfail + + _ASM_EXTABLE(.Lvmresume, .Lfixup) + _ASM_EXTABLE(.Lvmlaunch, .Lfixup) + +SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL) + + /* Restore unwind state from before the VMRESUME/VMLAUNCH. */ + UNWIND_HINT_RESTORE /* Temporarily save guest's RAX. */ push %_ASM_AX @@ -185,21 +155,23 @@ SYM_FUNC_START(__vmx_vcpu_run) mov %r15, VCPU_R15(%_ASM_AX) #endif - /* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */ - xor %eax, %eax + /* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */ + xor %ebx, %ebx +.Lclear_regs: /* - * Clear all general purpose registers except RSP and RAX to prevent + * Clear all general purpose registers except RSP and RBX to prevent * speculative use of the guest's values, even those that are reloaded * via the stack. In theory, an L1 cache miss when restoring registers * could lead to speculative execution with the guest's values. * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially * free. 
RSP and RAX are exempt as RSP is restored by hardware during - * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail. + * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return + * value. */ -1: xor %ecx, %ecx + xor %eax, %eax + xor %ecx, %ecx xor %edx, %edx - xor %ebx, %ebx xor %ebp, %ebp xor %esi, %esi xor %edi, %edi @@ -216,8 +188,32 @@ SYM_FUNC_START(__vmx_vcpu_run) /* "POP" @regs. */ add $WORD_SIZE, %_ASM_SP - pop %_ASM_BX + /* + * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before + * the first unbalanced RET after vmexit! + * + * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB + * entries and (in some cases) RSB underflow. + * + * eIBRS has its own protection against poisoned RSB, so it doesn't + * need the RSB filling sequence. But it does need to be enabled, and a + * single call to retire, before the first unbalanced RET. + */ + + FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\ + X86_FEATURE_RSB_VMEXIT_LITE + + + pop %_ASM_ARG2 /* @flags */ + pop %_ASM_ARG1 /* @vmx */ + + call vmx_spec_ctrl_restore_host + + /* Put return value in AX */ + mov %_ASM_BX, %_ASM_AX + + pop %_ASM_BX #ifdef CONFIG_X86_64 pop %r12 pop %r13 @@ -228,11 +224,17 @@ SYM_FUNC_START(__vmx_vcpu_run) pop %edi #endif pop %_ASM_BP - ret + RET + +.Lfixup: + cmpb $0, kvm_rebooting + jne .Lvmfail + ud2 +.Lvmfail: + /* VM-Fail: set return value to 1 */ + mov $1, %_ASM_BX + jmp .Lclear_regs - /* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */ -2: mov $1, %eax - jmp 1b SYM_FUNC_END(__vmx_vcpu_run) @@ -293,7 +295,7 @@ SYM_FUNC_START(vmread_error_trampoline) pop %_ASM_AX pop %_ASM_BP - ret + RET SYM_FUNC_END(vmread_error_trampoline) SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff) @@ -326,5 +328,5 @@ SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff) */ mov %_ASM_BP, %_ASM_SP pop %_ASM_BP - ret + RET SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 94f5f2129e3b..af6742d11ca1 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -226,6 +226,9 @@ static const struct { #define L1D_CACHE_ORDER 4 static void *vmx_l1d_flush_pages; +/* Control for disabling CPU Fill buffer clear */ +static bool __read_mostly vmx_fb_clear_ctrl_available; + static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) { struct page *page; @@ -357,6 +360,60 @@ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); } +static void vmx_setup_fb_clear_ctrl(void) +{ + u64 msr; + + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) && + !boot_cpu_has_bug(X86_BUG_MDS) && + !boot_cpu_has_bug(X86_BUG_TAA)) { + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); + if (msr & ARCH_CAP_FB_CLEAR_CTRL) + vmx_fb_clear_ctrl_available = true; + } +} + +static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx) +{ + u64 msr; + + if (!vmx->disable_fb_clear) + return; + + msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL); + msr |= FB_CLEAR_DIS; + native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr); + /* Cache the MSR value to avoid reading it later */ + vmx->msr_ia32_mcu_opt_ctrl = msr; +} + +static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx) +{ + if (!vmx->disable_fb_clear) + return; + + vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS; + native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl); +} + +static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) +{ + vmx->disable_fb_clear = 
vmx_fb_clear_ctrl_available; + + /* + * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS + * at VMEntry. Skip the MSR read/write when a guest has no use case to + * execute VERW. + */ + if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) || + ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO))) + vmx->disable_fb_clear = false; +} + static const struct kernel_param_ops vmentry_l1d_flush_ops = { .set = vmentry_l1d_flush_set, .get = vmentry_l1d_flush_get, @@ -562,7 +619,7 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu) * evmcs in singe VM shares same assist page. */ if (!*p_hv_pa_pg) - *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL); + *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); if (!*p_hv_pa_pg) return -ENOMEM; @@ -879,6 +936,24 @@ static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr) return true; } +unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx) +{ + unsigned int flags = 0; + + if (vmx->loaded_vmcs->launched) + flags |= VMX_RUN_VMRESUME; + + /* + * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free + * to change it directly without causing a vmexit. In that case read + * it after vmexit and store it in vmx->spec_ctrl. + */ + if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))) + flags |= VMX_RUN_SAVE_SPEC_CTRL; + + return flags; +} + static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit) { @@ -1662,7 +1737,17 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu) kvm_deliver_exception_payload(vcpu); if (has_error_code) { - vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); + /* + * Despite the error code being architecturally defined as 32 + * bits, and the VMCS field being 32 bits, Intel CPUs and thus + * VMX don't actually support setting bits 31:16. Hardware + * will (should) never provide a bogus error code, but AMD CPUs + * do generate error codes with bits 31:16 set, and so KVM's + * ABI lets userspace shove in arbitrary 32-bit values. Drop + * the upper bits to avoid VM-Fail; losing information that + * doesn't really exist is preferable to killing the VM. + */ + vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)error_code); intr_info |= INTR_INFO_DELIVER_CODE_MASK; } @@ -2211,6 +2296,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ret = kvm_set_msr_common(vcpu, msr_info); } + /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */ + if (msr_index == MSR_IA32_ARCH_CAPABILITIES) + vmx_update_fb_clear_dis(vcpu, vmx); + return ret; } @@ -3104,7 +3193,23 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd, vmcs_writel(GUEST_CR3, guest_cr3); } -int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +{ + /* + * We operate under the default treatment of SMM, so VMX cannot be + * enabled under SMM. Note, whether or not VMXE is allowed at all is + * handled by kvm_valid_cr4(). 
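With vmx_set_cr4() no longer able to fail, the common-code shape (see the x86.c hunks later in this series) becomes a two-step check-then-commit. A condensed sketch:

static int set_cr4_sketch(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	/* validity, including the VMXE/SMM rule above, is checked up front */
	if (!kvm_x86_ops.is_valid_cr4(vcpu, cr4))
		return -EINVAL;

	kvm_x86_ops.set_cr4(vcpu, cr4);	/* void: can no longer fail */
	return 0;
}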
+ */ + if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu)) + return false; + + if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) + return false; + + return true; +} + +void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { struct vcpu_vmx *vmx = to_vmx(vcpu); /* @@ -3132,21 +3237,6 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) } } - if (cr4 & X86_CR4_VMXE) { - /* - * To use VMXON (and later other VMX instructions), a guest - * must first be able to turn on cr4.VMXE (see handle_vmon()). - * So basically the check on whether to allow nested VMX - * is here. We operate under the default treatment of SMM, - * so VMX cannot be enabled under SMM. - */ - if (!nested_vmx_allowed(vcpu) || is_smm(vcpu)) - return 1; - } - - if (vmx->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) - return 1; - vcpu->arch.cr4 = cr4; kvm_register_mark_available(vcpu, VCPU_EXREG_CR4); @@ -3177,7 +3267,6 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) vmcs_writel(CR4_READ_SHADOW, cr4); vmcs_writel(GUEST_CR4, hw_cr4); - return 0; } void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) @@ -4483,6 +4572,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vpid_sync_context(vmx->vpid); if (init_event) vmx_clear_hlt(vcpu); + + vmx_update_fb_clear_dis(vcpu, vmx); } static void enable_irq_window(struct kvm_vcpu *vcpu) @@ -6612,6 +6703,31 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) } } +void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, + unsigned int flags) +{ + u64 hostval = this_cpu_read(x86_spec_ctrl_current); + + if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) + return; + + if (flags & VMX_RUN_SAVE_SPEC_CTRL) + vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL); + + /* + * If the guest/host SPEC_CTRL values differ, restore the host value. + * + * For legacy IBRS, the IBRS bit always needs to be written after + * transitioning from a less privileged predictor mode, regardless of + * whether the guest/host values differ. + */ + if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) || + vmx->spec_ctrl != hostval) + native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval); + + barrier_nospec(); +} + static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) { switch (to_vmx(vcpu)->exit_reason.basic) { @@ -6624,10 +6740,9 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) } } -bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); - static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, - struct vcpu_vmx *vmx) + struct vcpu_vmx *vmx, + unsigned long flags) { /* * VMENTER enables interrupts (host state), but the kernel state is @@ -6654,15 +6769,22 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, vmx_l1d_flush(vcpu); else if (static_branch_unlikely(&mds_user_clear)) mds_clear_cpu_buffers(); + else if (static_branch_unlikely(&mmio_stale_data_clear) && + kvm_arch_has_assigned_device(vcpu->kvm)) + mds_clear_cpu_buffers(); + + vmx_disable_fb_clear(vmx); if (vcpu->arch.cr2 != native_read_cr2()) native_write_cr2(vcpu->arch.cr2); vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, - vmx->loaded_vmcs->launched); + flags); vcpu->arch.cr2 = native_read_cr2(); + vmx_enable_fb_clear(vmx); + /* * VMEXIT disables interrupts (host state), but tracing and lockdep * have them in state 'on' as recorded before entering guest mode. 
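Putting the vmenter.S and vmx.c pieces together, each entry now flows roughly as follows (a condensed sketch of the real call chain; error handling elided):

static void vcpu_enter_sketch(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
	unsigned int flags = __vmx_vcpu_run_flags(vmx);

	vmx_disable_fb_clear(vmx);	/* maybe set FB_CLEAR_DIS */
	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
				   flags);
	/*
	 * The asm epilogue has already done FILL_RETURN_BUFFER and called
	 * vmx_spec_ctrl_restore_host(vmx, flags) before its first RET.
	 */
	vmx_enable_fb_clear(vmx);
}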
@@ -6756,27 +6878,7 @@ reenter_guest: x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); /* The actual VMENTER/EXIT is in the .noinstr.text section. */ - vmx_vcpu_enter_exit(vcpu, vmx); - - /* - * We do not use IBRS in the kernel. If this vCPU has used the - * SPEC_CTRL MSR it may have left it on; save the value and - * turn it off. This is much more efficient than blindly adding - * it to the atomic save/restore list. Especially as the former - * (Saving guest MSRs on vmexit) doesn't even exist in KVM. - * - * For non-nested case: - * If the L01 MSR bitmap does not intercept the MSR, then we need to - * save it. - * - * For nested case: - * If the L02 MSR bitmap does not intercept the MSR, then we need to - * save it. - */ - if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))) - vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); - - x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); + vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx)); /* All fields are clean at this point */ if (static_branch_unlikely(&enable_evmcs)) @@ -7660,6 +7762,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .get_cpl = vmx_get_cpl, .get_cs_db_l_bits = vmx_get_cs_db_l_bits, .set_cr0 = vmx_set_cr0, + .is_valid_cr4 = vmx_is_valid_cr4, .set_cr4 = vmx_set_cr4, .set_efer = vmx_set_efer, .get_idt = vmx_get_idt, @@ -8047,6 +8150,8 @@ static int __init vmx_init(void) return r; } + vmx_setup_fb_clear_ctrl(); + for_each_possible_cpu(cpu) { INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 5ff24537393e..ed4b6da83aa8 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -13,6 +13,7 @@ #include "vmcs.h" #include "vmx_ops.h" #include "cpuid.h" +#include "run_flags.h" extern const u32 vmx_msr_index[]; @@ -300,6 +301,8 @@ struct vcpu_vmx { u64 msr_ia32_feature_control; u64 msr_ia32_feature_control_valid_bits; u64 ept_pointer; + u64 msr_ia32_mcu_opt_ctrl; + bool disable_fb_clear; struct pt_desc pt_desc; @@ -344,7 +347,7 @@ u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu); void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask); int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer); void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); -int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); +void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); void set_cr4_guest_host_mask(struct vcpu_vmx *vmx); void ept_save_pdptrs(struct kvm_vcpu *vcpu); void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); @@ -363,6 +366,10 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu); struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr); void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu); void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp); +void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags); +unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx); +bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, + unsigned int flags); int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr); void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu); @@ -379,9 +386,13 @@ static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \ vmx->loaded_vmcs->controls_shadow.lname = val; \ } \ } \ +static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs) \ +{ \ + return vmcs->controls_shadow.lname; \ +} \ static inline u32 lname##_controls_get(struct vcpu_vmx *vmx) \ { \ - return vmx->loaded_vmcs->controls_shadow.lname; \ + return 
__##lname##_controls_get(vmx->loaded_vmcs); \ } \ static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \ { \ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a5d6d79b023b..23d7c563e012 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -459,6 +459,7 @@ static int exception_class(int vector) #define EXCPT_TRAP 1 #define EXCPT_ABORT 2 #define EXCPT_INTERRUPT 3 +#define EXCPT_DB 4 static int exception_type(int vector) { @@ -469,8 +470,14 @@ static int exception_type(int vector) mask = 1 << vector; - /* #DB is trap, as instruction watchpoints are handled elsewhere */ - if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR))) + /* + * #DBs can be trap-like or fault-like; the caller must check other CPU + * state, e.g. DR6, to determine whether a #DB is a trap or fault. + */ + if (mask & (1 << DB_VECTOR)) + return EXCPT_DB; + + if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR))) return EXCPT_TRAP; if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR))) @@ -986,6 +993,9 @@ int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) return -EINVAL; + if (!kvm_x86_ops.is_valid_cr4(vcpu, cr4)) + return -EINVAL; + return 0; } EXPORT_SYMBOL_GPL(kvm_valid_cr4); @@ -1020,8 +1030,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) return 1; } - if (kvm_x86_ops.set_cr4(vcpu, cr4)) - return 1; + kvm_x86_ops.set_cr4(vcpu, cr4); if (((cr4 ^ old_cr4) & mmu_role_bits) || (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) @@ -1353,7 +1362,7 @@ static const u32 msr_based_features_all[] = { MSR_IA32_VMX_EPT_VPID_CAP, MSR_IA32_VMX_VMFUNC, - MSR_F10H_DECFG, + MSR_AMD64_DE_CFG, MSR_IA32_UCODE_REV, MSR_IA32_ARCH_CAPABILITIES, MSR_IA32_PERF_CAPABILITIES, @@ -1362,12 +1371,32 @@ static const u32 msr_based_features_all[] = { static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)]; static unsigned int num_msr_based_features; +/* + * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM + * does not yet virtualize. These include: + * 10 - MISC_PACKAGE_CTRLS + * 11 - ENERGY_FILTERING_CTL + * 12 - DOITM + * 18 - FB_CLEAR_CTRL + * 21 - XAPIC_DISABLE_STATUS + * 23 - OVERCLOCKING_STATUS + */ + +#define KVM_SUPPORTED_ARCH_CAP \ + (ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \ + ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \ + ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \ + ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \ + ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO) + static u64 kvm_get_arch_capabilities(void) { u64 data = 0; - if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data); + data &= KVM_SUPPORTED_ARCH_CAP; + } /* * If nx_huge_pages is enabled, KVM's shadow paging will ensure that @@ -2859,17 +2888,20 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* only 0 or all 1s can be written to IA32_MCi_CTL * some Linux kernels though clear bit 10 in bank 4 to * workaround a BIOS/GART TBL issue on AMD K8s, ignore - * this to avoid an uncatched #GP in the guest + * this to avoid an uncaught #GP in the guest. + * + * UNIXWARE clears bit 0 of MC1_CTL to ignore + * correctable, single-bit ECC data errors. 
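The relaxed IA32_MCi_CTL check applied in the hunk just below accepts all-ones modulo bit 10 (K8 GART workaround) and bit 0 (UnixWare); any other non-zero value is rejected. A standalone illustration of the bit math:

#include <stdbool.h>
#include <stdint.h>

static bool mci_ctl_write_ok(uint64_t data)
{
	/* zero, or all-ones with bits 10 and/or 0 possibly cleared */
	return data == 0 || (data | (1 << 10) | 1) == ~(uint64_t)0;
}

/*
 * mci_ctl_write_ok(~0ULL)             -> true
 * mci_ctl_write_ok(~0ULL ^ 1)         -> true  (bit 0 cleared)
 * mci_ctl_write_ok(~0ULL ^ (1 << 10)) -> true  (bit 10 cleared)
 * mci_ctl_write_ok(0x1)               -> false
 */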
*/ if ((offset & 0x3) == 0 && - data != 0 && (data | (1 << 10)) != ~(u64)0) - return -1; + data != 0 && (data | (1 << 10) | 1) != ~(u64)0) + return 1; /* MCi_STATUS */ if (!msr_info->host_initiated && (offset & 0x3) == 1 && data != 0) { if (!can_set_mci_status(vcpu)) - return -1; + return 1; } vcpu->arch.mce_banks[offset] = data; @@ -5328,6 +5360,11 @@ split_irqchip_unlock: r = 0; break; case KVM_CAP_X86_USER_SPACE_MSR: + r = -EINVAL; + if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL | + KVM_MSR_EXIT_REASON_UNKNOWN | + KVM_MSR_EXIT_REASON_FILTER)) + break; kvm->arch.user_space_msr_mask = cap->args[0]; r = 0; break; @@ -5409,23 +5446,22 @@ err: return r; } -static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) +static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, + struct kvm_msr_filter *filter) { - struct kvm_msr_filter __user *user_msr_filter = argp; struct kvm_x86_msr_filter *new_filter, *old_filter; - struct kvm_msr_filter filter; bool default_allow; bool empty = true; int r = 0; u32 i; - if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) - return -EFAULT; + if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY) + return -EINVAL; - for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) - empty &= !filter.ranges[i].nmsrs; + for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) + empty &= !filter->ranges[i].nmsrs; - default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY); + default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY); if (empty && !default_allow) return -EINVAL; @@ -5433,8 +5469,8 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) if (!new_filter) return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { - r = kvm_add_msr_filter(new_filter, &filter.ranges[i]); + for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { + r = kvm_add_msr_filter(new_filter, &filter->ranges[i]); if (r) { kvm_free_msr_filter(new_filter); return r; @@ -5457,6 +5493,62 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) return 0; } +#ifdef CONFIG_KVM_COMPAT +/* for KVM_X86_SET_MSR_FILTER */ +struct kvm_msr_filter_range_compat { + __u32 flags; + __u32 nmsrs; + __u32 base; + __u32 bitmap; +}; + +struct kvm_msr_filter_compat { + __u32 flags; + struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES]; +}; + +#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat) + +long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct kvm *kvm = filp->private_data; + long r = -ENOTTY; + + switch (ioctl) { + case KVM_X86_SET_MSR_FILTER_COMPAT: { + struct kvm_msr_filter __user *user_msr_filter = argp; + struct kvm_msr_filter_compat filter_compat; + struct kvm_msr_filter filter; + int i; + + if (copy_from_user(&filter_compat, user_msr_filter, + sizeof(filter_compat))) + return -EFAULT; + + filter.flags = filter_compat.flags; + for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { + struct kvm_msr_filter_range_compat *cr; + + cr = &filter_compat.ranges[i]; + filter.ranges[i] = (struct kvm_msr_filter_range) { + .flags = cr->flags, + .nmsrs = cr->nmsrs, + .base = cr->base, + .bitmap = (__u8 *)(ulong)cr->bitmap, + }; + } + + r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); + break; + } + } + + return r; +} +#endif + long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -5763,9 +5855,16 @@ set_pit2_out: case KVM_SET_PMU_EVENT_FILTER: r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp); 
break; - case KVM_X86_SET_MSR_FILTER: - r = kvm_vm_ioctl_set_msr_filter(kvm, argp); + case KVM_X86_SET_MSR_FILTER: { + struct kvm_msr_filter __user *user_msr_filter = argp; + struct kvm_msr_filter filter; + + if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) + return -EFAULT; + + r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); break; + } default: r = -ENOTTY; } @@ -6875,6 +6974,11 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt) return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR); } +static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt) +{ + return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID); +} + static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) { return kvm_register_read(emul_to_vcpu(ctxt), reg); @@ -6958,6 +7062,7 @@ static const struct x86_emulate_ops emulate_ops = { .guest_has_long_mode = emulator_guest_has_long_mode, .guest_has_movbe = emulator_guest_has_movbe, .guest_has_fxsr = emulator_guest_has_fxsr, + .guest_has_rdpid = emulator_guest_has_rdpid, .set_nmi_mask = emulator_set_nmi_mask, .get_hflags = emulator_get_hflags, .set_hflags = emulator_set_hflags, @@ -7289,7 +7394,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); -static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) +static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r) { if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { @@ -7358,25 +7463,23 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) } /* - * Decode to be emulated instruction. Return EMULATION_OK if success. + * Decode an instruction for emulation. The caller is responsible for handling + * code breakpoints. Note, manually detecting code breakpoints is unnecessary + * (and wrong) when emulating on an intercepted fault-like exception[*], as + * code breakpoints have higher priority and thus have already been done by + * hardware. + * + * [*] Except #MC, which is higher priority, but KVM should never emulate in + * response to a machine check. */ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, void *insn, int insn_len) { - int r = EMULATION_OK; struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; + int r; init_emulate_ctxt(vcpu); - /* - * We will reenter on the same instruction since we do not set - * complete_userspace_io. This does not handle watchpoints yet, - * those would be handled in the emulate_ops. - */ - if (!(emulation_type & EMULTYPE_SKIP) && - kvm_vcpu_check_breakpoint(vcpu, &r)) - return r; - ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; r = x86_decode_insn(ctxt, insn, insn_len); @@ -7411,6 +7514,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, if (!(emulation_type & EMULTYPE_NO_DECODE)) { kvm_clear_exception_queue(vcpu); + /* + * Return immediately if RIP hits a code breakpoint, such #DBs + * are fault-like and are higher priority than any faults on + * the code fetch itself. 
+ */ + if (!(emulation_type & EMULTYPE_SKIP) && + kvm_vcpu_check_code_breakpoint(vcpu, &r)) + return r; + r = x86_decode_emulated_instruction(vcpu, emulation_type, insn, insn_len); if (r != EMULATION_OK) { @@ -7522,6 +7634,12 @@ restart: unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; + + /* + * Note, EXCPT_DB is assumed to be fault-like as the emulator + * only supports code breakpoints and general detect #DB, both + * of which are fault-like. + */ if (!ctxt->have_exception || exception_type(ctxt->exception.vector) == EXCPT_TRAP) { kvm_rip_write(vcpu, ctxt->eip); @@ -7999,7 +8117,7 @@ int kvm_arch_init(void *opaque) goto out_free_x86_emulator_cache; } - r = kvm_mmu_module_init(); + r = kvm_mmu_vendor_module_init(); if (r) goto out_free_percpu; @@ -8059,7 +8177,7 @@ void kvm_arch_exit(void) cancel_work_sync(&pvclock_gtod_work); #endif kvm_x86_ops.hardware_enable = NULL; - kvm_mmu_module_exit(); + kvm_mmu_vendor_module_exit(); free_percpu(user_return_msrs); kmem_cache_destroy(x86_emulator_cache); kmem_cache_destroy(x86_fpu_cache); @@ -8126,15 +8244,17 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, */ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) { - struct kvm_lapic_irq lapic_irq; + /* + * All other fields are unused for APIC_DM_REMRD, but may be consumed by + * common code, e.g. for tracing. Defer initialization to the compiler. + */ + struct kvm_lapic_irq lapic_irq = { + .delivery_mode = APIC_DM_REMRD, + .dest_mode = APIC_DEST_PHYSICAL, + .shorthand = APIC_DEST_NOSHORT, + .dest_id = apicid, + }; - lapic_irq.shorthand = APIC_DEST_NOSHORT; - lapic_irq.dest_mode = APIC_DEST_PHYSICAL; - lapic_irq.level = 0; - lapic_irq.dest_id = apicid; - lapic_irq.msi_redir_hint = false; - - lapic_irq.delivery_mode = APIC_DM_REMRD; kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); } @@ -8307,6 +8427,11 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu) static void kvm_inject_exception(struct kvm_vcpu *vcpu) { + trace_kvm_inj_exception(vcpu->arch.exception.nr, + vcpu->arch.exception.has_error_code, + vcpu->arch.exception.error_code, + vcpu->arch.exception.injected); + if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) vcpu->arch.exception.error_code = false; kvm_x86_ops.queue_exception(vcpu); @@ -8364,13 +8489,16 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit /* try to inject new event if pending */ if (vcpu->arch.exception.pending) { - trace_kvm_inj_exception(vcpu->arch.exception.nr, - vcpu->arch.exception.has_error_code, - vcpu->arch.exception.error_code); - - vcpu->arch.exception.pending = false; - vcpu->arch.exception.injected = true; - + /* + * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS + * value pushed on the stack. Trap-like exceptions and all #DBs + * leave RF as-is (KVM follows Intel's behavior in this regard; + * AMD states that code breakpoint #DBs explicitly clear RF=0). + * + * Note, most versions of Intel's SDM and AMD's APM incorrectly + * describe the behavior of General Detect #DBs, which are + * fault-like. They do _not_ set RF, a la code breakpoints. 
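With the new EXCPT_DB class from the x86.c hunk above, the RF rule described here reduces to a single test. A sketch:

static unsigned long rflags_on_inject(int vector, unsigned long rflags)
{
	/* fault-class exceptions, except #DBs, get RF=1 on injection */
	if (exception_type(vector) == EXCPT_FAULT)
		rflags |= X86_EFLAGS_RF;

	/* EXCPT_DB and EXCPT_TRAP leave RF untouched */
	return rflags;
}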
+ */ if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | X86_EFLAGS_RF); @@ -8384,6 +8512,10 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit } kvm_inject_exception(vcpu); + + vcpu->arch.exception.pending = false; + vcpu->arch.exception.injected = true; + can_inject = false; } @@ -8835,6 +8967,12 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); } +void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) +{ + if (kvm_x86_ops.guest_memory_reclaimed) + kvm_x86_ops.guest_memory_reclaimed(kvm); +} + void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) { if (!lapic_in_kernel(vcpu)) @@ -11140,7 +11278,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) if (!kvm_pv_async_pf_enabled(vcpu)) return true; else - return apf_pageready_slot_free(vcpu); + return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu); } void kvm_arch_start_assignment(struct kvm *kvm) @@ -11155,9 +11293,9 @@ void kvm_arch_end_assignment(struct kvm *kvm) } EXPORT_SYMBOL_GPL(kvm_arch_end_assignment); -bool kvm_arch_has_assigned_device(struct kvm *kvm) +bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm) { - return atomic_read(&kvm->arch.assigned_device_count); + return arch_atomic_read(&kvm->arch.assigned_device_count); } EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device); @@ -11420,3 +11558,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request); + +static int __init kvm_x86_init(void) +{ + kvm_mmu_x86_module_init(); + return 0; +} +module_init(kvm_x86_init); + +static void __exit kvm_x86_exit(void) +{ + /* + * If module_init() is implemented, module_exit() must also be + * implemented to allow module unload. 
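The empty exit handler above exists purely so the module can still be unloaded. The general pattern, as an illustrative skeleton (names invented):

#include <linux/module.h>

static int __init demo_init(void)
{
	return 0;
}
module_init(demo_init);

/* an empty __exit body is enough; omitting module_exit() pins the module */
static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");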
+ */ +} +module_exit(kvm_x86_exit); diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S index 3b6544111ac9..e768815e58ae 100644 --- a/arch/x86/lib/atomic64_386_32.S +++ b/arch/x86/lib/atomic64_386_32.S @@ -6,84 +6,86 @@ */ #include -#include +#include /* if you want SMP support, implement these with real spinlocks */ -.macro LOCK reg +.macro IRQ_SAVE reg pushfl cli .endm -.macro UNLOCK reg +.macro IRQ_RESTORE reg popfl .endm -#define BEGIN(op) \ +#define BEGIN_IRQ_SAVE(op) \ .macro endp; \ SYM_FUNC_END(atomic64_##op##_386); \ .purgem endp; \ .endm; \ SYM_FUNC_START(atomic64_##op##_386); \ - LOCK v; + IRQ_SAVE v; #define ENDP endp -#define RET \ - UNLOCK v; \ - ret - -#define RET_ENDP \ - RET; \ - ENDP +#define RET_IRQ_RESTORE \ + IRQ_RESTORE v; \ + RET #define v %ecx -BEGIN(read) +BEGIN_IRQ_SAVE(read) movl (v), %eax movl 4(v), %edx -RET_ENDP + RET_IRQ_RESTORE +ENDP #undef v #define v %esi -BEGIN(set) +BEGIN_IRQ_SAVE(set) movl %ebx, (v) movl %ecx, 4(v) -RET_ENDP + RET_IRQ_RESTORE +ENDP #undef v #define v %esi -BEGIN(xchg) +BEGIN_IRQ_SAVE(xchg) movl (v), %eax movl 4(v), %edx movl %ebx, (v) movl %ecx, 4(v) -RET_ENDP + RET_IRQ_RESTORE +ENDP #undef v #define v %ecx -BEGIN(add) +BEGIN_IRQ_SAVE(add) addl %eax, (v) adcl %edx, 4(v) -RET_ENDP + RET_IRQ_RESTORE +ENDP #undef v #define v %ecx -BEGIN(add_return) +BEGIN_IRQ_SAVE(add_return) addl (v), %eax adcl 4(v), %edx movl %eax, (v) movl %edx, 4(v) -RET_ENDP + RET_IRQ_RESTORE +ENDP #undef v #define v %ecx -BEGIN(sub) +BEGIN_IRQ_SAVE(sub) subl %eax, (v) sbbl %edx, 4(v) -RET_ENDP + RET_IRQ_RESTORE +ENDP #undef v #define v %ecx -BEGIN(sub_return) +BEGIN_IRQ_SAVE(sub_return) negl %edx negl %eax sbbl $0, %edx @@ -91,47 +93,52 @@ BEGIN(sub_return) adcl 4(v), %edx movl %eax, (v) movl %edx, 4(v) -RET_ENDP + RET_IRQ_RESTORE +ENDP #undef v #define v %esi -BEGIN(inc) +BEGIN_IRQ_SAVE(inc) addl $1, (v) adcl $0, 4(v) -RET_ENDP + RET_IRQ_RESTORE +ENDP #undef v #define v %esi -BEGIN(inc_return) +BEGIN_IRQ_SAVE(inc_return) movl (v), %eax movl 4(v), %edx addl $1, %eax adcl $0, %edx movl %eax, (v) movl %edx, 4(v) -RET_ENDP + RET_IRQ_RESTORE +ENDP #undef v #define v %esi -BEGIN(dec) +BEGIN_IRQ_SAVE(dec) subl $1, (v) sbbl $0, 4(v) -RET_ENDP + RET_IRQ_RESTORE +ENDP #undef v #define v %esi -BEGIN(dec_return) +BEGIN_IRQ_SAVE(dec_return) movl (v), %eax movl 4(v), %edx subl $1, %eax sbbl $0, %edx movl %eax, (v) movl %edx, 4(v) -RET_ENDP + RET_IRQ_RESTORE +ENDP #undef v #define v %esi -BEGIN(add_unless) +BEGIN_IRQ_SAVE(add_unless) addl %eax, %ecx adcl %edx, %edi addl (v), %eax @@ -143,7 +150,7 @@ BEGIN(add_unless) movl %edx, 4(v) movl $1, %eax 2: - RET + RET_IRQ_RESTORE 3: cmpl %edx, %edi jne 1b @@ -153,7 +160,7 @@ ENDP #undef v #define v %esi -BEGIN(inc_not_zero) +BEGIN_IRQ_SAVE(inc_not_zero) movl (v), %eax movl 4(v), %edx testl %eax, %eax @@ -165,7 +172,7 @@ BEGIN(inc_not_zero) movl %edx, 4(v) movl $1, %eax 2: - RET + RET_IRQ_RESTORE 3: testl %edx, %edx jne 1b @@ -174,7 +181,7 @@ ENDP #undef v #define v %esi -BEGIN(dec_if_positive) +BEGIN_IRQ_SAVE(dec_if_positive) movl (v), %eax movl 4(v), %edx subl $1, %eax @@ -183,5 +190,6 @@ BEGIN(dec_if_positive) movl %eax, (v) movl %edx, 4(v) 1: -RET_ENDP + RET_IRQ_RESTORE +ENDP #undef v diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S index 1c5c81c16b06..90afb488b396 100644 --- a/arch/x86/lib/atomic64_cx8_32.S +++ b/arch/x86/lib/atomic64_cx8_32.S @@ -6,7 +6,7 @@ */ #include -#include +#include .macro read64 reg movl %ebx, %eax @@ -18,7 +18,7 @@ SYM_FUNC_START(atomic64_read_cx8) read64 
%ecx - ret + RET SYM_FUNC_END(atomic64_read_cx8) SYM_FUNC_START(atomic64_set_cx8) @@ -28,7 +28,7 @@ SYM_FUNC_START(atomic64_set_cx8) cmpxchg8b (%esi) jne 1b - ret + RET SYM_FUNC_END(atomic64_set_cx8) SYM_FUNC_START(atomic64_xchg_cx8) @@ -37,7 +37,7 @@ SYM_FUNC_START(atomic64_xchg_cx8) cmpxchg8b (%esi) jne 1b - ret + RET SYM_FUNC_END(atomic64_xchg_cx8) .macro addsub_return func ins insc @@ -68,7 +68,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8) popl %esi popl %ebx popl %ebp - ret + RET SYM_FUNC_END(atomic64_\func\()_return_cx8) .endm @@ -93,7 +93,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8) movl %ebx, %eax movl %ecx, %edx popl %ebx - ret + RET SYM_FUNC_END(atomic64_\func\()_return_cx8) .endm @@ -118,7 +118,7 @@ SYM_FUNC_START(atomic64_dec_if_positive_cx8) movl %ebx, %eax movl %ecx, %edx popl %ebx - ret + RET SYM_FUNC_END(atomic64_dec_if_positive_cx8) SYM_FUNC_START(atomic64_add_unless_cx8) @@ -149,7 +149,7 @@ SYM_FUNC_START(atomic64_add_unless_cx8) addl $8, %esp popl %ebx popl %ebp - ret + RET 4: cmpl %edx, 4(%esp) jne 2b @@ -176,5 +176,5 @@ SYM_FUNC_START(atomic64_inc_not_zero_cx8) movl $1, %eax 3: popl %ebx - ret + RET SYM_FUNC_END(atomic64_inc_not_zero_cx8) diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index 4304320e51f4..929ad1747dea 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -127,7 +127,7 @@ SYM_FUNC_START(csum_partial) 8: popl %ebx popl %esi - ret + RET SYM_FUNC_END(csum_partial) #else @@ -245,7 +245,7 @@ SYM_FUNC_START(csum_partial) 90: popl %ebx popl %esi - ret + RET SYM_FUNC_END(csum_partial) #endif @@ -371,7 +371,7 @@ EXC( movb %cl, (%edi) ) popl %esi popl %edi popl %ecx # equivalent to addl $4,%esp - ret + RET SYM_FUNC_END(csum_partial_copy_generic) #else @@ -447,7 +447,7 @@ EXC( movb %dl, (%edi) ) popl %esi popl %edi popl %ebx - ret + RET SYM_FUNC_END(csum_partial_copy_generic) #undef ROUND diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S index c4c7dd115953..fe59b8ac4fcc 100644 --- a/arch/x86/lib/clear_page_64.S +++ b/arch/x86/lib/clear_page_64.S @@ -17,7 +17,7 @@ SYM_FUNC_START(clear_page_rep) movl $4096/8,%ecx xorl %eax,%eax rep stosq - ret + RET SYM_FUNC_END(clear_page_rep) EXPORT_SYMBOL_GPL(clear_page_rep) @@ -39,7 +39,7 @@ SYM_FUNC_START(clear_page_orig) leaq 64(%rdi),%rdi jnz .Lloop nop - ret + RET SYM_FUNC_END(clear_page_orig) EXPORT_SYMBOL_GPL(clear_page_orig) @@ -47,6 +47,6 @@ SYM_FUNC_START(clear_page_erms) movl $4096,%ecx xorl %eax,%eax rep stosb - ret + RET SYM_FUNC_END(clear_page_erms) EXPORT_SYMBOL_GPL(clear_page_erms) diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S index 3542502faa3b..33c70c0160ea 100644 --- a/arch/x86/lib/cmpxchg16b_emu.S +++ b/arch/x86/lib/cmpxchg16b_emu.S @@ -37,11 +37,11 @@ SYM_FUNC_START(this_cpu_cmpxchg16b_emu) popfq mov $1, %al - ret + RET .Lnot_same: popfq xor %al,%al - ret + RET SYM_FUNC_END(this_cpu_cmpxchg16b_emu) diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S index ca01ed6029f4..6a912d58fecc 100644 --- a/arch/x86/lib/cmpxchg8b_emu.S +++ b/arch/x86/lib/cmpxchg8b_emu.S @@ -32,7 +32,7 @@ SYM_FUNC_START(cmpxchg8b_emu) movl %ecx, 4(%esi) popfl - ret + RET .Lnot_same: movl (%esi), %eax @@ -40,7 +40,7 @@ SYM_FUNC_START(cmpxchg8b_emu) movl 4(%esi), %edx popfl - ret + RET SYM_FUNC_END(cmpxchg8b_emu) EXPORT_SYMBOL(cmpxchg8b_emu) diff --git a/arch/x86/lib/copy_mc_64.S b/arch/x86/lib/copy_mc_64.S index 892d8915f609..fdd1929b1f9d 100644 --- a/arch/x86/lib/copy_mc_64.S +++ b/arch/x86/lib/copy_mc_64.S @@ -86,7 
+86,7 @@ SYM_FUNC_START(copy_mc_fragile) .L_done_memcpy_trap: xorl %eax, %eax .L_done: - ret + RET SYM_FUNC_END(copy_mc_fragile) EXPORT_SYMBOL_GPL(copy_mc_fragile) @@ -142,7 +142,7 @@ SYM_FUNC_START(copy_mc_enhanced_fast_string) rep movsb /* Copy successful. Return zero */ xorl %eax, %eax - ret + RET SYM_FUNC_END(copy_mc_enhanced_fast_string) .section .fixup, "ax" @@ -155,7 +155,7 @@ SYM_FUNC_END(copy_mc_enhanced_fast_string) * user-copy routines. */ movq %rcx, %rax - ret + RET .previous diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S index 2402d4c489d2..30ea644bf446 100644 --- a/arch/x86/lib/copy_page_64.S +++ b/arch/x86/lib/copy_page_64.S @@ -3,7 +3,7 @@ #include #include -#include +#include #include /* @@ -17,7 +17,7 @@ SYM_FUNC_START(copy_page) ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD movl $4096/8, %ecx rep movsq - ret + RET SYM_FUNC_END(copy_page) EXPORT_SYMBOL(copy_page) @@ -85,5 +85,5 @@ SYM_FUNC_START_LOCAL(copy_page_regs) movq (%rsp), %rbx movq 1*8(%rsp), %r12 addq $2*8, %rsp - ret + RET SYM_FUNC_END(copy_page_regs) diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 77b9b2a3b5c8..84cee84fc658 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include @@ -105,7 +105,7 @@ SYM_FUNC_START(copy_user_generic_unrolled) jnz 21b 23: xor %eax,%eax ASM_CLAC - ret + RET .section .fixup,"ax" 30: shll $6,%ecx @@ -173,7 +173,7 @@ SYM_FUNC_START(copy_user_generic_string) movsb xorl %eax,%eax ASM_CLAC - ret + RET .section .fixup,"ax" 11: leal (%rdx,%rcx,8),%ecx @@ -207,7 +207,7 @@ SYM_FUNC_START(copy_user_enhanced_fast_string) movsb xorl %eax,%eax ASM_CLAC - ret + RET .section .fixup,"ax" 12: movl %ecx,%edx /* ecx is zerorest also */ @@ -239,7 +239,7 @@ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail) 1: rep movsb 2: mov %ecx,%eax ASM_CLAC - ret + RET /* * Return zero to pretend that this copy succeeded. 
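For context, the zero-on-success convention that .Lcopy_user_handle_tail preserves is the one every caller relies on. The canonical caller-side idiom:

static int fetch_sketch(void *dst, const void __user *src, unsigned long len)
{
	/* copy_from_user() returns the number of bytes NOT copied; 0 == success */
	if (copy_from_user(dst, src, len))
		return -EFAULT;
	return 0;
}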
This @@ -250,7 +250,7 @@ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail) */ 3: xorl %eax,%eax ASM_CLAC - ret + RET _ASM_EXTABLE_CPY(1b, 2b) SYM_CODE_END(.Lcopy_user_handle_tail) @@ -361,7 +361,7 @@ SYM_FUNC_START(__copy_user_nocache) xorl %eax,%eax ASM_CLAC sfence - ret + RET .section .fixup,"ax" .L_fixup_4x8b_copy: diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S index 1fbd8ee9642d..d9e16a2cf285 100644 --- a/arch/x86/lib/csum-copy_64.S +++ b/arch/x86/lib/csum-copy_64.S @@ -201,7 +201,7 @@ SYM_FUNC_START(csum_partial_copy_generic) movq 3*8(%rsp), %r13 movq 4*8(%rsp), %r15 addq $5*8, %rsp - ret + RET .Lshort: movl %ecx, %r10d jmp .L1 diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 65d15df6212d..0e65d00e2339 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -54,8 +54,8 @@ static void delay_loop(u64 __loops) " jnz 2b \n" "3: dec %0 \n" - : /* we don't need output */ - :"a" (loops) + : "+a" (loops) + : ); } diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c index be5b5fb1598b..520897061ee0 100644 --- a/arch/x86/lib/error-inject.c +++ b/arch/x86/lib/error-inject.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include @@ -10,7 +11,7 @@ asm( ".type just_return_func, @function\n" ".globl just_return_func\n" "just_return_func:\n" - " ret\n" + ASM_RET ".size just_return_func, .-just_return_func\n" ); diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index fa1bc2104b32..b70d98d79a9d 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -57,7 +57,7 @@ SYM_FUNC_START(__get_user_1) 1: movzbl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC - ret + RET SYM_FUNC_END(__get_user_1) EXPORT_SYMBOL(__get_user_1) @@ -71,7 +71,7 @@ SYM_FUNC_START(__get_user_2) 2: movzwl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC - ret + RET SYM_FUNC_END(__get_user_2) EXPORT_SYMBOL(__get_user_2) @@ -85,7 +85,7 @@ SYM_FUNC_START(__get_user_4) 3: movl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC - ret + RET SYM_FUNC_END(__get_user_4) EXPORT_SYMBOL(__get_user_4) @@ -100,7 +100,7 @@ SYM_FUNC_START(__get_user_8) 4: movq (%_ASM_AX),%rdx xor %eax,%eax ASM_CLAC - ret + RET #else LOAD_TASK_SIZE_MINUS_N(7) cmp %_ASM_DX,%_ASM_AX @@ -112,7 +112,7 @@ SYM_FUNC_START(__get_user_8) 5: movl 4(%_ASM_AX),%ecx xor %eax,%eax ASM_CLAC - ret + RET #endif SYM_FUNC_END(__get_user_8) EXPORT_SYMBOL(__get_user_8) @@ -124,7 +124,7 @@ SYM_FUNC_START(__get_user_nocheck_1) 6: movzbl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC - ret + RET SYM_FUNC_END(__get_user_nocheck_1) EXPORT_SYMBOL(__get_user_nocheck_1) @@ -134,7 +134,7 @@ SYM_FUNC_START(__get_user_nocheck_2) 7: movzwl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC - ret + RET SYM_FUNC_END(__get_user_nocheck_2) EXPORT_SYMBOL(__get_user_nocheck_2) @@ -144,7 +144,7 @@ SYM_FUNC_START(__get_user_nocheck_4) 8: movl (%_ASM_AX),%edx xor %eax,%eax ASM_CLAC - ret + RET SYM_FUNC_END(__get_user_nocheck_4) EXPORT_SYMBOL(__get_user_nocheck_4) @@ -159,7 +159,7 @@ SYM_FUNC_START(__get_user_nocheck_8) #endif xor %eax,%eax ASM_CLAC - ret + RET SYM_FUNC_END(__get_user_nocheck_8) EXPORT_SYMBOL(__get_user_nocheck_8) @@ -169,7 +169,7 @@ SYM_CODE_START_LOCAL(.Lbad_get_user_clac) bad_get_user: xor %edx,%edx mov $(-EFAULT),%_ASM_AX - ret + RET SYM_CODE_END(.Lbad_get_user_clac) #ifdef CONFIG_X86_32 @@ -179,7 +179,7 @@ bad_get_user_8: xor %edx,%edx xor %ecx,%ecx mov $(-EFAULT),%_ASM_AX - ret + RET SYM_CODE_END(.Lbad_get_user_8_clac) #endif diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S index dbf8cc97b7f5..12c16c6aa44a 100644 --- 
a/arch/x86/lib/hweight.S +++ b/arch/x86/lib/hweight.S @@ -32,7 +32,7 @@ SYM_FUNC_START(__sw_hweight32) imull $0x01010101, %eax, %eax # w_tmp *= 0x01010101 shrl $24, %eax # w = w_tmp >> 24 __ASM_SIZE(pop,) %__ASM_REG(dx) - ret + RET SYM_FUNC_END(__sw_hweight32) EXPORT_SYMBOL(__sw_hweight32) @@ -65,7 +65,7 @@ SYM_FUNC_START(__sw_hweight64) popq %rdx popq %rdi - ret + RET #else /* CONFIG_X86_32 */ /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */ pushl %ecx @@ -77,7 +77,7 @@ SYM_FUNC_START(__sw_hweight64) addl %ecx, %eax # result popl %ecx - ret + RET #endif SYM_FUNC_END(__sw_hweight64) EXPORT_SYMBOL(__sw_hweight64) diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c index 12539fca75c4..b0f3b2a62ae2 100644 --- a/arch/x86/lib/inat.c +++ b/arch/x86/lib/inat.c @@ -4,7 +4,7 @@ * * Written by Masami Hiramatsu */ -#include +#include /* __ignore_sync_check__ */ /* Attribute tables are generated from opcode map */ #include "inat-tables.c" diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index c6a19c88af54..ffc8b7dcf1fe 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -928,10 +928,11 @@ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs, static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { - insn_get_modrm(insn); + int ret; - if (!insn->modrm.nbytes) - return -EINVAL; + ret = insn_get_modrm(insn); + if (ret) + return ret; if (X86_MODRM_MOD(insn->modrm.value) != 3) return -EINVAL; @@ -977,14 +978,14 @@ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs, int *regoff, long *eff_addr) { long tmp; + int ret; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; - insn_get_modrm(insn); - - if (!insn->modrm.nbytes) - return -EINVAL; + ret = insn_get_modrm(insn); + if (ret) + return ret; if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; @@ -1106,18 +1107,21 @@ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs, * @base_offset will have a register, as an offset from the base of pt_regs, * that can be used to resolve the associated segment. * - * -EINVAL on error. + * Negative value on error. 
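A note on the insn-eval.c conversions above: the decode helpers used to return void, so callers had to infer failure from a side effect (insn->modrm.nbytes staying zero); they now return 0 or a negative error value and the callers propagate it. A minimal, self-contained sketch of that pattern, with invented names (an illustration only, not kernel code):

	#include <stdio.h>

	struct field { int got; long value; };

	/* Memoized collector that reports failure directly, instead of
	 * leaving the caller to probe a side effect afterwards. */
	static int get_field(struct field *f, const unsigned char *buf, int len)
	{
		if (f->got)
			return 0;	/* already decoded, nothing to do */
		if (len < 1)
			return -1;	/* stand-in for -ENODATA */
		f->value = buf[0];
		f->got = 1;
		return 0;
	}

	static int use_field(struct field *f, const unsigned char *buf, int len)
	{
		int ret = get_field(f, buf, len);

		if (ret)
			return ret;	/* propagate, don't guess */
		printf("value = %ld\n", f->value);
		return 0;
	}

	int main(void)
	{
		const unsigned char buf[] = { 0x90 };
		struct field f = { 0 };

		return use_field(&f, buf, sizeof(buf));
	}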
*/ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs, int *base_offset, long *eff_addr) { long base, indx; int indx_offset; + int ret; if (insn->addr_bytes != 8 && insn->addr_bytes != 4) return -EINVAL; - insn_get_modrm(insn); + ret = insn_get_modrm(insn); + if (ret) + return ret; if (!insn->modrm.nbytes) return -EINVAL; @@ -1125,7 +1129,9 @@ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs, if (X86_MODRM_MOD(insn->modrm.value) > 2) return -EINVAL; - insn_get_sib(insn); + ret = insn_get_sib(insn); + if (ret) + return ret; if (!insn->sib.nbytes) return -EINVAL; @@ -1194,8 +1200,8 @@ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs) short eff_addr; long tmp; - insn_get_modrm(insn); - insn_get_displacement(insn); + if (insn_get_displacement(insn)) + goto out; if (insn->addr_bytes != 2) goto out; @@ -1492,7 +1498,7 @@ int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_IN } /** - * insn_decode() - Decode an instruction + * insn_decode_from_regs() - Decode an instruction * @insn: Structure to store decoded instruction * @regs: Structure with register values as seen when entering kernel mode * @buf: Buffer containing the instruction bytes @@ -1505,8 +1511,8 @@ int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_IN * * True if instruction was decoded, False otherwise. */ -bool insn_decode(struct insn *insn, struct pt_regs *regs, - unsigned char buf[MAX_INSN_SIZE], int buf_size) +bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs, + unsigned char buf[MAX_INSN_SIZE], int buf_size) { int seg_defs; @@ -1529,7 +1535,9 @@ bool insn_decode(struct insn *insn, struct pt_regs *regs, insn->addr_bytes = INSN_CODE_SEG_ADDR_SZ(seg_defs); insn->opnd_bytes = INSN_CODE_SEG_OPND_SZ(seg_defs); - insn_get_length(insn); + if (insn_get_length(insn)) + return false; + if (buf_size < insn->length) return false; diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 404279563891..24e89239dcca 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -10,10 +10,13 @@ #else #include #endif -#include -#include +#include /*__ignore_sync_check__ */ +#include /* __ignore_sync_check__ */ -#include +#include +#include + +#include /* __ignore_sync_check__ */ /* Verify next sizeof(t) bytes can be on the same instruction */ #define validate_next(t, insn, n) \ @@ -97,8 +100,12 @@ static void insn_get_emulate_prefix(struct insn *insn) * Populates the @insn->prefixes bitmap, and updates @insn->next_byte * to point to the (first) opcode. No effect if @insn->prefixes.got * is already set. + * + * * Returns: + * 0: on success + * < 0: on error */ -void insn_get_prefixes(struct insn *insn) +int insn_get_prefixes(struct insn *insn) { struct insn_field *prefixes = &insn->prefixes; insn_attr_t attr; @@ -106,7 +113,7 @@ void insn_get_prefixes(struct insn *insn) int i, nb; if (prefixes->got) - return; + return 0; insn_get_emulate_prefix(insn); @@ -217,8 +224,10 @@ vex_end: prefixes->got = 1; + return 0; + err_out: - return; + return -ENODATA; } /** @@ -230,16 +239,25 @@ err_out: * If necessary, first collects any preceding (prefix) bytes. * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got * is already 1. 
+ * + * Returns: + * 0: on success + * < 0: on error */ -void insn_get_opcode(struct insn *insn) +int insn_get_opcode(struct insn *insn) { struct insn_field *opcode = &insn->opcode; + int pfx_id, ret; insn_byte_t op; - int pfx_id; + if (opcode->got) - return; - if (!insn->prefixes.got) - insn_get_prefixes(insn); + return 0; + + if (!insn->prefixes.got) { + ret = insn_get_prefixes(insn); + if (ret) + return ret; + } /* Get first opcode */ op = get_next(insn_byte_t, insn); @@ -254,9 +272,13 @@ void insn_get_opcode(struct insn *insn) insn->attr = inat_get_avx_attribute(op, m, p); if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) || (!inat_accept_vex(insn->attr) && - !inat_is_group(insn->attr))) - insn->attr = 0; /* This instruction is bad */ - goto end; /* VEX has only 1 byte for opcode */ + !inat_is_group(insn->attr))) { + /* This instruction is bad */ + insn->attr = 0; + return -EINVAL; + } + /* VEX has only 1 byte for opcode */ + goto end; } insn->attr = inat_get_opcode_attribute(op); @@ -267,13 +289,18 @@ void insn_get_opcode(struct insn *insn) pfx_id = insn_last_prefix_id(insn); insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr); } - if (inat_must_vex(insn->attr)) - insn->attr = 0; /* This instruction is bad */ + + if (inat_must_vex(insn->attr)) { + /* This instruction is bad */ + insn->attr = 0; + return -EINVAL; + } end: opcode->got = 1; + return 0; err_out: - return; + return -ENODATA; } /** @@ -283,15 +310,25 @@ err_out: * Populates @insn->modrm and updates @insn->next_byte to point past the * ModRM byte, if any. If necessary, first collects the preceding bytes * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1. + * + * Returns: + * 0: on success + * < 0: on error */ -void insn_get_modrm(struct insn *insn) +int insn_get_modrm(struct insn *insn) { struct insn_field *modrm = &insn->modrm; insn_byte_t pfx_id, mod; + int ret; + if (modrm->got) - return; - if (!insn->opcode.got) - insn_get_opcode(insn); + return 0; + + if (!insn->opcode.got) { + ret = insn_get_opcode(insn); + if (ret) + return ret; + } if (inat_has_modrm(insn->attr)) { mod = get_next(insn_byte_t, insn); @@ -301,17 +338,22 @@ void insn_get_modrm(struct insn *insn) pfx_id = insn_last_prefix_id(insn); insn->attr = inat_get_group_attribute(mod, pfx_id, insn->attr); - if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) - insn->attr = 0; /* This is bad */ + if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) { + /* Bad insn */ + insn->attr = 0; + return -EINVAL; + } } } if (insn->x86_64 && inat_is_force64(insn->attr)) insn->opnd_bytes = 8; + modrm->got = 1; + return 0; err_out: - return; + return -ENODATA; } @@ -325,11 +367,16 @@ err_out: int insn_rip_relative(struct insn *insn) { struct insn_field *modrm = &insn->modrm; + int ret; if (!insn->x86_64) return 0; - if (!modrm->got) - insn_get_modrm(insn); + + if (!modrm->got) { + ret = insn_get_modrm(insn); + if (ret) + return 0; + } /* * For rip-relative instructions, the mod field (top 2 bits) * is zero and the r/m field (bottom 3 bits) is 0x5. @@ -343,15 +390,25 @@ int insn_rip_relative(struct insn *insn) * * If necessary, first collects the instruction up to and including the * ModRM byte. + * + * Returns: + * 0: if decoding succeeded + * < 0: otherwise. 
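The insn_get_*() functions above form a lazy dependency chain: each stage first decodes its prerequisite (SIB needs ModRM, ModRM needs the opcode, the opcode needs the prefixes), so asking for any stage either completes the whole chain or fails as a unit, and the new return values make that failure visible. A self-contained sketch of the chain, with the per-stage byte parsing elided and all names invented:

	#include <stdio.h>

	enum stage { PREFIXES, OPCODE, MODRM, SIB, DISP, IMM, NR_STAGES };

	static int decode_stage(int *got, enum stage s)
	{
		if (got[s])
			return 0;	/* memoized, like the ->got flags */

		/* Decode the prerequisite first and propagate its error,
		 * as insn_get_sib() now does with insn_get_modrm(). */
		if (s > PREFIXES) {
			int ret = decode_stage(got, s - 1);

			if (ret)
				return ret;
		}

		/* ... parse the bytes for stage s here, returning a
		 * negative value on truncated input ... */
		got[s] = 1;
		return 0;
	}

	int main(void)
	{
		int got[NR_STAGES] = { 0 };

		return decode_stage(got, IMM);	/* pulls in every earlier stage */
	}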
*/ -void insn_get_sib(struct insn *insn) +int insn_get_sib(struct insn *insn) { insn_byte_t modrm; + int ret; if (insn->sib.got) - return; - if (!insn->modrm.got) - insn_get_modrm(insn); + return 0; + + if (!insn->modrm.got) { + ret = insn_get_modrm(insn); + if (ret) + return ret; + } + if (insn->modrm.nbytes) { modrm = (insn_byte_t)insn->modrm.value; if (insn->addr_bytes != 2 && @@ -362,8 +419,10 @@ void insn_get_sib(struct insn *insn) } insn->sib.got = 1; + return 0; + err_out: - return; + return -ENODATA; } @@ -374,15 +433,25 @@ err_out: * If necessary, first collects the instruction up to and including the * SIB byte. * Displacement value is sign-expanded. + * + * * Returns: + * 0: if decoding succeeded + * < 0: otherwise. */ -void insn_get_displacement(struct insn *insn) +int insn_get_displacement(struct insn *insn) { insn_byte_t mod, rm, base; + int ret; if (insn->displacement.got) - return; - if (!insn->sib.got) - insn_get_sib(insn); + return 0; + + if (!insn->sib.got) { + ret = insn_get_sib(insn); + if (ret) + return ret; + } + if (insn->modrm.nbytes) { /* * Interpreting the modrm byte: @@ -425,9 +494,10 @@ void insn_get_displacement(struct insn *insn) } out: insn->displacement.got = 1; + return 0; err_out: - return; + return -ENODATA; } /* Decode moffset16/32/64. Return 0 if failed */ @@ -538,20 +608,30 @@ err_out: } /** - * insn_get_immediate() - Get the immediates of instruction + * insn_get_immediate() - Get the immediate in an instruction * @insn: &struct insn containing instruction * * If necessary, first collects the instruction up to and including the * displacement bytes. * Basically, most of immediates are sign-expanded. Unsigned-value can be - * get by bit masking with ((1 << (nbytes * 8)) - 1) + * computed by bit masking with ((1 << (nbytes * 8)) - 1) + * + * Returns: + * 0: on success + * < 0: on error */ -void insn_get_immediate(struct insn *insn) +int insn_get_immediate(struct insn *insn) { + int ret; + if (insn->immediate.got) - return; - if (!insn->displacement.got) - insn_get_displacement(insn); + return 0; + + if (!insn->displacement.got) { + ret = insn_get_displacement(insn); + if (ret) + return ret; + } if (inat_has_moffset(insn->attr)) { if (!__get_moffset(insn)) @@ -604,9 +684,10 @@ void insn_get_immediate(struct insn *insn) } done: insn->immediate.got = 1; + return 0; err_out: - return; + return -ENODATA; } /** @@ -615,13 +696,58 @@ err_out: * * If necessary, first collects the instruction up to and including the * immediates bytes. - */ -void insn_get_length(struct insn *insn) + * + * Returns: + * - 0 on success + * - < 0 on error +*/ +int insn_get_length(struct insn *insn) { + int ret; + if (insn->length) - return; - if (!insn->immediate.got) - insn_get_immediate(insn); + return 0; + + if (!insn->immediate.got) { + ret = insn_get_immediate(insn); + if (ret) + return ret; + } + insn->length = (unsigned char)((unsigned long)insn->next_byte - (unsigned long)insn->kaddr); + + return 0; +} + +/** + * insn_decode() - Decode an x86 instruction + * @insn: &struct insn to be initialized + * @kaddr: address (in kernel memory) of instruction (or copy thereof) + * @buf_len: length of the insn buffer at @kaddr + * @m: insn mode, see enum insn_mode + * + * Returns: + * 0: if decoding succeeded + * < 0: otherwise. 
+ */ +int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m) +{ + int ret; + +/* #define INSN_MODE_KERN -1 __ignore_sync_check__ mode is only valid in the kernel */ + + if (m == INSN_MODE_KERN) + insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64)); + else + insn_init(insn, kaddr, buf_len, m == INSN_MODE_64); + + ret = insn_get_length(insn); + if (ret) + return ret; + + if (insn_complete(insn)) + return 0; + + return -EINVAL; } diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S index cb5a1964506b..a1f9416bf67a 100644 --- a/arch/x86/lib/iomap_copy_64.S +++ b/arch/x86/lib/iomap_copy_64.S @@ -11,5 +11,5 @@ SYM_FUNC_START(__iowrite32_copy) movl %edx,%ecx rep movsd - ret + RET SYM_FUNC_END(__iowrite32_copy) diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 1e299ac73c86..59cf2343f3d9 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include .pushsection .noinstr.text, "ax" @@ -39,7 +39,7 @@ SYM_FUNC_START_WEAK(memcpy) rep movsq movl %edx, %ecx rep movsb - ret + RET SYM_FUNC_END(memcpy) SYM_FUNC_END_ALIAS(__memcpy) EXPORT_SYMBOL(memcpy) @@ -53,7 +53,7 @@ SYM_FUNC_START_LOCAL(memcpy_erms) movq %rdi, %rax movq %rdx, %rcx rep movsb - ret + RET SYM_FUNC_END(memcpy_erms) SYM_FUNC_START_LOCAL(memcpy_orig) @@ -137,7 +137,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) movq %r9, 1*8(%rdi) movq %r10, -2*8(%rdi, %rdx) movq %r11, -1*8(%rdi, %rdx) - retq + RET .p2align 4 .Lless_16bytes: cmpl $8, %edx @@ -149,7 +149,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) movq -1*8(%rsi, %rdx), %r9 movq %r8, 0*8(%rdi) movq %r9, -1*8(%rdi, %rdx) - retq + RET .p2align 4 .Lless_8bytes: cmpl $4, %edx @@ -162,7 +162,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) movl -4(%rsi, %rdx), %r8d movl %ecx, (%rdi) movl %r8d, -4(%rdi, %rdx) - retq + RET .p2align 4 .Lless_3bytes: subl $1, %edx @@ -180,7 +180,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) movb %cl, (%rdi) .Lend: - retq + RET SYM_FUNC_END(memcpy_orig) .popsection diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S index 41902fe8b859..4b8ee3a2fcc3 100644 --- a/arch/x86/lib/memmove_64.S +++ b/arch/x86/lib/memmove_64.S @@ -8,7 +8,7 @@ */ #include #include -#include +#include #include #undef memmove @@ -40,7 +40,7 @@ SYM_FUNC_START(__memmove) /* FSRM implies ERMS => no length checks, do the copy directly */ .Lmemmove_begin_forward: ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM - ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS + ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS /* * movsq instruction have many startup latency @@ -205,7 +205,12 @@ SYM_FUNC_START(__memmove) movb (%rsi), %r11b movb %r11b, (%rdi) 13: - retq + RET + +.Lmemmove_erms: + movq %rdx, %rcx + rep movsb + RET SYM_FUNC_END(__memmove) SYM_FUNC_END_ALIAS(memmove) EXPORT_SYMBOL(__memmove) diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index 0bfd26e4ca9e..d624f2bc42f1 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S @@ -3,7 +3,7 @@ #include #include -#include +#include #include /* @@ -40,7 +40,7 @@ SYM_FUNC_START(__memset) movl %edx,%ecx rep stosb movq %r9,%rax - ret + RET SYM_FUNC_END(__memset) SYM_FUNC_END_ALIAS(memset) EXPORT_SYMBOL(memset) @@ -63,7 +63,7 @@ SYM_FUNC_START_LOCAL(memset_erms) movq %rdx,%rcx rep stosb movq %r9,%rax - ret + RET SYM_FUNC_END(memset_erms) SYM_FUNC_START_LOCAL(memset_orig) @@ -125,7 +125,7 @@ SYM_FUNC_START_LOCAL(memset_orig) .Lende: movq %r10,%rax - ret + RET 
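The many mechanical ret to RET conversions in this series rely on the RET assembler macro, whose definition is not part of the hunks shown here. As a hedged sketch, its shape in arch/x86/include/asm/linkage.h is roughly the following (details may differ by kernel version): a plain ret when no mitigation is configured, ret followed by int3 under CONFIG_SLS to stop straight-line speculation, and a jump to the return thunk under CONFIG_RETHUNK:

	/* Approximate shape only; see arch/x86/include/asm/linkage.h. */
	#ifdef CONFIG_RETHUNK
	#define RET	jmp __x86_return_thunk
	#else
	#ifdef CONFIG_SLS
	#define RET	ret; int3
	#else
	#define RET	ret
	#endif
	#endif

Using the macro instead of a bare ret is what lets a single rebuild switch every return site in these .S files between the three behaviors.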
.Lbad_alignment: cmpq $7,%rdx diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S index a2b9caa5274c..ebd259f31496 100644 --- a/arch/x86/lib/msr-reg.S +++ b/arch/x86/lib/msr-reg.S @@ -35,7 +35,7 @@ SYM_FUNC_START(\op\()_safe_regs) movl %edi, 28(%r10) popq %r12 popq %rbx - ret + RET 3: movl $-EIO, %r11d jmp 2b @@ -77,7 +77,7 @@ SYM_FUNC_START(\op\()_safe_regs) popl %esi popl %ebp popl %ebx - ret + RET 3: movl $-EIO, 4(%esp) jmp 2b diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index 0ea344c5ea43..ecb2049c1273 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -52,7 +52,7 @@ SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL) 1: movb %al,(%_ASM_CX) xor %ecx,%ecx ASM_CLAC - ret + RET SYM_FUNC_END(__put_user_1) EXPORT_SYMBOL(__put_user_1) EXPORT_SYMBOL(__put_user_nocheck_1) @@ -66,7 +66,7 @@ SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL) 2: movw %ax,(%_ASM_CX) xor %ecx,%ecx ASM_CLAC - ret + RET SYM_FUNC_END(__put_user_2) EXPORT_SYMBOL(__put_user_2) EXPORT_SYMBOL(__put_user_nocheck_2) @@ -80,7 +80,7 @@ SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL) 3: movl %eax,(%_ASM_CX) xor %ecx,%ecx ASM_CLAC - ret + RET SYM_FUNC_END(__put_user_4) EXPORT_SYMBOL(__put_user_4) EXPORT_SYMBOL(__put_user_nocheck_4) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index b4c43a9b1483..1221bb099afb 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -4,33 +4,37 @@ #include #include #include -#include +#include #include #include #include #include -.macro THUNK reg .section .text.__x86.indirect_thunk - .align 32 -SYM_FUNC_START(__x86_indirect_thunk_\reg) - JMP_NOSPEC \reg -SYM_FUNC_END(__x86_indirect_thunk_\reg) - -SYM_FUNC_START_NOALIGN(__x86_retpoline_\reg) +.macro RETPOLINE reg ANNOTATE_INTRA_FUNCTION_CALL - call .Ldo_rop_\@ + call .Ldo_rop_\@ .Lspec_trap_\@: UNWIND_HINT_EMPTY pause lfence - jmp .Lspec_trap_\@ + jmp .Lspec_trap_\@ .Ldo_rop_\@: - mov %\reg, (%_ASM_SP) - UNWIND_HINT_RET_OFFSET - ret -SYM_FUNC_END(__x86_retpoline_\reg) + mov %\reg, (%_ASM_SP) + UNWIND_HINT_FUNC + RET +.endm + +.macro THUNK reg + + .align RETPOLINE_THUNK_SIZE +SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL) + UNWIND_HINT_EMPTY + + ALTERNATIVE_2 __stringify(RETPOLINE \reg), \ + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \ + __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE) .endm @@ -48,16 +52,90 @@ SYM_FUNC_END(__x86_retpoline_\reg) #define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym) #define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg) -#define EXPORT_RETPOLINE(reg) __EXPORT_THUNK(__x86_retpoline_ ## reg) -#undef GEN + .align RETPOLINE_THUNK_SIZE +SYM_CODE_START(__x86_indirect_thunk_array) + #define GEN(reg) THUNK reg #include - #undef GEN + + .align RETPOLINE_THUNK_SIZE +SYM_CODE_END(__x86_indirect_thunk_array) + #define GEN(reg) EXPORT_THUNK(reg) #include - #undef GEN -#define GEN(reg) EXPORT_RETPOLINE(reg) -#include + +/* + * This function name is magical and is used by -mfunction-return=thunk-extern + * for the compiler to generate JMPs to it. + */ +#ifdef CONFIG_RETHUNK + + .section .text.__x86.return_thunk + +/* + * Safety details here pertain to the AMD Zen{1,2} microarchitecture: + * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for + * alignment within the BTB. + * 2) The instruction at zen_untrain_ret must contain, and not + * end with, the 0xc3 byte of the RET. 
+ * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread + * from re-poisoning the BTB prediction. */ + .align 64 + .skip 63, 0xcc +SYM_FUNC_START_NOALIGN(zen_untrain_ret); + + /* + * As executed from zen_untrain_ret, this is: + * + * TEST $0xcc, %bl + * LFENCE + * JMP __x86_return_thunk + * + * Executing the TEST instruction has a side effect of evicting any BTB + * prediction (potentially attacker controlled) attached to the RET, as + * __x86_return_thunk + 1 isn't an instruction boundary at the moment. + */ + .byte 0xf6 + + /* + * As executed from __x86_return_thunk, this is a plain RET. + * + * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. + * + * We subsequently jump backwards and architecturally execute the RET. + * This creates a correct BTB prediction (type=ret), but in the + * meantime we suffer Straight Line Speculation (because the type was + * no branch) which is halted by the INT3. + * + * With SMT enabled and STIBP active, a sibling thread cannot poison + * RET's prediction to a type of its choice, but can evict the + * prediction due to competitive sharing. If the prediction is + * evicted, __x86_return_thunk will suffer Straight Line Speculation + * which will be contained safely by the INT3. + */ +SYM_INNER_LABEL(__x86_return_thunk, SYM_L_GLOBAL) + ret + int3 +SYM_CODE_END(__x86_return_thunk) + + /* + * Ensure the TEST decoding / BTB invalidation is complete. + */ + lfence + + /* + * Jump back and execute the RET in the middle of the TEST instruction. + * INT3 is for SLS protection. + */ + jmp __x86_return_thunk + int3 +SYM_FUNC_END(zen_untrain_ret) +__EXPORT_THUNK(zen_untrain_ret) + +EXPORT_SYMBOL(__x86_return_thunk) + +#endif /* CONFIG_RETHUNK */ diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 508c81e97ab1..f1c0befb62df 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -121,7 +121,7 @@ void __memcpy_flushcache(void *_dst, const void *_src, size_t size) /* cache copy and flush to align dest */ if (!IS_ALIGNED(dest, 8)) { - unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest); + size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest); memcpy((void *) dest, (void *) source, len); clean_cache_range((void *) dest, len); diff --git a/arch/x86/math-emu/div_Xsig.S b/arch/x86/math-emu/div_Xsig.S index 951da2ad54bb..8c270ab415be 100644 --- a/arch/x86/math-emu/div_Xsig.S +++ b/arch/x86/math-emu/div_Xsig.S @@ -341,7 +341,7 @@ L_exit: popl %esi leave - ret + RET #ifdef PARANOID diff --git a/arch/x86/math-emu/div_small.S b/arch/x86/math-emu/div_small.S index d047d1816abe..637439bfefa4 100644 --- a/arch/x86/math-emu/div_small.S +++ b/arch/x86/math-emu/div_small.S @@ -44,5 +44,5 @@ SYM_FUNC_START(FPU_div_small) popl %esi leave - ret + RET SYM_FUNC_END(FPU_div_small) diff --git a/arch/x86/math-emu/mul_Xsig.S b/arch/x86/math-emu/mul_Xsig.S index 4afc7b1fa6e9..54a031b66142 100644 --- a/arch/x86/math-emu/mul_Xsig.S +++ b/arch/x86/math-emu/mul_Xsig.S @@ -62,7 +62,7 @@ SYM_FUNC_START(mul32_Xsig) popl %esi leave - ret + RET SYM_FUNC_END(mul32_Xsig) @@ -115,7 +115,7 @@ SYM_FUNC_START(mul64_Xsig) popl %esi leave - ret + RET SYM_FUNC_END(mul64_Xsig) @@ -175,5 +175,5 @@ SYM_FUNC_START(mul_Xsig_Xsig) popl %esi leave - ret + RET SYM_FUNC_END(mul_Xsig_Xsig) diff --git a/arch/x86/math-emu/polynom_Xsig.S b/arch/x86/math-emu/polynom_Xsig.S index 702315eecb86..35fd723fc0df 100644 --- a/arch/x86/math-emu/polynom_Xsig.S +++ b/arch/x86/math-emu/polynom_Xsig.S @@ -133,5 +133,5 @@ L_accum_done: popl %edi
popl %esi leave - ret + RET SYM_FUNC_END(polynomial_Xsig) diff --git a/arch/x86/math-emu/reg_norm.S b/arch/x86/math-emu/reg_norm.S index cad1d60b1e84..594936eeed67 100644 --- a/arch/x86/math-emu/reg_norm.S +++ b/arch/x86/math-emu/reg_norm.S @@ -72,7 +72,7 @@ L_exit_valid: L_exit: popl %ebx leave - ret + RET L_zero: @@ -138,7 +138,7 @@ L_exit_nuo_valid: popl %ebx leave - ret + RET L_exit_nuo_zero: movl TAG_Zero,%eax @@ -146,5 +146,5 @@ L_exit_nuo_zero: popl %ebx leave - ret + RET SYM_FUNC_END(FPU_normalize_nuo) diff --git a/arch/x86/math-emu/reg_round.S b/arch/x86/math-emu/reg_round.S index 11a1f798451b..8bdbdcee7456 100644 --- a/arch/x86/math-emu/reg_round.S +++ b/arch/x86/math-emu/reg_round.S @@ -437,7 +437,7 @@ fpu_Arith_exit: popl %edi popl %esi leave - ret + RET /* diff --git a/arch/x86/math-emu/reg_u_add.S b/arch/x86/math-emu/reg_u_add.S index 9c9e2c810afe..07247287a3af 100644 --- a/arch/x86/math-emu/reg_u_add.S +++ b/arch/x86/math-emu/reg_u_add.S @@ -164,6 +164,6 @@ L_exit: popl %edi popl %esi leave - ret + RET #endif /* PARANOID */ SYM_FUNC_END(FPU_u_add) diff --git a/arch/x86/math-emu/reg_u_div.S b/arch/x86/math-emu/reg_u_div.S index e2fb5c2644c5..b5a41e2fc484 100644 --- a/arch/x86/math-emu/reg_u_div.S +++ b/arch/x86/math-emu/reg_u_div.S @@ -468,7 +468,7 @@ L_exit: popl %esi leave - ret + RET #endif /* PARANOID */ SYM_FUNC_END(FPU_u_div) diff --git a/arch/x86/math-emu/reg_u_mul.S b/arch/x86/math-emu/reg_u_mul.S index 0c779c87ac5b..e2588b24b8c2 100644 --- a/arch/x86/math-emu/reg_u_mul.S +++ b/arch/x86/math-emu/reg_u_mul.S @@ -144,7 +144,7 @@ L_exit: popl %edi popl %esi leave - ret + RET #endif /* PARANOID */ SYM_FUNC_END(FPU_u_mul) diff --git a/arch/x86/math-emu/reg_u_sub.S b/arch/x86/math-emu/reg_u_sub.S index e9bb7c248649..4c900c29e4ff 100644 --- a/arch/x86/math-emu/reg_u_sub.S +++ b/arch/x86/math-emu/reg_u_sub.S @@ -270,5 +270,5 @@ L_exit: popl %edi popl %esi leave - ret + RET SYM_FUNC_END(FPU_u_sub) diff --git a/arch/x86/math-emu/round_Xsig.S b/arch/x86/math-emu/round_Xsig.S index d9d7de8dbd7b..126c40473bad 100644 --- a/arch/x86/math-emu/round_Xsig.S +++ b/arch/x86/math-emu/round_Xsig.S @@ -78,7 +78,7 @@ L_exit: popl %esi popl %ebx leave - ret + RET SYM_FUNC_END(round_Xsig) @@ -138,5 +138,5 @@ L_n_exit: popl %esi popl %ebx leave - ret + RET SYM_FUNC_END(norm_Xsig) diff --git a/arch/x86/math-emu/shr_Xsig.S b/arch/x86/math-emu/shr_Xsig.S index 726af985f758..f726bf6f6396 100644 --- a/arch/x86/math-emu/shr_Xsig.S +++ b/arch/x86/math-emu/shr_Xsig.S @@ -45,7 +45,7 @@ SYM_FUNC_START(shr_Xsig) popl %ebx popl %esi leave - ret + RET L_more_than_31: cmpl $64,%ecx @@ -61,7 +61,7 @@ L_more_than_31: movl $0,8(%esi) popl %esi leave - ret + RET L_more_than_63: cmpl $96,%ecx @@ -76,7 +76,7 @@ L_more_than_63: movl %edx,8(%esi) popl %esi leave - ret + RET L_more_than_95: xorl %eax,%eax @@ -85,5 +85,5 @@ L_more_than_95: movl %eax,8(%esi) popl %esi leave - ret + RET SYM_FUNC_END(shr_Xsig) diff --git a/arch/x86/math-emu/wm_shrx.S b/arch/x86/math-emu/wm_shrx.S index 4fc89174caf0..f608a28a4c43 100644 --- a/arch/x86/math-emu/wm_shrx.S +++ b/arch/x86/math-emu/wm_shrx.S @@ -55,7 +55,7 @@ SYM_FUNC_START(FPU_shrx) popl %ebx popl %esi leave - ret + RET L_more_than_31: cmpl $64,%ecx @@ -70,7 +70,7 @@ L_more_than_31: movl $0,4(%esi) popl %esi leave - ret + RET L_more_than_63: cmpl $96,%ecx @@ -84,7 +84,7 @@ L_more_than_63: movl %edx,4(%esi) popl %esi leave - ret + RET L_more_than_95: xorl %eax,%eax @@ -92,7 +92,7 @@ L_more_than_95: movl %eax,4(%esi) popl %esi leave - ret + RET SYM_FUNC_END(FPU_shrx) @@ -146,7 
+146,7 @@ SYM_FUNC_START(FPU_shrxs) popl %ebx popl %esi leave - ret + RET /* Shift by [0..31] bits */ Ls_less_than_32: @@ -163,7 +163,7 @@ Ls_less_than_32: popl %ebx popl %esi leave - ret + RET /* Shift by [64..95] bits */ Ls_more_than_63: @@ -189,7 +189,7 @@ Ls_more_than_63: popl %ebx popl %esi leave - ret + RET Ls_more_than_95: /* Shift by [96..inf) bits */ @@ -203,5 +203,5 @@ Ls_more_than_95: popl %ebx popl %esi leave - ret + RET SYM_FUNC_END(FPU_shrxs) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 155c95dc1772..88a1e0d60170 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -78,10 +78,20 @@ static uint8_t __pte2cachemode_tbl[8] = { [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC, }; -/* Check that the write-protect PAT entry is set for write-protect */ +/* + * Check that the write-protect PAT entry is set for write-protect. + * To do this without making assumptions about how PAT has been set up (Xen + * uses a different layout than the kernel), translate the _PAGE_CACHE_MODE_WP cache + * mode via the __cachemode2pte_tbl[] into protection bits (those protection + * bits will select a cache mode of WP or better), and then translate the + * protection bits back into the cache mode using __pte2cm_idx() and the + * __pte2cachemode_tbl[] array. This yields the cache mode that is actually in use. + */ bool x86_has_pat_wp(void) { - return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP; + uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP]; + + return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP; } enum page_cache_mode pgprot2cachemode(pgprot_t pgprot) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 067ca92e69ef..20951ab522a1 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -645,7 +645,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, pages++; spin_lock(&init_mm.page_table_lock); - prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE); + prot = __pgprot(pgprot_val(prot) | _PAGE_PSE); set_pte_init((pte_t *)pud, pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT, diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 91e61dbba3e0..88cb537ccdea 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -216,9 +216,15 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size, * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; - phys_addr &= PHYSICAL_PAGE_MASK; + phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; + /* + * Mask out any bits not part of the actual physical + * address, like memory encryption bits.
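Returning to the x86_has_pat_wp() rewrite above: the old check indexed __pte2cachemode_tbl[], which is keyed by a PTE-bit index, with a cache-mode constant, mixing two different index spaces. The fix translates the cache mode forward into protection bits and then back, and only trusts WP if the round trip lands on WP again. A self-contained illustration with invented tables and values (only the indexing logic matters):

	#include <stdio.h>
	#include <stdbool.h>

	enum cm { CM_WB, CM_WC, CM_UC, CM_WP, NR_CM };

	/* cache mode -> PTE protection bits (layout is platform specific) */
	static const unsigned cm2prot[NR_CM] = { 0x0, 0x8, 0x10, 0x18 };

	/* PTE protection bits -> cache mode, keyed by a derived index */
	static unsigned prot2idx(unsigned prot) { return prot >> 3; }
	static const enum cm idx2cm[4] = { CM_WB, CM_WC, CM_UC, CM_WP };

	static bool has_wp(void)
	{
		/* The buggy form was idx2cm[CM_WP]: a cache mode used as a
		 * PTE-bit index. The round trip below stays in the right
		 * index space at every step. */
		unsigned prot = cm2prot[CM_WP];

		return idx2cm[prot2idx(prot)] == CM_WP;
	}

	int main(void)
	{
		printf("WP usable: %d\n", has_wp());
		return 0;
	}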
+ */ + phys_addr &= PHYSICAL_PAGE_MASK; + retval = memtype_reserve(phys_addr, (u64)phys_addr + size, pcm, &new_pcm); if (retval) { diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S index 7a84fc8bc5c3..145b67299ab6 100644 --- a/arch/x86/mm/mem_encrypt_boot.S +++ b/arch/x86/mm/mem_encrypt_boot.S @@ -65,7 +65,10 @@ SYM_FUNC_START(sme_encrypt_execute) movq %rbp, %rsp /* Restore original stack pointer */ pop %rbp + /* Offset to __x86_return_thunk would be wrong here */ + ANNOTATE_UNRET_SAFE ret + int3 SYM_FUNC_END(sme_encrypt_execute) SYM_FUNC_START(__enc_copy) @@ -151,6 +154,9 @@ SYM_FUNC_START(__enc_copy) pop %r12 pop %r15 + /* Offset to __x86_return_thunk would be wrong here */ + ANNOTATE_UNRET_SAFE ret + int3 .L__enc_copy_end: SYM_FUNC_END(__enc_copy) diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index e94da744386f..9dc31996c7ed 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -861,7 +861,7 @@ void debug_cpumask_set_cpu(int cpu, int node, bool enable) return; } mask = node_to_cpumask_map[node]; - if (!mask) { + if (!cpumask_available(mask)) { pr_err("node_to_cpumask_map[%i] NULL\n", node); dump_stack(); return; @@ -907,7 +907,7 @@ const struct cpumask *cpumask_of_node(int node) dump_stack(); return cpu_none_mask; } - if (node_to_cpumask_map[node] == NULL) { + if (!cpumask_available(node_to_cpumask_map[node])) { printk(KERN_WARNING "cpumask_of_node(%d): no node_to_cpumask_map!\n", node); diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index 232932bda4e5..f9c53a710740 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -101,7 +101,7 @@ int pat_debug_enable; static int __init pat_debug_setup(char *str) { pat_debug_enable = 1; - return 0; + return 1; } __setup("debugpat", pat_debug_setup); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 610b43529148..547ec83794c0 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -15,7 +15,6 @@ #include #include #include -#include static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { @@ -213,6 +212,14 @@ static void jit_fill_hole(void *area, unsigned int size) struct jit_context { int cleanup_addr; /* Epilogue code offset */ + + /* + * Program specific offsets of labels in the code; these rely on the + * JIT doing at least 2 passes, recording the position on the first + * pass, only to generate the correct offset on the second pass. 
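The two-pass scheme described in the comment above can be shown in a few lines: on the first pass the label fields still hold stale values, so the emitted displacements are wrong, but the label positions get recorded; on a later pass the recorded positions produce correct displacements once the image size has converged. A toy model with invented names and sizes:

	#include <stdio.h>

	struct ctx { int out_label; };	/* like tail_call_*_label */

	static void emit_prog(struct ctx *ctx)
	{
		int pc = 0;
		/* Forward jump to "out:"; the displacement is relative to the
		 * end of the 2-byte jcc, as in label - (prog + 2 - start). */
		int disp = ctx->out_label - (pc + 2);

		pc += 2;		/* the jcc itself */
		pc += 10;		/* function body */
		ctx->out_label = pc;	/* out: record the label position */

		printf("disp = %d\n", disp);
	}

	int main(void)
	{
		struct ctx ctx = { 0 };

		emit_prog(&ctx);	/* pass 1: disp = -2 (wrong), label recorded */
		emit_prog(&ctx);	/* pass 2: disp = 10 (correct) */
		return 0;
	}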
+ */ + int tail_call_direct_label; + int tail_call_indirect_label; }; /* Maximum number of bytes emitted while JITing one eBPF insn */ @@ -372,20 +379,40 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true); } -static int get_pop_bytes(bool *callee_regs_used) +#define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8) + +static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) { - int bytes = 0; + u8 *prog = *pprog; + int cnt = 0; - if (callee_regs_used[3]) - bytes += 2; - if (callee_regs_used[2]) - bytes += 2; - if (callee_regs_used[1]) - bytes += 2; - if (callee_regs_used[0]) - bytes += 1; +#ifdef CONFIG_RETPOLINE + if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { + EMIT_LFENCE(); + EMIT2(0xFF, 0xE0 + reg); + } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { + emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); + } else +#endif + EMIT2(0xFF, 0xE0 + reg); - return bytes; + *pprog = prog; +} + +static void emit_return(u8 **pprog, u8 *ip) +{ + u8 *prog = *pprog; + int cnt = 0; + + if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { + emit_jump(&prog, &__x86_return_thunk, ip); + } else { + EMIT1(0xC3); /* ret */ + if (IS_ENABLED(CONFIG_SLS)) + EMIT1(0xCC); /* int3 */ + } + + *pprog = prog; } /* @@ -403,30 +430,12 @@ static int get_pop_bytes(bool *callee_regs_used) * out: */ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, - u32 stack_depth) + u32 stack_depth, u8 *ip, + struct jit_context *ctx) { int tcc_off = -4 - round_up(stack_depth, 8); - u8 *prog = *pprog; - int pop_bytes = 0; - int off1 = 42; - int off2 = 31; - int off3 = 9; - int cnt = 0; - - /* count the additional bytes used for popping callee regs from stack - * that need to be taken into account for each of the offsets that - * are used for bailing out of the tail call - */ - pop_bytes = get_pop_bytes(callee_regs_used); - off1 += pop_bytes; - off2 += pop_bytes; - off3 += pop_bytes; - - if (stack_depth) { - off1 += 7; - off2 += 7; - off3 += 7; - } + u8 *prog = *pprog, *start = *pprog; + int cnt = 0, offset; /* * rdi - pointer to ctx @@ -441,8 +450,9 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, EMIT2(0x89, 0xD2); /* mov edx, edx */ EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ offsetof(struct bpf_array, map.max_entries)); -#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */ - EMIT2(X86_JBE, OFFSET1); /* jbe out */ + + offset = ctx->tail_call_indirect_label - (prog + 2 - start); + EMIT2(X86_JBE, offset); /* jbe out */ /* * if (tail_call_cnt > MAX_TAIL_CALL_CNT) @@ -450,8 +460,9 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, */ EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ -#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE) - EMIT2(X86_JA, OFFSET2); /* ja out */ + + offset = ctx->tail_call_indirect_label - (prog + 2 - start); + EMIT2(X86_JA, offset); /* ja out */ EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ @@ -464,12 +475,11 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, * goto out; */ EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */ -#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE) - EMIT2(X86_JE, OFFSET3); /* je out */ - *pprog = prog; - pop_callee_regs(pprog, callee_regs_used); - prog = *pprog; + offset = 
ctx->tail_call_indirect_label - (prog + 2 - start); + EMIT2(X86_JE, offset); /* je out */ + + pop_callee_regs(&prog, callee_regs_used); EMIT1(0x58); /* pop rax */ if (stack_depth) @@ -486,42 +496,21 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, * rdi == ctx (1st arg) * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET */ - RETPOLINE_RCX_BPF_JIT(); + emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start)); /* out: */ + ctx->tail_call_indirect_label = prog - start; *pprog = prog; } static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke, - u8 **pprog, int addr, u8 *image, - bool *callee_regs_used, u32 stack_depth) + u8 **pprog, u8 *ip, + bool *callee_regs_used, u32 stack_depth, + struct jit_context *ctx) { int tcc_off = -4 - round_up(stack_depth, 8); - u8 *prog = *pprog; - int pop_bytes = 0; - int off1 = 20; - int poke_off; - int cnt = 0; - - /* count the additional bytes used for popping callee regs to stack - * that need to be taken into account for jump offset that is used for - * bailing out from of the tail call when limit is reached - */ - pop_bytes = get_pop_bytes(callee_regs_used); - off1 += pop_bytes; - - /* - * total bytes for: - * - nop5/ jmpq $off - * - pop callee regs - * - sub rsp, $val if depth > 0 - * - pop rax - */ - poke_off = X86_PATCH_SIZE + pop_bytes + 1; - if (stack_depth) { - poke_off += 7; - off1 += 7; - } + u8 *prog = *pprog, *start = *pprog; + int cnt = 0, offset; /* * if (tail_call_cnt > MAX_TAIL_CALL_CNT) @@ -529,28 +518,30 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke, */ EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ - EMIT2(X86_JA, off1); /* ja out */ + + offset = ctx->tail_call_direct_label - (prog + 2 - start); + EMIT2(X86_JA, offset); /* ja out */ EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ - poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE); + poke->tailcall_bypass = ip + (prog - start); poke->adj_off = X86_TAIL_CALL_OFFSET; - poke->tailcall_target = image + (addr - X86_PATCH_SIZE); + poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE; poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE; emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, poke->tailcall_bypass); - *pprog = prog; - pop_callee_regs(pprog, callee_regs_used); - prog = *pprog; + pop_callee_regs(&prog, callee_regs_used); EMIT1(0x58); /* pop rax */ if (stack_depth) EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE); prog += X86_PATCH_SIZE; + /* out: */ + ctx->tail_call_direct_label = prog - start; *pprog = prog; } @@ -1141,8 +1132,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, /* speculation barrier */ case BPF_ST | BPF_NOSPEC: if (boot_cpu_has(X86_FEATURE_XMM2)) - /* Emit 'lfence' */ - EMIT3(0x0F, 0xAE, 0xE8); + EMIT_LFENCE(); break; /* ST: *(u8*)(dst_reg + off) = imm */ @@ -1258,8 +1248,9 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_CALL: func = (u8 *) __bpf_call_base + imm32; if (tail_call_reachable) { + /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ EMIT3_off32(0x48, 0x8B, 0x85, - -(bpf_prog->aux->stack_depth + 8)); + -round_up(bpf_prog->aux->stack_depth, 8) - 8); if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7)) return -EINVAL; } else { @@ -1271,13 +1262,16 @@ 
xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_TAIL_CALL: if (imm32) emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1], - &prog, addrs[i], image, + &prog, image + addrs[i - 1], callee_regs_used, - bpf_prog->aux->stack_depth); + bpf_prog->aux->stack_depth, + ctx); else emit_bpf_tail_call_indirect(&prog, callee_regs_used, - bpf_prog->aux->stack_depth); + bpf_prog->aux->stack_depth, + image + addrs[i - 1], + ctx); break; /* cond jump */ @@ -1462,7 +1456,7 @@ emit_jmp: ctx->cleanup_addr = proglen; pop_callee_regs(&prog, callee_regs_used); EMIT1(0xC9); /* leave */ - EMIT1(0xC3); /* ret */ + emit_return(&prog, image + addrs[i - 1] + (prog - temp)); break; default: @@ -1903,7 +1897,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i if (flags & BPF_TRAMP_F_SKIP_FRAME) /* skip our return address and return to parent */ EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ - EMIT1(0xC3); /* ret */ + emit_return(&prog, prog); /* Make sure the trampoline generation logic doesn't overflow */ if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { ret = -EFAULT; @@ -1916,26 +1910,6 @@ cleanup: return ret; } -static int emit_fallback_jump(u8 **pprog) -{ - u8 *prog = *pprog; - int err = 0; - -#ifdef CONFIG_RETPOLINE - /* Note that this assumes the the compiler uses external - * thunks for indirect calls. Both clang and GCC use the same - * naming convention for external thunks. - */ - err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog); -#else - int cnt = 0; - - EMIT2(0xFF, 0xE2); /* jmp rdx */ -#endif - *pprog = prog; - return err; -} - static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) { u8 *jg_reloc, *prog = *pprog; @@ -1957,9 +1931,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) if (err) return err; - err = emit_fallback_jump(&prog); /* jmp thunk/indirect */ - if (err) - return err; + emit_indirect_jump(&prog, 2 /* rdx */, prog); *pprog = prog; return 0; diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 4bd0f98df700..622af951220c 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -15,6 +15,7 @@ #include #include #include +#include #include /* @@ -1267,6 +1268,21 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth) *pprog = prog; } +static int emit_jmp_edx(u8 **pprog, u8 *ip) +{ + u8 *prog = *pprog; + int cnt = 0; + +#ifdef CONFIG_RETPOLINE + EMIT1_off32(0xE9, (u8 *)__x86_indirect_thunk_edx - (ip + 5)); +#else + EMIT2(0xFF, 0xE2); +#endif + *pprog = prog; + + return cnt; +} + /* * Generate the following code: * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... 
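Both JITs above now funnel indirect jumps through one emitter that picks an encoding from the CPU feature bits: lfence followed by a plain indirect jump for the LFENCE-only retpoline mode, a relative jump into the per-register thunk array for full retpoline, and a bare indirect jump otherwise. A simplified, self-contained sketch of that selection (the buffer handling and thunk address resolution are stand-ins, not the kernel's emitter):

	#include <stdio.h>
	#include <stdbool.h>

	static int emit_ind_jmp(unsigned char *buf, int reg,
				bool retpoline, bool lfence_only)
	{
		int n = 0;

		if (lfence_only) {
			buf[n++] = 0x0F; buf[n++] = 0xAE; buf[n++] = 0xE8; /* lfence */
			buf[n++] = 0xFF; buf[n++] = 0xE0 + reg;            /* jmp *%reg */
		} else if (retpoline) {
			buf[n++] = 0xE9;	/* jmp rel32 to the thunk */
			n += 4;			/* displacement patched in later */
		} else {
			buf[n++] = 0xFF; buf[n++] = 0xE0 + reg;            /* jmp *%reg */
		}
		return n;
	}

	int main(void)
	{
		unsigned char buf[16];

		printf("%d bytes\n", emit_ind_jmp(buf, 1 /* rcx */, true, false));
		return 0;
	}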
@@ -1280,7 +1296,7 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth) * goto *(prog->bpf_func + prologue_size); * out: */ -static void emit_bpf_tail_call(u8 **pprog) +static void emit_bpf_tail_call(u8 **pprog, u8 *ip) { u8 *prog = *pprog; int cnt = 0; @@ -1362,7 +1378,7 @@ static void emit_bpf_tail_call(u8 **pprog) * eax == ctx (1st arg) * edx == prog->bpf_func + prologue_size */ - RETPOLINE_EDX_BPF_JIT(); + cnt += emit_jmp_edx(&prog, ip + cnt); if (jmp_label1 == -1) jmp_label1 = cnt; @@ -1929,7 +1945,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, break; } case BPF_JMP | BPF_TAIL_CALL: - emit_bpf_tail_call(&prog); + emit_bpf_tail_call(&prog, image + addrs[i - 1]); break; /* cond jump */ diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index c552cd2d0632..326d6d173733 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -476,7 +476,6 @@ static __init void xen_setup_pci_msi(void) xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs; } xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs; - pci_msi_ignore_mask = 1; } else if (xen_hvm_domain()) { xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs; xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs; @@ -490,6 +489,11 @@ static __init void xen_setup_pci_msi(void) * in allocating the native domain and never use it. */ x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain; + /* + * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely + * controlled by the hypervisor. + */ + pci_msi_ignore_mask = 1; } #else /* CONFIG_PCI_MSI */ diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S index 09ec84f6ef51..f3cfdb1c9a35 100644 --- a/arch/x86/platform/efi/efi_stub_32.S +++ b/arch/x86/platform/efi/efi_stub_32.S @@ -56,5 +56,5 @@ SYM_FUNC_START(efi_call_svam) movl 16(%esp), %ebx leave - ret + RET SYM_FUNC_END(efi_call_svam) diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index 90380a17ab23..2206b8bc47b8 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S @@ -23,5 +23,5 @@ SYM_FUNC_START(__efi_call) mov %rsi, %rcx CALL_NOSPEC rdi leave - ret + RET SYM_FUNC_END(__efi_call) diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S index 26f0da238c1c..9eac088ef4de 100644 --- a/arch/x86/platform/efi/efi_thunk_64.S +++ b/arch/x86/platform/efi/efi_thunk_64.S @@ -22,6 +22,7 @@ #include #include #include +#include .text .code64 @@ -63,7 +64,9 @@ SYM_CODE_START(__efi64_thunk) 1: movq 24(%rsp), %rsp pop %rbx pop %rbp - retq + ANNOTATE_UNRET_SAFE + ret + int3 .code32 2: pushl $__KERNEL_CS diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c index f03a6883dcc6..89f25af4b3c3 100644 --- a/arch/x86/platform/olpc/olpc-xo1-sci.c +++ b/arch/x86/platform/olpc/olpc-xo1-sci.c @@ -80,7 +80,7 @@ static void send_ebook_state(void) return; } - if (!!test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == state) + if (test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == !!state) return; /* Nothing new to report. 
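The olpc-xo1-sci.c change just above is worth a second look: test_bit() already yields 0 or 1, while state can be any integer, so normalizing the wrong side of the comparison made it fail for any state other than 0 or 1. A standalone demonstration:

	#include <stdio.h>

	int main(void)
	{
		int bit = 1;	/* what test_bit() returns: always 0 or 1 */
		int state = 4;	/* raw state: any non-zero value means "set" */

		printf("old: %d\n", !!bit == state);	/* 0: 1 != 4, misses the match */
		printf("new: %d\n", bit == !!state);	/* 1: state normalized first */
		return 0;
	}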
*/ input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state); diff --git a/arch/x86/platform/olpc/xo1-wakeup.S b/arch/x86/platform/olpc/xo1-wakeup.S index 75f4faff8468..3a5abffe5660 100644 --- a/arch/x86/platform/olpc/xo1-wakeup.S +++ b/arch/x86/platform/olpc/xo1-wakeup.S @@ -77,7 +77,7 @@ save_registers: pushfl popl saved_context_eflags - ret + RET restore_registers: movl saved_context_ebp, %ebp @@ -88,7 +88,7 @@ restore_registers: pushl saved_context_eflags popfl - ret + RET SYM_CODE_START(do_olpc_suspend_lowlevel) call save_processor_state @@ -109,7 +109,7 @@ ret_point: call restore_registers call restore_processor_state - ret + RET SYM_CODE_END(do_olpc_suspend_lowlevel) .data diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 7e4bafaedd7f..92728a97993b 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -25,6 +25,7 @@ #include #include #include +#include #ifdef CONFIG_X86_32 __visible unsigned long saved_context_ebx; @@ -40,7 +41,8 @@ static void msr_save_context(struct saved_context *ctxt) struct saved_msr *end = msr + ctxt->saved_msrs.num; while (msr < end) { - msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q); + if (msr->valid) + rdmsrl(msr->info.msr_no, msr->info.reg.q); msr++; } } @@ -264,11 +266,18 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) x86_platform.restore_sched_clock_state(); mtrr_bp_restore(); perf_restore_debug_store(); - msr_restore_context(ctxt); c = &cpu_data(smp_processor_id()); if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL)) init_ia32_feat_ctl(c); + + microcode_bsp_resume(); + + /* + * This needs to happen after the microcode has been updated upon resume + * because some of the MSRs are "emulated" in microcode. + */ + msr_restore_context(ctxt); } /* Needed by apm.c */ @@ -440,8 +449,10 @@ static int msr_build_context(const u32 *msr_id, const int num) } for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) { + u64 dummy; + msr_array[i].info.msr_no = msr_id[j]; - msr_array[i].valid = false; + msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy); msr_array[i].info.reg.q = 0; } saved_msrs->num = total_num; @@ -516,10 +527,32 @@ static int pm_cpu_check(const struct x86_cpu_id *c) return ret; } +static void pm_save_spec_msr(void) +{ + struct msr_enumeration { + u32 msr_no; + u32 feature; + } msr_enum[] = { + { MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL }, + { MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL }, + { MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT }, + { MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL }, + { MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD }, + { MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(msr_enum); i++) { + if (boot_cpu_has(msr_enum[i].feature)) + msr_build_context(&msr_enum[i].msr_no, 1); + } +} + static int pm_check_save_msr(void) { dmi_check_system(msr_save_dmi_table); pm_cpu_check(msr_save_cpu_table); + pm_save_spec_msr(); return 0; } diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index 8786653ad3c0..5606a15cf9a1 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S @@ -32,7 +32,7 @@ SYM_FUNC_START(swsusp_arch_suspend) FRAME_BEGIN call swsusp_save FRAME_END - ret + RET SYM_FUNC_END(swsusp_arch_suspend) SYM_CODE_START(restore_image) @@ -108,5 +108,5 @@ SYM_FUNC_START(restore_registers) /* tell the hibernation core that we've just restored the memory */ movl %eax, in_suspend - ret + RET SYM_FUNC_END(restore_registers) diff --git 
a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S index 7918b8415f13..3ae7a3d7d61e 100644 --- a/arch/x86/power/hibernate_asm_64.S +++ b/arch/x86/power/hibernate_asm_64.S @@ -49,7 +49,7 @@ SYM_FUNC_START(swsusp_arch_suspend) FRAME_BEGIN call swsusp_save FRAME_END - ret + RET SYM_FUNC_END(swsusp_arch_suspend) SYM_CODE_START(restore_image) @@ -143,5 +143,5 @@ SYM_FUNC_START(restore_registers) /* tell the hibernation core that we've just restored the memory */ movq %rax, in_suspend(%rip) - ret + RET SYM_FUNC_END(restore_registers) diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile index 77f70b969d14..3113800da63a 100644 --- a/arch/x86/um/Makefile +++ b/arch/x86/um/Makefile @@ -27,7 +27,8 @@ else obj-y += syscalls_64.o vdso/ -subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o +subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o +subarch-$(CONFIG_PREEMPTION) += ../entry/thunk_64.o endif diff --git a/arch/x86/um/checksum_32.S b/arch/x86/um/checksum_32.S index 13f118dec74f..aed782ab7721 100644 --- a/arch/x86/um/checksum_32.S +++ b/arch/x86/um/checksum_32.S @@ -110,7 +110,7 @@ csum_partial: 7: popl %ebx popl %esi - ret + RET #else @@ -208,7 +208,7 @@ csum_partial: 80: popl %ebx popl %esi - ret + RET #endif EXPORT_SYMBOL(csum_partial) diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c index 3ee234b6234d..255a44dd415a 100644 --- a/arch/x86/um/ldt.c +++ b/arch/x86/um/ldt.c @@ -23,9 +23,11 @@ static long write_ldt_entry(struct mm_id *mm_idp, int func, { long res; void *stub_addr; + + BUILD_BUG_ON(sizeof(*desc) % sizeof(long)); + res = syscall_stub_data(mm_idp, (unsigned long *)desc, - (sizeof(*desc) + sizeof(long) - 1) & - ~(sizeof(long) - 1), + sizeof(*desc) / sizeof(long), addr, &stub_addr); if (!res) { unsigned long args[] = { func, diff --git a/arch/x86/um/setjmp_32.S b/arch/x86/um/setjmp_32.S index 62eaf8c80e04..2d991ddbcca5 100644 --- a/arch/x86/um/setjmp_32.S +++ b/arch/x86/um/setjmp_32.S @@ -34,7 +34,7 @@ kernel_setjmp: movl %esi,12(%edx) movl %edi,16(%edx) movl %ecx,20(%edx) # Return address - ret + RET .size kernel_setjmp,.-kernel_setjmp diff --git a/arch/x86/um/setjmp_64.S b/arch/x86/um/setjmp_64.S index 1b5d40d4ff46..b46acb6a8ebd 100644 --- a/arch/x86/um/setjmp_64.S +++ b/arch/x86/um/setjmp_64.S @@ -33,7 +33,7 @@ kernel_setjmp: movq %r14,40(%rdi) movq %r15,48(%rdi) movq %rsi,56(%rdi) # Return address - ret + RET .size kernel_setjmp,.-kernel_setjmp diff --git a/arch/x86/um/shared/sysdep/syscalls_32.h b/arch/x86/um/shared/sysdep/syscalls_32.h index 68fd2cf526fd..f6e9f84397e7 100644 --- a/arch/x86/um/shared/sysdep/syscalls_32.h +++ b/arch/x86/um/shared/sysdep/syscalls_32.h @@ -6,10 +6,9 @@ #include #include -typedef long syscall_handler_t(struct pt_regs); +typedef long syscall_handler_t(struct syscall_args); extern syscall_handler_t *sys_call_table[]; #define EXECUTE_SYSCALL(syscall, regs) \ - ((long (*)(struct syscall_args)) \ - (*sys_call_table[syscall]))(SYSCALL_ARGS(®s->regs)) + ((*sys_call_table[syscall]))(SYSCALL_ARGS(®s->regs)) diff --git a/arch/x86/um/shared/sysdep/syscalls_64.h b/arch/x86/um/shared/sysdep/syscalls_64.h index 8a7d5e1da98e..1e6875b4ffd8 100644 --- a/arch/x86/um/shared/sysdep/syscalls_64.h +++ b/arch/x86/um/shared/sysdep/syscalls_64.h @@ -10,13 +10,12 @@ #include #include -typedef long syscall_handler_t(void); +typedef long syscall_handler_t(long, long, long, long, long, long); extern syscall_handler_t *sys_call_table[]; #define EXECUTE_SYSCALL(syscall, regs) \ - (((long (*)(long, long, long, long, long, long)) \ - 
(*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(®s->regs), \ + (((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(®s->regs), \ UPT_SYSCALL_ARG2(®s->regs), \ UPT_SYSCALL_ARG3(®s->regs), \ UPT_SYSCALL_ARG4(®s->regs), \ diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c index ac8eee093f9c..66162eafd8e8 100644 --- a/arch/x86/um/tls_32.c +++ b/arch/x86/um/tls_32.c @@ -65,9 +65,6 @@ static int get_free_idx(struct task_struct* task) struct thread_struct *t = &task->thread; int idx; - if (!t->arch.tls_array) - return GDT_ENTRY_TLS_MIN; - for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) if (!t->arch.tls_array[idx].present) return idx + GDT_ENTRY_TLS_MIN; @@ -240,9 +237,6 @@ static int get_tls_entry(struct task_struct *task, struct user_desc *info, { struct thread_struct *t = &task->thread; - if (!t->arch.tls_array) - goto clear; - if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile index 5943387e3f35..5ca366e15c76 100644 --- a/arch/x86/um/vdso/Makefile +++ b/arch/x86/um/vdso/Makefile @@ -62,7 +62,7 @@ quiet_cmd_vdso = VDSO $@ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' -VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv +VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv -z noexecstack GCOV_PROFILE := n # diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index fc5c5ba4aacb..40b5779fce21 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -OBJECT_FILES_NON_STANDARD_xen-asm.o := y ifdef CONFIG_FUNCTION_TRACER # Do not profile debug and lowlevel utilities diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 804c65d2b95f..815030b7f6fa 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -768,6 +768,7 @@ static void xen_load_idt(const struct desc_ptr *desc) { static DEFINE_SPINLOCK(lock); static struct trap_info traps[257]; + static const struct trap_info zero = { }; unsigned out; trace_xen_cpu_load_idt(desc); @@ -777,7 +778,7 @@ static void xen_load_idt(const struct desc_ptr *desc) memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); out = xen_convert_trap_info(desc, traps, false); - memset(&traps[out], 0, sizeof(traps[0])); + traps[out] = zero; xen_mc_flush(); if (HYPERVISOR_set_trap_table(traps)) diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 8bfc10330107..1f80dd3a2dd4 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -922,7 +922,7 @@ void xen_enable_sysenter(void) if (!boot_cpu_has(sysenter_feature)) return; - ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target); + ret = register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat); if(ret != 0) setup_clear_cpu_cap(sysenter_feature); } @@ -931,7 +931,7 @@ void xen_enable_syscall(void) { int ret; - ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target); + ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64); if (ret != 0) { printk(KERN_ERR "Failed to set syscall callback: %d\n", ret); /* Pretty fatal; 64-bit userspace has no other @@ -940,7 +940,7 @@ void xen_enable_syscall(void) if (boot_cpu_has(X86_FEATURE_SYSCALL32)) { ret = register_callback(CALLBACKTYPE_syscall32, - xen_syscall32_target); + xen_entry_SYSCALL_compat); if (ret != 0) setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); } diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c index 6ff3c887e0b9..b70afdff419c 100644 --- a/arch/x86/xen/smp_hvm.c +++ 
b/arch/x86/xen/smp_hvm.c @@ -19,6 +19,12 @@ static void __init xen_hvm_smp_prepare_boot_cpu(void) */ xen_vcpu_setup(0); + /* + * Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS. + * Refer to comments in xen_hvm_init_time_ops(). + */ + xen_hvm_init_time_ops(); + /* * The alternative logic (which patches the unlock/lock) runs before * the smp bootup code is activated. Hence we need to set this up diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 91f5b330dcc6..8183d17e1cf1 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -556,6 +556,11 @@ static void xen_hvm_setup_cpu_clockevents(void) void __init xen_hvm_init_time_ops(void) { + static bool hvm_time_initialized; + + if (hvm_time_initialized) + return; + /* * vector callback is needed otherwise we cannot receive interrupts * on cpu > 0 and at this point we don't know how many cpus are @@ -565,7 +570,22 @@ void __init xen_hvm_init_time_ops(void) return; if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { - pr_info("Xen doesn't support pvclock on HVM, disable pv timer"); + pr_info_once("Xen doesn't support pvclock on HVM, disable pv timer"); + return; + } + + /* + * Only MAX_VIRT_CPUS 'vcpu_info' are embedded inside 'shared_info'. + * __this_cpu_read(xen_vcpu) is still NULL when a Xen HVM guest + * boots on vcpu >= MAX_VIRT_CPUS (e.g., kexec). Accessing + * __this_cpu_read(xen_vcpu) via xen_clocksource_read() will then panic. + * + * xen_hvm_init_time_ops() should be called again later, after + * __this_cpu_read(xen_vcpu) is available. + */ + if (!__this_cpu_read(xen_vcpu)) { + pr_info("Delay xen_init_time_common() as kernel is running on vcpu=%d\n", + xen_vcpu_nr(0)); return; } @@ -577,6 +597,8 @@ void __init xen_hvm_init_time_ops(void) x86_platform.calibrate_tsc = xen_tsc_khz; x86_platform.get_wallclock = xen_get_wallclock; x86_platform.set_wallclock = xen_set_wallclock; + + hvm_time_initialized = true; } #endif diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index 011ec649f388..e3031afcb103 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S @@ -14,6 +14,7 @@ #include #include #include +#include #include @@ -44,7 +45,7 @@ SYM_FUNC_START(xen_irq_enable_direct) call check_events 1: FRAME_END - ret + RET SYM_FUNC_END(xen_irq_enable_direct) @@ -54,7 +55,7 @@ SYM_FUNC_END(xen_irq_enable_direct) */ SYM_FUNC_START(xen_irq_disable_direct) movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask - ret + RET SYM_FUNC_END(xen_irq_disable_direct) /* @@ -70,7 +71,7 @@ SYM_FUNC_START(xen_save_fl_direct) testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask setz %ah addb %ah, %ah - ret + RET SYM_FUNC_END(xen_save_fl_direct) @@ -97,7 +98,7 @@ SYM_FUNC_START(xen_restore_fl_direct) call check_events 1: FRAME_END - ret + RET SYM_FUNC_END(xen_restore_fl_direct) @@ -127,7 +128,7 @@ SYM_FUNC_START(check_events) pop %rcx pop %rax FRAME_END - ret + RET SYM_FUNC_END(check_events) SYM_FUNC_START(xen_read_cr2) @@ -135,18 +136,19 @@ SYM_FUNC_START(xen_read_cr2) _ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX _ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX FRAME_END - ret + RET SYM_FUNC_END(xen_read_cr2); SYM_FUNC_START(xen_read_cr2_direct) FRAME_BEGIN _ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX FRAME_END - ret + RET SYM_FUNC_END(xen_read_cr2_direct); .macro xen_pv_trap name SYM_CODE_START(xen_\name) + UNWIND_HINT_ENTRY pop %rcx pop %r11 jmp \name @@ -186,6 +188,7 @@ xen_pv_trap asm_exc_xen_hypervisor_callback SYM_CODE_START(xen_early_idt_handler_array) i = 0 .rept 
NUM_EXCEPTION_VECTORS + UNWIND_HINT_EMPTY pop %rcx pop %r11 jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE @@ -212,11 +215,13 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 * rsp->rax } */ SYM_CODE_START(xen_iret) + UNWIND_HINT_EMPTY pushq $0 jmp hypercall_iret SYM_CODE_END(xen_iret) SYM_CODE_START(xen_sysret64) + UNWIND_HINT_EMPTY /* * We're already on the usermode stack at this point, but * still with the kernel gs, so we can easily switch back. @@ -271,7 +276,8 @@ SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode) */ /* Normal 64-bit system call target */ -SYM_FUNC_START(xen_syscall_target) +SYM_CODE_START(xen_entry_SYSCALL_64) + UNWIND_HINT_ENTRY popq %rcx popq %r11 @@ -284,12 +290,13 @@ SYM_FUNC_START(xen_syscall_target) movq $__USER_CS, 1*8(%rsp) jmp entry_SYSCALL_64_after_hwframe -SYM_FUNC_END(xen_syscall_target) +SYM_CODE_END(xen_entry_SYSCALL_64) #ifdef CONFIG_IA32_EMULATION /* 32-bit compat syscall target */ -SYM_FUNC_START(xen_syscall32_target) +SYM_CODE_START(xen_entry_SYSCALL_compat) + UNWIND_HINT_ENTRY popq %rcx popq %r11 @@ -302,10 +309,11 @@ SYM_FUNC_START(xen_syscall32_target) movq $__USER32_CS, 1*8(%rsp) jmp entry_SYSCALL_compat_after_hwframe -SYM_FUNC_END(xen_syscall32_target) +SYM_CODE_END(xen_entry_SYSCALL_compat) /* 32-bit compat sysenter target */ -SYM_FUNC_START(xen_sysenter_target) +SYM_CODE_START(xen_entry_SYSENTER_compat) + UNWIND_HINT_ENTRY /* * NB: Xen is polite and clears TF from EFLAGS for us. This means * that we don't need to guard against single step exceptions here. @@ -322,17 +330,18 @@ SYM_FUNC_START(xen_sysenter_target) movq $__USER32_CS, 1*8(%rsp) jmp entry_SYSENTER_compat_after_hwframe -SYM_FUNC_END(xen_sysenter_target) +SYM_CODE_END(xen_entry_SYSENTER_compat) #else /* !CONFIG_IA32_EMULATION */ -SYM_FUNC_START_ALIAS(xen_syscall32_target) -SYM_FUNC_START(xen_sysenter_target) +SYM_CODE_START(xen_entry_SYSCALL_compat) +SYM_CODE_START(xen_entry_SYSENTER_compat) + UNWIND_HINT_ENTRY lea 16(%rsp), %rsp /* strip %rcx, %r11 */ mov $-ENOSYS, %rax pushq $0 jmp hypercall_iret -SYM_FUNC_END(xen_sysenter_target) -SYM_FUNC_END_ALIAS(xen_syscall32_target) +SYM_CODE_END(xen_entry_SYSENTER_compat) +SYM_CODE_END(xen_entry_SYSCALL_compat) #endif /* CONFIG_IA32_EMULATION */ diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index 2d7c8f34f56c..2a3ef5fcba34 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -68,8 +68,10 @@ SYM_CODE_END(asm_cpu_bringup_and_idle) .balign PAGE_SIZE SYM_CODE_START(hypercall_page) .rept (PAGE_SIZE / 32) - UNWIND_HINT_EMPTY - .skip 32 + UNWIND_HINT_FUNC + ANNOTATE_UNRET_SAFE + ret + .skip 31, 0xcc .endr #define HYPERCALL(n) \ diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 9546c3384c75..8695809b88f0 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -10,10 +10,10 @@ /* These are code, but not functions. 
Defined in entry.S */ extern const char xen_failsafe_callback[]; -void xen_sysenter_target(void); +void xen_entry_SYSENTER_compat(void); #ifdef CONFIG_X86_64 -void xen_syscall_target(void); -void xen_syscall32_target(void); +void xen_entry_SYSCALL_64(void); +void xen_entry_SYSCALL_compat(void); #endif extern void *xen_initial_gdt; diff --git a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi index 9bf8bad1dd18..c33932568aa7 100644 --- a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi +++ b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi @@ -8,19 +8,19 @@ reg = <0x00000000 0x08000000>; bank-width = <2>; device-width = <2>; - partition@0x0 { + partition@0 { label = "data"; reg = <0x00000000 0x06000000>; }; - partition@0x6000000 { + partition@6000000 { label = "boot loader area"; reg = <0x06000000 0x00800000>; }; - partition@0x6800000 { + partition@6800000 { label = "kernel image"; reg = <0x06800000 0x017e0000>; }; - partition@0x7fe0000 { + partition@7fe0000 { label = "boot environment"; reg = <0x07fe0000 0x00020000>; }; diff --git a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi index 40c2f81f7cb6..7bde2ab2d6fb 100644 --- a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi +++ b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi @@ -8,19 +8,19 @@ reg = <0x08000000 0x01000000>; bank-width = <2>; device-width = <2>; - partition@0x0 { + partition@0 { label = "boot loader area"; reg = <0x00000000 0x00400000>; }; - partition@0x400000 { + partition@400000 { label = "kernel image"; reg = <0x00400000 0x00600000>; }; - partition@0xa00000 { + partition@a00000 { label = "data"; reg = <0x00a00000 0x005e0000>; }; - partition@0xfe0000 { + partition@fe0000 { label = "boot environment"; reg = <0x00fe0000 0x00020000>; }; diff --git a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi index fb8d3a9f33c2..0655b868749a 100644 --- a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi +++ b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi @@ -8,11 +8,11 @@ reg = <0x08000000 0x00400000>; bank-width = <2>; device-width = <2>; - partition@0x0 { + partition@0 { label = "boot loader area"; reg = <0x00000000 0x003f0000>; }; - partition@0x3f0000 { + partition@3f0000 { label = "boot environment"; reg = <0x003f0000 0x00010000>; }; diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index 6acbbe0d87d3..a312333a9add 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -111,18 +111,21 @@ static inline struct thread_info *current_thread_info(void) #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ #define TIF_SYSCALL_TRACEPOINT 4 /* syscall tracepoint instrumentation */ -#define TIF_MEMDIE 5 /* is terminating due to OOM killer */ +#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */ #define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */ #define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ #define TIF_DB_DISABLED 8 /* debug trap disabled for syscall */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ #define TIF_SECCOMP 10 /* secure computing */ +#define TIF_MEMDIE 11 /* is terminating due to OOM killer */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) + #endif /* _XTENSA_TIMEX_H */ diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S index 45cc0ae0af6f..c7b9f12896f2 100644 --- a/arch/xtensa/kernel/coprocessor.S +++ 
b/arch/xtensa/kernel/coprocessor.S @@ -29,7 +29,7 @@ .if XTENSA_HAVE_COPROCESSOR(x); \ .align 4; \ .Lsave_cp_regs_cp##x: \ - xchal_cp##x##_store a2 a4 a5 a6 a7; \ + xchal_cp##x##_store a2 a3 a4 a5 a6; \ jx a0; \ .endif @@ -46,7 +46,7 @@ .if XTENSA_HAVE_COPROCESSOR(x); \ .align 4; \ .Lload_cp_regs_cp##x: \ - xchal_cp##x##_load a2 a4 a5 a6 a7; \ + xchal_cp##x##_load a2 a3 a4 a5 a6; \ jx a0; \ .endif diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 703cf6205efe..647b162f959b 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -500,8 +500,8 @@ common_exception_return: */ _bbsi.l a4, TIF_NEED_RESCHED, 3f - _bbsi.l a4, TIF_NOTIFY_RESUME, 2f - _bbci.l a4, TIF_SIGPENDING, 5f + movi a2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL + bnone a4, a2, 5f 2: l32i a4, a1, PT_DEPC bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c index 0dde21e0d3de..ad1841cecdfb 100644 --- a/arch/xtensa/kernel/jump_label.c +++ b/arch/xtensa/kernel/jump_label.c @@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data) { struct patch *patch = data; - if (atomic_inc_return(&patch->cpu_count) == 1) { + if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) { local_patch_text(patch->addr, patch->data, patch->sz); atomic_inc(&patch->cpu_count); } else { diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 397a7de56377..9534ef515d74 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -217,7 +217,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, p->thread.sp = (unsigned long)childregs; - if (!(p->flags & PF_KTHREAD)) { + if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { struct pt_regs *regs = current_pt_regs(); unsigned long usp = usp_thread_fn ? usp_thread_fn : regs->areg[1]; diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index bb3f4797d212..db6cdea471d8 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c @@ -226,12 +226,12 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) void user_enable_single_step(struct task_struct *child) { - child->ptrace |= PT_SINGLESTEP; + set_tsk_thread_flag(child, TIF_SINGLESTEP); } void user_disable_single_step(struct task_struct *child) { - child->ptrace &= ~PT_SINGLESTEP; + clear_tsk_thread_flag(child, TIF_SINGLESTEP); } /* diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index 1fb1047f905c..f2b00f43cf23 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -465,7 +465,7 @@ static void do_signal(struct pt_regs *regs) /* Set up the stack frame */ ret = setup_frame(&ksig, sigmask_to_save(), regs); signal_setup_done(ret, &ksig, 0); - if (current->ptrace & PT_SINGLESTEP) + if (test_thread_flag(TIF_SINGLESTEP)) task_pt_regs(current)->icountlevel = 1; return; @@ -491,14 +491,15 @@ static void do_signal(struct pt_regs *regs) /* If there's no signal to deliver, we just restore the saved mask. 
*/ restore_saved_sigmask(); - if (current->ptrace & PT_SINGLESTEP) + if (test_thread_flag(TIF_SINGLESTEP)) task_pt_regs(current)->icountlevel = 1; return; } void do_notify_resume(struct pt_regs *regs) { - if (test_thread_flag(TIF_SIGPENDING)) + if (test_thread_flag(TIF_SIGPENDING) || + test_thread_flag(TIF_NOTIFY_SIGNAL)) do_signal(regs); if (test_thread_flag(TIF_NOTIFY_RESUME)) diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 77971fe4cc95..8e81ba63ed38 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -154,6 +154,7 @@ static void __init calibrate_ccount(void) cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu"); if (cpu) { clk = of_clk_get(cpu, 0); + of_node_put(cpu); if (!IS_ERR(clk)) { ccount_freq = clk_get_rate(clk); return; diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 4986226a5ab2..08d70c868c13 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -502,16 +502,24 @@ static const struct net_device_ops iss_netdev_ops = { .ndo_set_rx_mode = iss_net_set_multicast_list, }; -static int iss_net_configure(int index, char *init) +static void iss_net_pdev_release(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct iss_net_private *lp = + container_of(pdev, struct iss_net_private, pdev); + + free_netdev(lp->dev); +} + +static void iss_net_configure(int index, char *init) { struct net_device *dev; struct iss_net_private *lp; - int err; dev = alloc_etherdev(sizeof(*lp)); if (dev == NULL) { pr_err("eth_configure: failed to allocate device\n"); - return 1; + return; } /* Initialize private element. */ @@ -540,7 +548,7 @@ static int iss_net_configure(int index, char *init) if (!tuntap_probe(lp, index, init)) { pr_err("%s: invalid arguments. Skipping device!\n", dev->name); - goto errout; + goto err_free_netdev; } pr_info("Netdevice %d (%pM)\n", index, dev->dev_addr); @@ -548,7 +556,8 @@ static int iss_net_configure(int index, char *init) /* sysfs register */ if (!driver_registered) { - platform_driver_register(&iss_net_driver); + if (platform_driver_register(&iss_net_driver)) + goto err_free_netdev; driver_registered = 1; } @@ -558,7 +567,9 @@ static int iss_net_configure(int index, char *init) lp->pdev.id = index; lp->pdev.name = DRIVER_NAME; - platform_device_register(&lp->pdev); + lp->pdev.dev.release = iss_net_pdev_release; + if (platform_device_register(&lp->pdev)) + goto err_free_netdev; SET_NETDEV_DEV(dev, &lp->pdev.dev); dev->netdev_ops = &iss_netdev_ops; @@ -567,23 +578,20 @@ static int iss_net_configure(int index, char *init) dev->irq = -1; rtnl_lock(); - err = register_netdevice(dev); - rtnl_unlock(); - - if (err) { + if (register_netdevice(dev)) { + rtnl_unlock(); pr_err("%s: error registering net device!\n", dev->name); - /* XXX: should we call ->remove() here? */ - free_netdev(dev); - return 1; + platform_device_unregister(&lp->pdev); + return; } + rtnl_unlock(); timer_setup(&lp->tl, iss_net_user_timer_expire, 0); - return 0; + return; -errout: - /* FIXME: unregister; free, etc.. 
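For illustration only: the iss_net_configure() hunk above moves freeing of the net_device into the platform device's ->release callback, so the last reference drop, not the error path, decides when the memory goes away. A minimal standalone sketch of that ownership pattern, assuming invented demo_* names rather than the real kernel APIs:

/* Hedged sketch: a refcounted device whose release hook frees the
 * enclosing object, mirroring iss_net_pdev_release() above. Not
 * kernel code; all names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

struct demo_dev {
	int refs;
	void (*release)(struct demo_dev *);
};

struct demo_priv {
	struct demo_dev dev;	/* embedded, like pdev in iss_net_private */
	char *netdev_mem;	/* stands in for the net_device allocation */
};

static void demo_put(struct demo_dev *d)
{
	if (--d->refs == 0 && d->release)
		d->release(d);	/* last reference dropped: run the hook */
}

static void demo_release(struct demo_dev *d)
{
	/* container_of() in the kernel; the cast works here because
	 * dev is the first member of demo_priv. */
	struct demo_priv *p = (struct demo_priv *)d;

	free(p->netdev_mem);
	free(p);
	puts("released");
}

int main(void)
{
	struct demo_priv *p = calloc(1, sizeof(*p));

	p->netdev_mem = malloc(64);
	p->dev.refs = 1;
	p->dev.release = demo_release;
	demo_put(&p->dev);	/* frees everything exactly once */
	return 0;
}

The point of the design change is that unregister paths may only drop references; the release callback is the single place that frees.
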
*/ - return -EIO; +err_free_netdev: + free_netdev(dev); } /* ------------------------------------------------------------------------- */ diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index 3447556d276d..2b3c829f655b 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -213,12 +213,18 @@ static ssize_t proc_read_simdisk(struct file *file, char __user *buf, struct simdisk *dev = PDE_DATA(file_inode(file)); const char *s = dev->filename; if (s) { - ssize_t n = simple_read_from_buffer(buf, size, ppos, - s, strlen(s)); - if (n < 0) - return n; - buf += n; - size -= n; + ssize_t len = strlen(s); + char *temp = kmalloc(len + 2, GFP_KERNEL); + + if (!temp) + return -ENOMEM; + + len = scnprintf(temp, len + 2, "%s\n", s); + len = simple_read_from_buffer(buf, size, ppos, + temp, len); + + kfree(temp); + return len; } return simple_read_from_buffer(buf, size, ppos, "\n", 1); } diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c index 538e6748e85a..c79c1d09ea86 100644 --- a/arch/xtensa/platforms/xtfpga/setup.c +++ b/arch/xtensa/platforms/xtfpga/setup.c @@ -133,6 +133,7 @@ static int __init machine_setup(void) if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc"))) update_local_mac(eth); + of_node_put(eth); return 0; } arch_initcall(machine_setup); diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index c2fdd6fcdaee..badb90352bf3 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -553,6 +553,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd) */ bfqg->bfqd = bfqd; bfqg->active_entities = 0; + bfqg->online = true; bfqg->rq_pos_tree = RB_ROOT; } @@ -581,28 +582,11 @@ static void bfq_group_set_parent(struct bfq_group *bfqg, entity->sched_data = &parent->sched_data; } -static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd, - struct blkcg *blkcg) +static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg) { - struct blkcg_gq *blkg; - - blkg = blkg_lookup(blkcg, bfqd->queue); - if (likely(blkg)) - return blkg_to_bfqg(blkg); - return NULL; -} - -struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, - struct blkcg *blkcg) -{ - struct bfq_group *bfqg, *parent; + struct bfq_group *parent; struct bfq_entity *entity; - bfqg = bfq_lookup_bfqg(bfqd, blkcg); - - if (unlikely(!bfqg)) - return NULL; - /* * Update chain of bfq_groups as we might be handling a leaf group * which, along with some of its relatives, has not been hooked yet @@ -619,8 +603,28 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, bfq_group_set_parent(curr_bfqg, parent); } } +} - return bfqg; +struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio) +{ + struct blkcg_gq *blkg = bio->bi_blkg; + struct bfq_group *bfqg; + + while (blkg) { + if (!blkg->online) { + blkg = blkg->parent; + continue; + } + bfqg = blkg_to_bfqg(blkg); + if (bfqg->online) { + bio_associate_blkg_from_css(bio, &blkg->blkcg->css); + return bfqg; + } + blkg = blkg->parent; + } + bio_associate_blkg_from_css(bio, + &bfqg_to_blkg(bfqd->root_group)->blkcg->css); + return bfqd->root_group; } /** @@ -696,25 +700,15 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, * Move bic to blkcg, assuming that bfqd->lock is held; which makes * sure that the reference to cgroup is valid across the call (see * comments in bfq_bic_update_cgroup on this issue) - * - * NOTE: an alternative approach might have been to store the current - * cgroup in bfqq and getting a reference to it, 
reducing the lookup - * time here, at the price of slightly more complex code. */ -static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, - struct bfq_io_cq *bic, - struct blkcg *blkcg) +static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd, + struct bfq_io_cq *bic, + struct bfq_group *bfqg) { struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0); struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1); - struct bfq_group *bfqg; struct bfq_entity *entity; - bfqg = bfq_find_set_group(bfqd, blkcg); - - if (unlikely(!bfqg)) - bfqg = bfqd->root_group; - if (async_bfqq) { entity = &async_bfqq->entity; @@ -725,9 +719,39 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, } if (sync_bfqq) { - entity = &sync_bfqq->entity; - if (entity->sched_data != &bfqg->sched_data) - bfq_bfqq_move(bfqd, sync_bfqq, bfqg); + if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) { + /* We are the only user of this bfqq, just move it */ + if (sync_bfqq->entity.sched_data != &bfqg->sched_data) + bfq_bfqq_move(bfqd, sync_bfqq, bfqg); + } else { + struct bfq_queue *bfqq; + + /* + * The queue was merged to a different queue. Check + * that the merge chain still belongs to the same + * cgroup. + */ + for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq) + if (bfqq->entity.sched_data != + &bfqg->sched_data) + break; + if (bfqq) { + /* + * Some queue changed cgroup so the merge is + * not valid anymore. We cannot easily just + * cancel the merge (by clearing new_bfqq) as + * there may be other processes using this + * queue and holding refs to all queues below + * sync_bfqq->new_bfqq. Similarly if the merge + * already happened, we need to detach from + * bfqq now so that we cannot merge bio to a + * request from the old cgroup. + */ + bfq_put_cooperator(sync_bfqq); + bfq_release_process_ref(bfqd, sync_bfqq); + bic_set_bfqq(bic, NULL, 1); + } + } } return bfqg; @@ -736,20 +760,24 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) { struct bfq_data *bfqd = bic_to_bfqd(bic); - struct bfq_group *bfqg = NULL; + struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio); uint64_t serial_nr; - rcu_read_lock(); - serial_nr = __bio_blkcg(bio)->css.serial_nr; + serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr; /* * Check whether blkcg has changed. The condition may trigger * spuriously on a newly created cic but there's no harm. */ if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr)) - goto out; + return; - bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio)); + /* + * New cgroup for this process. Make sure it is linked to bfq internal + * cgroup hierarchy. + */ + bfq_link_bfqg(bfqd, bfqg); + __bfq_bic_change_cgroup(bfqd, bic, bfqg); /* * Update blkg_path for bfq_log_* functions. 
We cache this * path, and update it here, for the following @@ -802,8 +830,6 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) */ blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path)); bic->blkcg_serial_nr = serial_nr; -out: - rcu_read_unlock(); } /** @@ -931,6 +957,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) put_async_queues: bfq_put_async_queues(bfqd, bfqg); + bfqg->online = false; spin_unlock_irqrestore(&bfqd->lock, flags); /* @@ -1420,7 +1447,7 @@ void bfq_end_wr_async(struct bfq_data *bfqd) bfq_end_wr_async_queues(bfqd, bfqd->root_group); } -struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg) +struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio) { return bfqd->root_group; } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 4edec39ba1ba..0e64aa558b95 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -421,6 +421,8 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, */ void bfq_schedule_dispatch(struct bfq_data *bfqd) { + lockdep_assert_held(&bfqd->lock); + if (bfqd->queued != 0) { bfq_log(bfqd, "schedule dispatch"); blk_mq_run_hw_queues(bfqd->queue, true); @@ -2227,10 +2229,17 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio, spin_lock_irq(&bfqd->lock); - if (bic) + if (bic) { + /* + * Make sure cgroup info is uptodate for current process before + * considering the merge. + */ + bfq_bic_update_cgroup(bic, bio); + bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); - else + } else { bfqd->bio_bfqq = NULL; + } bfqd->bio_bic = bic; ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free); @@ -2260,8 +2269,6 @@ static int bfq_request_merge(struct request_queue *q, struct request **req, return ELEVATOR_NO_MERGE; } -static struct bfq_queue *bfq_init_rq(struct request *rq); - static void bfq_request_merged(struct request_queue *q, struct request *req, enum elv_merge type) { @@ -2270,7 +2277,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, blk_rq_pos(req) < blk_rq_pos(container_of(rb_prev(&req->rb_node), struct request, rb_node))) { - struct bfq_queue *bfqq = bfq_init_rq(req); + struct bfq_queue *bfqq = RQ_BFQQ(req); struct bfq_data *bfqd; struct request *prev, *next_rq; @@ -2322,8 +2329,8 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, static void bfq_requests_merged(struct request_queue *q, struct request *rq, struct request *next) { - struct bfq_queue *bfqq = bfq_init_rq(rq), - *next_bfqq = bfq_init_rq(next); + struct bfq_queue *bfqq = RQ_BFQQ(rq), + *next_bfqq = RQ_BFQQ(next); if (!bfqq) return; @@ -2502,6 +2509,14 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) if (process_refs == 0 || new_process_refs == 0) return NULL; + /* + * Make sure merged queues belong to the same parent. Parents could + * have changed since the time we decided the two queues are suitable + * for merging. 
+ */ + if (new_bfqq->entity.parent != bfqq->entity.parent) + return NULL; + bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", new_bfqq->pid); @@ -4914,7 +4929,7 @@ void bfq_put_queue(struct bfq_queue *bfqq) bfqg_and_blkg_put(bfqg); } -static void bfq_put_cooperator(struct bfq_queue *bfqq) +void bfq_put_cooperator(struct bfq_queue *bfqq) { struct bfq_queue *__bfqq, *next; @@ -5146,14 +5161,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, struct bfq_queue *bfqq; struct bfq_group *bfqg; - rcu_read_lock(); - - bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio)); - if (!bfqg) { - bfqq = &bfqd->oom_bfqq; - goto out; - } - + bfqg = bfq_bio_bfqg(bfqd, bio); if (!is_sync) { async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, ioprio); @@ -5197,7 +5205,6 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, out: bfqq->ref++; /* get a process reference to this queue */ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); - rcu_read_unlock(); return bfqq; } @@ -5500,6 +5507,8 @@ static inline void bfq_update_insert_stats(struct request_queue *q, unsigned int cmd_flags) {} #endif /* CONFIG_BFQ_CGROUP_DEBUG */ +static struct bfq_queue *bfq_init_rq(struct request *rq); + static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head) { @@ -5514,17 +5523,14 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bfqg_stats_update_legacy_io(q, rq); #endif spin_lock_irq(&bfqd->lock); + bfqq = bfq_init_rq(rq); if (blk_mq_sched_try_insert_merge(q, rq)) { spin_unlock_irq(&bfqd->lock); return; } - spin_unlock_irq(&bfqd->lock); - blk_mq_sched_request_inserted(rq); - spin_lock_irq(&bfqd->lock); - bfqq = bfq_init_rq(rq); if (!bfqq || at_head || blk_rq_is_passthrough(rq)) { if (at_head) list_add(&rq->queuelist, &bfqd->dispatch); @@ -6260,8 +6266,8 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_bfqq_expire(bfqd, bfqq, true, reason); schedule_dispatch: - spin_unlock_irqrestore(&bfqd->lock, flags); bfq_schedule_dispatch(bfqd); + spin_unlock_irqrestore(&bfqd->lock, flags); } /* diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 703895224562..2a4a6f44efff 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -901,6 +901,8 @@ struct bfq_group { /* reference counter (see comments in bfq_bic_update_cgroup) */ int ref; + /* Is bfq_group still online? 
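The online flag declared just below is what bfq_bio_bfqg(), earlier in this series, consults while climbing towards the root. As a rough model of that fallback walk, here is a self-contained sketch under the same assumption of invented demo_* names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_blkg {
	bool online;
	struct demo_blkg *parent;
};

/* Climb towards the root until an online group is found, falling
 * back to the root group, as bfq_bio_bfqg() does above. */
static struct demo_blkg *first_online(struct demo_blkg *g,
				      struct demo_blkg *root)
{
	for (; g; g = g->parent)
		if (g->online)
			return g;
	return root;
}

int main(void)
{
	struct demo_blkg root = { true, NULL };
	struct demo_blkg mid = { false, &root };	/* offlined cgroup */
	struct demo_blkg leaf = { false, &mid };

	printf("%s\n",
	       first_online(&leaf, &root) == &root ? "root" : "ancestor");
	return 0;
}
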
*/ + bool online; struct bfq_entity entity; struct bfq_sched_data sched_data; @@ -954,6 +956,7 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd, void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq, bool compensate, enum bfqq_expiration reason); void bfq_put_queue(struct bfq_queue *bfqq); +void bfq_put_cooperator(struct bfq_queue *bfqq); void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq); void bfq_schedule_dispatch(struct bfq_data *bfqd); @@ -981,8 +984,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg); void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio); void bfq_end_wr_async(struct bfq_data *bfqd); -struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, - struct blkcg *blkcg); +struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio); struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); struct bfq_group *bfqq_group(struct bfq_queue *bfqq); struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node); diff --git a/block/bio.c b/block/bio.c index f8d26ce7b61b..6d6e7b96b002 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1057,9 +1057,6 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter) size_t offset; int ret = 0; - if (WARN_ON_ONCE(!max_append_sectors)) - return 0; - /* * Move page array up in the allocated memory for the bio vecs as far as * possible so that we can start filling biovecs from the beginning diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 22fb1beed49a..39c94ee6c4a5 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1898,12 +1898,8 @@ EXPORT_SYMBOL_GPL(bio_associate_blkg); */ void bio_clone_blkg_association(struct bio *dst, struct bio *src) { - if (src->bi_blkg) { - if (dst->bi_blkg) - blkg_put(dst->bi_blkg); - blkg_get(src->bi_blkg); - dst->bi_blkg = src->bi_blkg; - } + if (src->bi_blkg) + bio_associate_blkg_from_css(dst, &bio_blkcg(src)->css); } EXPORT_SYMBOL_GPL(bio_clone_blkg_association); diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 9af32b44b717..fb8f959a7f32 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -2257,7 +2257,17 @@ static void ioc_timer_fn(struct timer_list *timer) iocg->hweight_donating = hwa; iocg->hweight_after_donation = new_hwi; list_add(&iocg->surplus_list, &surpluses); - } else { + } else if (!iocg->abs_vdebt) { + /* + * @iocg doesn't have enough to donate. Reset + * its inuse to active. + * + * Don't reset debtors as their inuse's are + * owned by debt handling. This shouldn't affect + * donation calculuation in any meaningful way + * as @iocg doesn't have a meaningful amount of + * share anyway. + */ TRACE_IOCG_PATH(inuse_shortage, iocg, &now, iocg->inuse, iocg->active, iocg->hweight_inuse, new_hwi); diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index d8b0d8bd132b..74511a060d59 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -86,7 +86,17 @@ struct iolatency_grp; struct blk_iolatency { struct rq_qos rqos; struct timer_list timer; - atomic_t enabled; + + /* + * ->enabled is the master enable switch gating the throttling logic and + * inflight tracking. The number of cgroups which have iolat enabled is + * tracked in ->enable_cnt, and ->enable is flipped on/off accordingly + * from ->enable_work with the request_queue frozen. For details, See + * blkiolatency_enable_work_fn(). 
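A compressed model of the scheme this comment describes: per-group enables are counted in a shared counter, while the fast paths read a plain cached flag that only a worker may flip, and only with the queue frozen. The sketch below simulates the freeze and uses hypothetical names; it is not the driver code:

#include <stdbool.h>
#include <stdio.h>

static int enable_cnt;		/* atomic_t in the real code */
static bool enabled;		/* only flipped with the queue frozen */

static void enable_work_fn(void)
{
	bool want = enable_cnt > 0;

	if (want != enabled) {
		/* blk_mq_freeze_queue(): no IO in flight past here */
		enabled = want;
		/* blk_mq_unfreeze_queue() */
	}
}

static void set_min_lat(unsigned long long old, unsigned long long val)
{
	/* 0 -> nonzero enables one group, nonzero -> 0 disables it */
	if (!old && val && ++enable_cnt == 1)
		enable_work_fn();	/* schedule_work() in the kernel */
	if (old && !val && --enable_cnt == 0)
		enable_work_fn();
}

int main(void)
{
	set_min_lat(0, 1000000);
	printf("enabled=%d\n", enabled);	/* 1 */
	set_min_lat(1000000, 0);
	printf("enabled=%d\n", enabled);	/* 0 */
	return 0;
}

Freezing the queue before the flip is what keeps the in-flight counting balanced: accounting can never be disabled while an IO it counted is still outstanding.
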
+ */ + bool enabled; + atomic_t enable_cnt; + struct work_struct enable_work; }; static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos) @@ -94,11 +104,6 @@ static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos) return container_of(rqos, struct blk_iolatency, rqos); } -static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat) -{ - return atomic_read(&blkiolat->enabled) > 0; -} - struct child_latency_info { spinlock_t lock; @@ -463,7 +468,7 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio) struct blkcg_gq *blkg = bio->bi_blkg; bool issue_as_root = bio_issue_as_root_blkg(bio); - if (!blk_iolatency_enabled(blkiolat)) + if (!blkiolat->enabled) return; while (blkg && blkg->parent) { @@ -593,7 +598,6 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) u64 window_start; u64 now; bool issue_as_root = bio_issue_as_root_blkg(bio); - bool enabled = false; int inflight = 0; blkg = bio->bi_blkg; @@ -604,8 +608,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) if (!iolat) return; - enabled = blk_iolatency_enabled(iolat->blkiolat); - if (!enabled) + if (!iolat->blkiolat->enabled) return; now = ktime_to_ns(ktime_get()); @@ -644,6 +647,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos) struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos); del_timer_sync(&blkiolat->timer); + flush_work(&blkiolat->enable_work); blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency); kfree(blkiolat); } @@ -715,6 +719,44 @@ next: rcu_read_unlock(); } +/** + * blkiolatency_enable_work_fn - Enable or disable iolatency on the device + * @work: enable_work of the blk_iolatency of interest + * + * iolatency needs to keep track of the number of in-flight IOs per cgroup. This + * is relatively expensive as it involves walking up the hierarchy twice for + * every IO. Thus, if iolatency is not enabled in any cgroup for the device, we + * want to disable the in-flight tracking. + * + * We have to make sure that the counting is balanced - we don't want to leak + * the in-flight counts by disabling accounting in the completion path while IOs + * are in flight. This is achieved by ensuring that no IO is in flight by + * freezing the queue while flipping ->enabled. As this requires a sleepable + * context, ->enabled flipping is punted to this work function. + */ +static void blkiolatency_enable_work_fn(struct work_struct *work) +{ + struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency, + enable_work); + bool enabled; + + /* + * There can only be one instance of this function running for @blkiolat + * and it's guaranteed to be executed at least once after the latest + * ->enabled_cnt modification. Acting on the latest ->enable_cnt is + * sufficient. + * + * Also, we know @blkiolat is safe to access as ->enable_work is flushed + * in blkcg_iolatency_exit(). + */ + enabled = atomic_read(&blkiolat->enable_cnt); + if (enabled != blkiolat->enabled) { + blk_mq_freeze_queue(blkiolat->rqos.q); + blkiolat->enabled = enabled; + blk_mq_unfreeze_queue(blkiolat->rqos.q); + } +} + int blk_iolatency_init(struct request_queue *q) { struct blk_iolatency *blkiolat; @@ -740,17 +782,15 @@ int blk_iolatency_init(struct request_queue *q) } timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0); + INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn); return 0; } -/* - * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise - * return 0. 
- */ -static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) +static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) { struct iolatency_grp *iolat = blkg_to_lat(blkg); + struct blk_iolatency *blkiolat = iolat->blkiolat; u64 oldval = iolat->min_lat_nsec; iolat->min_lat_nsec = val; @@ -758,13 +798,15 @@ static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec, BLKIOLATENCY_MAX_WIN_SIZE); - if (!oldval && val) - return 1; + if (!oldval && val) { + if (atomic_inc_return(&blkiolat->enable_cnt) == 1) + schedule_work(&blkiolat->enable_work); + } if (oldval && !val) { blkcg_clear_delay(blkg); - return -1; + if (atomic_dec_return(&blkiolat->enable_cnt) == 0) + schedule_work(&blkiolat->enable_work); } - return 0; } static void iolatency_clear_scaling(struct blkcg_gq *blkg) @@ -796,7 +838,6 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, u64 lat_val = 0; u64 oldval; int ret; - int enable = 0; ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx); if (ret) @@ -831,41 +872,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, blkg = ctx.blkg; oldval = iolat->min_lat_nsec; - enable = iolatency_set_min_lat_nsec(blkg, lat_val); - if (enable) { - if (!blk_get_queue(blkg->q)) { - ret = -ENODEV; - goto out; - } - - blkg_get(blkg); - } - - if (oldval != iolat->min_lat_nsec) { + iolatency_set_min_lat_nsec(blkg, lat_val); + if (oldval != iolat->min_lat_nsec) iolatency_clear_scaling(blkg); - } - ret = 0; out: blkg_conf_finish(&ctx); - if (ret == 0 && enable) { - struct iolatency_grp *tmp = blkg_to_lat(blkg); - struct blk_iolatency *blkiolat = tmp->blkiolat; - - blk_mq_freeze_queue(blkg->q); - - if (enable == 1) - atomic_inc(&blkiolat->enabled); - else if (enable == -1) - atomic_dec(&blkiolat->enabled); - else - WARN_ON_ONCE(1); - - blk_mq_unfreeze_queue(blkg->q); - - blkg_put(blkg); - blk_put_queue(blkg->q); - } return ret ?: nbytes; } @@ -1006,14 +1018,8 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd) { struct iolatency_grp *iolat = pd_to_lat(pd); struct blkcg_gq *blkg = lat_to_blkg(iolat); - struct blk_iolatency *blkiolat = iolat->blkiolat; - int ret; - ret = iolatency_set_min_lat_nsec(blkg, 0); - if (ret == 1) - atomic_inc(&blkiolat->enabled); - if (ret == -1) - atomic_dec(&blkiolat->enabled); + iolatency_set_min_lat_nsec(blkg, 0); iolatency_clear_scaling(blkg); } diff --git a/block/blk-map.c b/block/blk-map.c index 21630dccac62..ede73f4f7014 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -488,7 +488,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data, if (bytes > len) bytes = len; - page = alloc_page(q->bounce_gfp | gfp_mask); + page = alloc_page(q->bounce_gfp | __GFP_ZERO | gfp_mask); if (!page) goto cleanup; diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 43d4e9df161b..175e6c9aecb5 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -881,6 +881,9 @@ void blk_mq_debugfs_register_hctx(struct request_queue *q, char name[20]; int i; + if (!q->debugfs_dir) + return; + snprintf(name, sizeof(name), "hctx%u", hctx->queue_num); hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir); diff --git a/block/blk-mq.c b/block/blk-mq.c index db5146aec24e..5d520b187555 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -470,6 +470,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, if (!blk_mq_hw_queue_mapped(data.hctx)) goto out_queue_exit; cpu = 
cpumask_first_and(data.hctx->cpumask, cpu_online_mask); + if (cpu >= nr_cpu_ids) + goto out_queue_exit; data.ctx = __blk_mq_get_ctx(q, cpu); if (!q->elevator) @@ -1404,7 +1406,8 @@ out: /* If we didn't flush the entire list, we could have told the driver * there was more coming, but that turned out to be a lie. */ - if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued) + if ((!list_empty(list) || errors || needs_resource || + ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued) q->mq_ops->commit_rqs(hctx); /* * Any items that need requeuing? Stuff them into hctx->dispatch, @@ -1672,8 +1675,7 @@ static bool blk_mq_has_sqsched(struct request_queue *q) */ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) { - struct blk_mq_hw_ctx *hctx; - + struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); /* * If the IO scheduler does not respect hardware queues when * dispatching, we just don't bother with multiple HW queues and @@ -1681,8 +1683,8 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) * just causes lock contention inside the scheduler and pointless cache * bouncing. */ - hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, - raw_smp_processor_id()); + struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx); + if (!blk_mq_hctx_stopped(hctx)) return hctx; return NULL; @@ -2135,6 +2137,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, list_del_init(&rq->queuelist); ret = blk_mq_request_issue_directly(rq, list_empty(list)); if (ret != BLK_STS_OK) { + errors++; if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { blk_mq_request_bypass_insert(rq, false, @@ -2142,7 +2145,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, break; } blk_mq_end_request(rq, ret); - errors++; } else queued++; } diff --git a/block/blk-throttle.c b/block/blk-throttle.c index c53a254171a2..c526fdd0a7b9 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -944,7 +944,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, u64 bps_limit, unsigned long *wait) { bool rw = bio_data_dir(bio); - u64 bytes_allowed, extra_bytes, tmp; + u64 bytes_allowed, extra_bytes; unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; unsigned int bio_size = throtl_bio_data_size(bio); @@ -961,10 +961,8 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, jiffy_elapsed_rnd = tg->td->throtl_slice; jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); - - tmp = bps_limit * jiffy_elapsed_rnd; - do_div(tmp, HZ); - bytes_allowed = tmp; + bytes_allowed = mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed_rnd, + (u64)HZ); if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) { if (wait) diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 35d81b5deae1..6f63920f073c 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -838,9 +838,11 @@ int wbt_init(struct request_queue *q) rwb->last_comp = rwb->last_issue = jiffies; rwb->win_nsec = RWB_WINDOW_NSEC; rwb->enable_state = WBT_STATE_ON_DEFAULT; - rwb->wc = 1; + rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags); rwb->rq_depth.default_depth = RWB_DEF_DEPTH; - wbt_update_limits(rwb); + rwb->min_lat_nsec = wbt_default_latency_nsec(q); + + wbt_queue_depth_changed(&rwb->rqos); /* * Assign rwb and add the stats callback. 
@@ -848,10 +850,5 @@ int wbt_init(struct request_queue *q) rq_qos_add(q, &rwb->rqos); blk_stat_add_callback(q, rwb->cb); - rwb->min_lat_nsec = wbt_default_latency_nsec(q); - - wbt_queue_depth_changed(&rwb->rqos); - wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags)); - return 0; } diff --git a/block/ioctl.c b/block/ioctl.c index ed240e170e14..e7eed7dadb5c 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -679,7 +679,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) (bdev->bd_bdi->ra_pages * PAGE_SIZE) / 512); case BLKGETSIZE: size = i_size_read(bdev->bd_inode); - if ((size >> 9) > ~0UL) + if ((size >> 9) > ~(compat_ulong_t)0) return -EFBIG; return compat_put_ulong(argp, size >> 9); diff --git a/block/mq-deadline-main.c b/block/mq-deadline-main.c index f441bf5929be..cdcf72e72d98 100644 --- a/block/mq-deadline-main.c +++ b/block/mq-deadline-main.c @@ -749,6 +749,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, per_prio = &dd->per_prio[prio]; if (at_head) { list_add(&rq->queuelist, &per_prio->dispatch); + rq->fifo_time = jiffies; } else { deadline_add_rq_rb(per_prio, rq); @@ -866,7 +867,7 @@ SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]); SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire); SHOW_INT(deadline_writes_starved_show, dd->writes_starved); SHOW_INT(deadline_front_merges_show, dd->front_merges); -SHOW_INT(deadline_async_depth_show, dd->front_merges); +SHOW_INT(deadline_async_depth_show, dd->async_depth); SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch); #undef SHOW_INT #undef SHOW_JIFFIES @@ -896,7 +897,7 @@ STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MA STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX); STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX); STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1); -STORE_INT(deadline_async_depth_store, &dd->front_merges, 1, INT_MAX); +STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX); STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX); #undef STORE_FUNCTION #undef STORE_INT diff --git a/block/partitions/core.c b/block/partitions/core.c index a02e22411594..e3d61ec4a5a6 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -329,6 +329,7 @@ void delete_partition(struct hd_struct *part) struct gendisk *disk = part_to_disk(part); struct disk_part_tbl *ptbl = rcu_dereference_protected(disk->part_tbl, 1); + struct block_device *bdev; /* * ->part_tbl is referenced in this part's release handler, so @@ -346,6 +347,12 @@ void delete_partition(struct hd_struct *part) * "in-use" until we really free the gendisk. 
*/ blk_invalidate_devt(part_devt(part)); + + bdev = bdget_part(part); + if (bdev) { + remove_inode_hash(bdev->bd_inode); + bdput(bdev); + } percpu_ref_kill(&part->ref); } diff --git a/block/sed-opal.c b/block/sed-opal.c index daafadbb88ca..0ac5a4f3f226 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c @@ -88,8 +88,8 @@ struct opal_dev { u64 lowest_lba; size_t pos; - u8 cmd[IO_BUFFER_LENGTH]; - u8 resp[IO_BUFFER_LENGTH]; + u8 *cmd; + u8 *resp; struct parsed_resp parsed; size_t prev_d_len; @@ -2134,6 +2134,8 @@ void free_opal_dev(struct opal_dev *dev) return; clean_opal_dev(dev); + kfree(dev->resp); + kfree(dev->cmd); kfree(dev); } EXPORT_SYMBOL(free_opal_dev); @@ -2146,17 +2148,39 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv) if (!dev) return NULL; + /* + * Presumably DMA-able buffers must be cache-aligned. Kmalloc makes + * sure the allocated buffer is DMA-safe in that regard. + */ + dev->cmd = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL); + if (!dev->cmd) + goto err_free_dev; + + dev->resp = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL); + if (!dev->resp) + goto err_free_cmd; + INIT_LIST_HEAD(&dev->unlk_lst); mutex_init(&dev->dev_lock); dev->data = data; dev->send_recv = send_recv; if (check_opal_support(dev) != 0) { pr_debug("Opal is not supported on this device\n"); - kfree(dev); - return NULL; + goto err_free_resp; } return dev; + +err_free_resp: + kfree(dev->resp); + +err_free_cmd: + kfree(dev->cmd); + +err_free_dev: + kfree(dev); + + return NULL; } EXPORT_SYMBOL(init_opal_dev); diff --git a/build.config.gki.aarch64 b/build.config.gki.aarch64 index 9e70748400a1..738ddee4af33 100644 --- a/build.config.gki.aarch64 +++ b/build.config.gki.aarch64 @@ -10,6 +10,7 @@ ABI_DEFINITION=android/abi_gki_aarch64.xml TIDY_ABI=1 KMI_SYMBOL_LIST=android/abi_gki_aarch64 ADDITIONAL_KMI_SYMBOL_LISTS=" +android/abi_gki_aarch64_type_visibility android/abi_gki_aarch64_core android/abi_gki_aarch64_db845c android/abi_gki_aarch64_exynos @@ -18,6 +19,7 @@ android/abi_gki_aarch64_fips140 android/abi_gki_aarch64_galaxy android/abi_gki_aarch64_generic android/abi_gki_aarch64_hikey960 +android/abi_gki_aarch64_honor android/abi_gki_aarch64_imx android/abi_gki_aarch64_lenovo android/abi_gki_aarch64_mtk @@ -30,6 +32,7 @@ android/abi_gki_aarch64_vivo android/abi_gki_aarch64_xiaomi android/abi_gki_aarch64_asus android/abi_gki_aarch64_transsion +android/abi_gki_aarch64_tuxera " FILES="${FILES} diff --git a/build.config.rockchip b/build.config.rockchip index e7f08311793b..297c4c95a14b 100644 --- a/build.config.rockchip +++ b/build.config.rockchip @@ -3,6 +3,6 @@ DEFCONFIG=rockchip_aarch64_gki_defconfig KMI_SYMBOL_LIST=android/abi_gki_aarch64_rockchip -PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/common/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/common/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/common/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/common/arch/arm64/configs/rockchip_gki.config" +PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/common/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/common/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/common/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/common/arch/arm64/configs/rockchip_gki.fragment" POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/common/arch/arm64/configs/${DEFCONFIG}" diff --git a/certs/blacklist_hashes.c b/certs/blacklist_hashes.c index 344892337be0..d5961aa3d338 100644 --- a/certs/blacklist_hashes.c +++ b/certs/blacklist_hashes.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include "blacklist.h" -const char __initdata *const 
blacklist_hashes[] = { +const char __initconst *const blacklist_hashes[] = { #include CONFIG_SYSTEM_BLACKLIST_HASH_LIST , NULL }; diff --git a/crypto/Kconfig b/crypto/Kconfig index a86d17ebe6fa..b604dd6a8faf 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -15,6 +15,7 @@ source "crypto/async_tx/Kconfig" # menuconfig CRYPTO tristate "Cryptographic API" + select LIB_MEMNEQ help This option provides the core Cryptographic API. @@ -1966,7 +1967,6 @@ config CRYPTO_STATS config CRYPTO_HASH_INFO bool -source "lib/crypto/Kconfig" source "drivers/crypto/Kconfig" source "crypto/asymmetric_keys/Kconfig" source "certs/Kconfig" diff --git a/crypto/Makefile b/crypto/Makefile index f8a3677d2eec..8be22a6ea3d8 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -4,7 +4,7 @@ # obj-$(CONFIG_CRYPTO) += crypto.o -crypto-y := api.o cipher.o compress.o memneq.o +crypto-y := api.o cipher.o compress.o obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o obj-$(CONFIG_CRYPTO_FIPS) += fips.o @@ -211,10 +211,10 @@ UBSAN_SANITIZE_jitterentropy-fips.o = n # module that is in scope for FIPS 140-2 certification # crypto-fips-objs := drbg.o ecb.o cbc.o ctr.o cts.o gcm.o xts.o hmac.o cmac.o \ - memneq.o gf128mul.o aes_generic.o lib-crypto-aes.o \ + gf128mul.o aes_generic.o lib-crypto-aes.o \ jitterentropy.o jitterentropy-kcapi.o \ sha1_generic.o sha256_generic.o sha512_generic.o \ - lib-sha1.o lib-crypto-sha256.o + lib-memneq.o lib-sha1.o lib-crypto-sha256.o crypto-fips-objs := $(foreach o,$(crypto-fips-objs),$(o:.o=-fips.o)) # get the arch to add its objects to $(crypto-fips-objs) diff --git a/crypto/akcipher.c b/crypto/akcipher.c index f866085c8a4a..ab975a420e1e 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -120,6 +120,12 @@ static int akcipher_default_op(struct akcipher_request *req) return -ENOSYS; } +static int akcipher_default_set_key(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + return -ENOSYS; +} + int crypto_register_akcipher(struct akcipher_alg *alg) { struct crypto_alg *base = &alg->base; @@ -132,6 +138,8 @@ int crypto_register_akcipher(struct akcipher_alg *alg) alg->encrypt = akcipher_default_op; if (!alg->decrypt) alg->decrypt = akcipher_default_op; + if (!alg->set_priv_key) + alg->set_priv_key = akcipher_default_set_key; akcipher_prepare_alg(alg); return crypto_register_alg(base); diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 788a4ba1e2e7..cf9b7ac36202 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -260,6 +260,10 @@ static int cert_sig_digest_update(const struct public_key_signature *sig, BUG_ON(!sig->data); + /* SM2 signatures always use the SM3 hash algorithm */ + if (!sig->hash_algo || strcmp(sig->hash_algo, "sm3") != 0) + return -EINVAL; + ret = sm2_compute_z_digest(tfm_pkey, SM2_DEFAULT_USERID, SM2_DEFAULT_USERID_LEN, dgst); if (ret) @@ -356,8 +360,7 @@ int public_key_verify_signature(const struct public_key *pkey, if (ret) goto error_free_key; - if (sig->pkey_algo && strcmp(sig->pkey_algo, "sm2") == 0 && - sig->data_size) { + if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) { ret = cert_sig_digest_update(sig, tfm); if (ret) goto error_free_key; diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c index 72fe480f9bd6..5f96a21f8788 100644 --- a/crypto/blake2s_generic.c +++ b/crypto/blake2s_generic.c @@ -15,12 +15,12 @@ static int crypto_blake2s_update_generic(struct shash_desc *desc, const u8 *in, unsigned int inlen) { - return crypto_blake2s_update(desc, 
in, inlen, blake2s_compress_generic); + return crypto_blake2s_update(desc, in, inlen, true); } static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out) { - return crypto_blake2s_final(desc, out, blake2s_compress_generic); + return crypto_blake2s_final(desc, out, true); } #define BLAKE2S_ALG(name, driver_name, digest_size) \ diff --git a/crypto/cryptd.c b/crypto/cryptd.c index a1bea0f4baa8..668095eca0fa 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -39,6 +39,10 @@ struct cryptd_cpu_queue { }; struct cryptd_queue { + /* + * Protected by disabling BH to allow enqueueing from softinterrupt and + * dequeuing from kworker (cryptd_queue_worker()). + */ struct cryptd_cpu_queue __percpu *cpu_queue; }; @@ -125,28 +129,28 @@ static void cryptd_fini_queue(struct cryptd_queue *queue) static int cryptd_enqueue_request(struct cryptd_queue *queue, struct crypto_async_request *request) { - int cpu, err; + int err; struct cryptd_cpu_queue *cpu_queue; refcount_t *refcnt; - cpu = get_cpu(); + local_bh_disable(); cpu_queue = this_cpu_ptr(queue->cpu_queue); err = crypto_enqueue_request(&cpu_queue->queue, request); refcnt = crypto_tfm_ctx(request->tfm); if (err == -ENOSPC) - goto out_put_cpu; + goto out; - queue_work_on(cpu, cryptd_wq, &cpu_queue->work); + queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work); if (!refcount_read(refcnt)) - goto out_put_cpu; + goto out; refcount_inc(refcnt); -out_put_cpu: - put_cpu(); +out: + local_bh_enable(); return err; } @@ -162,15 +166,10 @@ static void cryptd_queue_worker(struct work_struct *work) cpu_queue = container_of(work, struct cryptd_cpu_queue, work); /* * Only handle one request at a time to avoid hogging crypto workqueue. - * preempt_disable/enable is used to prevent being preempted by - * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent - * cryptd_enqueue_request() being accessed from software interrupts. */ local_bh_disable(); - preempt_disable(); backlog = crypto_get_backlog(&cpu_queue->queue); req = crypto_dequeue_request(&cpu_queue->queue); - preempt_enable(); local_bh_enable(); if (!req) diff --git a/crypto/drbg.c b/crypto/drbg.c index 1b4587e0ddad..c769c4476abb 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1036,17 +1036,38 @@ static const struct drbg_state_ops drbg_hash_ops = { ******************************************************************/ static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed, - int reseed) + int reseed, enum drbg_seed_state new_seed_state) { int ret = drbg->d_ops->update(drbg, seed, reseed); if (ret) return ret; - drbg->seeded = true; + drbg->seeded = new_seed_state; /* 10.1.1.2 / 10.1.1.3 step 5 */ drbg->reseed_ctr = 1; + switch (drbg->seeded) { + case DRBG_SEED_STATE_UNSEEDED: + /* Impossible, but handle it to silence compiler warnings. */ + fallthrough; + case DRBG_SEED_STATE_PARTIAL: + /* + * Require frequent reseeds until the seed source is + * fully initialized. + */ + drbg->reseed_threshold = 50; + break; + + case DRBG_SEED_STATE_FULL: + /* + * Seed source has become fully initialized, frequent + * reseeds no longer required. 
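To make the three-state seeding logic above easier to follow, here is a condensed, hedged model of how the reseed threshold tracks the seed state. The 50-request figure comes from the patch itself; the FULL value is only a stand-in for drbg_max_requests():

#include <stdio.h>

enum seed_state { UNSEEDED, PARTIAL, FULL };

/* Sketch of the threshold selection in __drbg_seed() above. */
static unsigned long threshold_for(enum seed_state s)
{
	switch (s) {
	case PARTIAL:
		return 50;		/* reseed often until the rng is ready */
	case FULL:
		return 1UL << 20;	/* stand-in for drbg_max_requests() */
	case UNSEEDED:
	default:
		return 0;		/* must seed before generating */
	}
}

int main(void)
{
	printf("partial: %lu\n", threshold_for(PARTIAL));
	printf("full:    %lu\n", threshold_for(FULL));
	return 0;
}
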
+ */ + drbg->reseed_threshold = drbg_max_requests(drbg); + break; + } + return ret; } @@ -1066,12 +1087,10 @@ static inline int drbg_get_random_bytes(struct drbg_state *drbg, return 0; } -static void drbg_async_seed(struct work_struct *work) +static int drbg_seed_from_random(struct drbg_state *drbg) { struct drbg_string data; LIST_HEAD(seedlist); - struct drbg_state *drbg = container_of(work, struct drbg_state, - seed_work); unsigned int entropylen = drbg_sec_strength(drbg->core->flags); unsigned char entropy[32]; int ret; @@ -1082,26 +1101,15 @@ static void drbg_async_seed(struct work_struct *work) drbg_string_fill(&data, entropy, entropylen); list_add_tail(&data.list, &seedlist); - mutex_lock(&drbg->drbg_mutex); - ret = drbg_get_random_bytes(drbg, entropy, entropylen); if (ret) - goto unlock; + goto out; - /* Set seeded to false so that if __drbg_seed fails the - * next generate call will trigger a reseed. - */ - drbg->seeded = false; - - __drbg_seed(drbg, &seedlist, true); - - if (drbg->seeded) - drbg->reseed_threshold = drbg_max_requests(drbg); - -unlock: - mutex_unlock(&drbg->drbg_mutex); + ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL); +out: memzero_explicit(entropy, entropylen); + return ret; } /* @@ -1123,6 +1131,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, unsigned int entropylen = drbg_sec_strength(drbg->core->flags); struct drbg_string data1; LIST_HEAD(seedlist); + enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL; /* 9.1 / 9.2 / 9.3.1 step 3 */ if (pers && pers->len > (drbg_max_addtl(drbg))) { @@ -1150,6 +1159,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, BUG_ON((entropylen * 2) > sizeof(entropy)); /* Get seed from in-kernel /dev/urandom */ + if (!rng_is_initialized()) + new_seed_state = DRBG_SEED_STATE_PARTIAL; + ret = drbg_get_random_bytes(drbg, entropy, entropylen); if (ret) goto out; @@ -1206,7 +1218,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, memset(drbg->C, 0, drbg_statelen(drbg)); } - ret = __drbg_seed(drbg, &seedlist, reseed); + ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state); out: memzero_explicit(entropy, entropylen * 2); @@ -1386,19 +1398,25 @@ static int drbg_generate(struct drbg_state *drbg, * here. The spec is a bit convoluted here, we make it simpler. */ if (drbg->reseed_threshold < drbg->reseed_ctr) - drbg->seeded = false; + drbg->seeded = DRBG_SEED_STATE_UNSEEDED; - if (drbg->pr || !drbg->seeded) { + if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) { pr_devel("DRBG: reseeding before generation (prediction " "resistance: %s, state %s)\n", drbg->pr ? "true" : "false", - drbg->seeded ? "seeded" : "unseeded"); + (drbg->seeded == DRBG_SEED_STATE_FULL ? + "seeded" : "unseeded")); /* 9.3.1 steps 7.1 through 7.3 */ len = drbg_seed(drbg, addtl, true); if (len) goto err; /* 9.3.1 step 7.4 */ addtl = NULL; + } else if (rng_is_initialized() && + drbg->seeded == DRBG_SEED_STATE_PARTIAL) { + len = drbg_seed_from_random(drbg); + if (len) + goto err; } if (addtl && 0 < addtl->len) @@ -1491,51 +1509,15 @@ static int drbg_generate_long(struct drbg_state *drbg, return 0; } -static void drbg_schedule_async_seed(struct random_ready_callback *rdy) -{ - struct drbg_state *drbg = container_of(rdy, struct drbg_state, - random_ready); - - schedule_work(&drbg->seed_work); -} - static int drbg_prepare_hrng(struct drbg_state *drbg) { - int err; - /* We do not need an HRNG in test mode. 
*/ if (list_empty(&drbg->test_data.list)) return 0; drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0); - INIT_WORK(&drbg->seed_work, drbg_async_seed); - - drbg->random_ready.owner = THIS_MODULE; - drbg->random_ready.func = drbg_schedule_async_seed; - - err = add_random_ready_callback(&drbg->random_ready); - - switch (err) { - case 0: - break; - - case -EALREADY: - err = 0; - fallthrough; - - default: - drbg->random_ready.func = NULL; - return err; - } - - /* - * Require frequent reseeds until the seed source is fully - * initialized. - */ - drbg->reseed_threshold = 50; - - return err; + return 0; } /* @@ -1578,7 +1560,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, if (!drbg->core) { drbg->core = &drbg_cores[coreref]; drbg->pr = pr; - drbg->seeded = false; + drbg->seeded = DRBG_SEED_STATE_UNSEEDED; drbg->reseed_threshold = drbg_max_requests(drbg); ret = drbg_alloc_state(drbg); @@ -1629,11 +1611,6 @@ free_everything: */ static int drbg_uninstantiate(struct drbg_state *drbg) { - if (drbg->random_ready.func) { - del_random_ready_callback(&drbg->random_ready); - cancel_work_sync(&drbg->seed_work); - } - if (!IS_ERR_OR_NULL(drbg->jent)) crypto_free_rng(drbg->jent); drbg->jent = NULL; diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c index 6a3fd09057d0..f7ed43020672 100644 --- a/crypto/ecrdsa.c +++ b/crypto/ecrdsa.c @@ -113,15 +113,15 @@ static int ecrdsa_verify(struct akcipher_request *req) /* Step 1: verify that 0 < r < q, 0 < s < q */ if (vli_is_zero(r, ndigits) || - vli_cmp(r, ctx->curve->n, ndigits) == 1 || + vli_cmp(r, ctx->curve->n, ndigits) >= 0 || vli_is_zero(s, ndigits) || - vli_cmp(s, ctx->curve->n, ndigits) == 1) + vli_cmp(s, ctx->curve->n, ndigits) >= 0) return -EKEYREJECTED; /* Step 2: calculate hash (h) of the message (passed as input) */ /* Step 3: calculate e = h \mod q */ vli_from_le64(e, digest, ndigits); - if (vli_cmp(e, ctx->curve->n, ndigits) == 1) + if (vli_cmp(e, ctx->curve->n, ndigits) >= 0) vli_sub(e, e, ctx->curve->n, ndigits); if (vli_is_zero(e, ndigits)) e[0] = 1; @@ -137,7 +137,7 @@ static int ecrdsa_verify(struct akcipher_request *req) /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */ ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key, ctx->curve); - if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1) + if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0) vli_sub(cc.x, cc.x, ctx->curve->n, ndigits); /* Step 7: if R == r signature is valid */ diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 37c4c308339e..423c55d0e165 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -117,6 +117,22 @@ struct rand_data { #define JENT_EHEALTH 9 /* Health test failed during initialization */ #define JENT_ERCT 10 /* RCT failed during initialization */ +/* + * The output n bits can receive more than n bits of min entropy, of course, + * but the fixed output of the conditioning function can only asymptotically + * approach the output size bits of min entropy, not attain that bound. Random + * maps will tend to have output collisions, which reduces the creditable + * output entropy (that is what SP 800-90B Section 3.1.5.1.2 attempts to bound). + * + * The value "64" is justified in Appendix A.4 of the current 90C draft, + * and aligns with NIST's in "epsilon" definition in this document, which is + * that a string can be considered "full entropy" if you can bound the min + * entropy in each bit of output to at least 1-epsilon, where epsilon is + * required to be <= 2^(-32). 
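The safety factor defined just below translates directly into extra noise samples per output word. A small sketch of the arithmetic jent_gen_entropy() performs after this change; the osr values are illustrative:

#include <stdio.h>

#define DATA_SIZE_BITS 64
#define JENT_ENTROPY_SAFETY_FACTOR 64

/* How many jitter measurements one 64-bit output now costs, with and
 * without the FIPS safety factor (osr = oversampling rate). */
static unsigned int samples_needed(unsigned int osr, int fips)
{
	unsigned int bits = DATA_SIZE_BITS +
			    (fips ? JENT_ENTROPY_SAFETY_FACTOR : 0);

	return bits * osr;
}

int main(void)
{
	printf("osr=1, !fips: %u\n", samples_needed(1, 0));	/* 64 */
	printf("osr=1,  fips: %u\n", samples_needed(1, 1));	/* 128 */
	printf("osr=3,  fips: %u\n", samples_needed(3, 1));	/* 384 */
	return 0;
}
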
+ */ +#define JENT_ENTROPY_SAFETY_FACTOR 64 + +#include <linux/fips.h> #include "jitterentropy.h" /*************************************************************************** @@ -546,7 +562,10 @@ static int jent_measure_jitter(struct rand_data *ec) */ static void jent_gen_entropy(struct rand_data *ec) { - unsigned int k = 0; + unsigned int k = 0, safety_factor = 0; + + if (fips_enabled) + safety_factor = JENT_ENTROPY_SAFETY_FACTOR; /* priming of the ->prev_time value */ jent_measure_jitter(ec); @@ -560,7 +579,7 @@ static void jent_gen_entropy(struct rand_data *ec) * We multiply the loop value with ->osr to obtain the * oversampling rate requested by the caller */ - if (++k >= (DATA_SIZE_BITS * ec->osr)) + if (++k >= ((DATA_SIZE_BITS + safety_factor) * ec->osr)) break; } } diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c index 48019660a096..63c5444f0f1a 100644 --- a/drivers/accessibility/speakup/main.c +++ b/drivers/accessibility/speakup/main.c @@ -1780,7 +1780,7 @@ static void speakup_con_update(struct vc_data *vc) { unsigned long flags; - if (!speakup_console[vc->vc_num] || spk_parked) + if (!speakup_console[vc->vc_num] || spk_parked || !synth) return; if (!spin_trylock_irqsave(&speakup_info.spinlock, flags)) /* Speakup output, discard */ diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c index 6284aff434a1..da3a24557a03 100644 --- a/drivers/accessibility/speakup/spk_ttyio.c +++ b/drivers/accessibility/speakup/spk_ttyio.c @@ -88,7 +88,7 @@ static int spk_ttyio_receive_buf2(struct tty_struct *tty, } if (!ldisc_data->buf_free) - /* ttyio_in will tty_schedule_flip */ + /* ttyio_in will tty_flip_buffer_push */ return 0; /* Make sure the consumer has read buf before we have seen @@ -334,7 +334,7 @@ static unsigned char ttyio_in(int timeout) mb(); ldisc_data->buf_free = true; /* Let TTY push more characters */ - tty_schedule_flip(speakup_tty->port); + tty_flip_buffer_push(speakup_tty->port); return rv; } diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index 72f1fb77abcd..e648158368a7 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, int cpu = mce->extcpu; struct acpi_hest_generic_status *estatus, *tmp; struct acpi_hest_generic_data *gdata; - const guid_t *fru_id = &guid_null; - char *fru_text = ""; + const guid_t *fru_id; + char *fru_text; guid_t *sec_type; static u32 err_seq; @@ -160,17 +161,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, /* log event via trace */ err_seq++; - gdata = (struct acpi_hest_generic_data *)(tmp + 1); - if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) - fru_id = (guid_t *)gdata->fru_id; - if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) - fru_text = gdata->fru_text; - sec_type = (guid_t *)gdata->section_type; - if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { - struct cper_sec_mem_err *mem = (void *)(gdata + 1); - if (gdata->error_data_length >= sizeof(*mem)) - trace_extlog_mem_event(mem, err_seq, fru_id, fru_text, - (u8)gdata->error_severity); + apei_estatus_for_each_section(tmp, gdata) { + if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) + fru_id = (guid_t *)gdata->fru_id; + else + fru_id = &guid_null; + if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) + fru_text = gdata->fru_text; + else + fru_text = ""; + sec_type = (guid_t
*)gdata->section_type; + if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { + struct cper_sec_mem_err *mem = (void *)(gdata + 1); + + if (gdata->error_data_length >= sizeof(*mem)) + trace_extlog_mem_event(mem, err_seq, fru_id, fru_text, + (u8)gdata->error_severity); + } } out: diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index be73974ce449..6ff81027c69d 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -401,6 +401,9 @@ static int register_device_clock(struct acpi_device *adev, if (!lpss_clk_dev) lpt_register_clock_device(); + if (IS_ERR(lpss_clk_dev)) + return PTR_ERR(lpss_clk_dev); + clk_data = platform_get_drvdata(lpss_clk_dev); if (!clk_data) return -ENODEV; diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index eb04b2f828ee..cf6c9ffe04a2 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -498,6 +498,22 @@ static const struct dmi_system_id video_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"), }, }, + { + .callback = video_disable_backlight_sysfs_if, + .ident = "Toshiba Satellite Z830", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Z830"), + }, + }, + { + .callback = video_disable_backlight_sysfs_if, + .ident = "Toshiba Portege Z830", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE Z830"), + }, + }, /* * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set * but the IDs actually follow the Device ID Scheme. diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index 598fd19b65fa..45973aa6e06d 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -29,16 +29,26 @@ #undef pr_fmt #define pr_fmt(fmt) "BERT: " fmt + +#define ACPI_BERT_PRINT_MAX_RECORDS 5 #define ACPI_BERT_PRINT_MAX_LEN 1024 static int bert_disable; +/* + * Print "all" the error records in the BERT table, but avoid huge spam to + * the console if the BIOS included oversize records, or too many records. + * Skipping some records here does not lose anything because the full + * data is available to user tools in: + * /sys/firmware/acpi/tables/data/BERT + */ static void __init bert_print_all(struct acpi_bert_region *region, unsigned int region_len) { struct acpi_hest_generic_status *estatus = (struct acpi_hest_generic_status *)region; int remain = region_len; + int printed = 0, skipped = 0; u32 estatus_len; while (remain >= sizeof(struct acpi_bert_region)) { @@ -46,24 +56,26 @@ static void __init bert_print_all(struct acpi_bert_region *region, if (remain < estatus_len) { pr_err(FW_BUG "Truncated status block (length: %u).\n", estatus_len); - return; + break; } /* No more error records. 
*/ if (!estatus->block_status) - return; + break; if (cper_estatus_check(estatus)) { pr_err(FW_BUG "Invalid error record.\n"); - return; + break; } - pr_info_once("Error records from previous boot:\n"); - if (region_len < ACPI_BERT_PRINT_MAX_LEN) + if (estatus_len < ACPI_BERT_PRINT_MAX_LEN && + printed < ACPI_BERT_PRINT_MAX_RECORDS) { + pr_info_once("Error records from previous boot:\n"); cper_estatus_print(KERN_INFO HW_ERR, estatus); - else - pr_info_once("Max print length exceeded, table data is available at:\n" - "/sys/firmware/acpi/tables/data/BERT"); + printed++; + } else { + skipped++; + } /* * Because the boot error source is "one-time polled" type, @@ -75,6 +87,9 @@ static void __init bert_print_all(struct acpi_bert_region *region, estatus = (void *)estatus + estatus_len; remain -= estatus_len; } + + if (skipped) + pr_info(HW_ERR "Skipped %d error records\n", skipped); } static int __init setup_bert_disable(char *str) diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index 133156759551..c281d5b339d3 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -544,6 +544,8 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) != REGION_INTERSECTS) && (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY) + != REGION_INTERSECTS) && + (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED) != REGION_INTERSECTS))) return -EINVAL; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 0c8330ed1ffd..9bdb5bd5fda6 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -163,7 +163,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx) clear_fixmap(fixmap_idx); } -int ghes_estatus_pool_init(int num_ghes) +int ghes_estatus_pool_init(unsigned int num_ghes) { unsigned long addr, len; int rc; @@ -985,7 +985,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work) ghes_estatus_cache_add(generic, estatus); } - if (task_work_pending && current->mm != &init_mm) { + if (task_work_pending && current->mm) { estatus_node->task_work.func = ghes_kick_task_work; estatus_node->task_work_cpu = smp_processor_id(); ret = task_work_add(current, &estatus_node->task_work, diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 2ac0773326e9..b62348a7e4d9 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -607,33 +607,6 @@ static int pcc_data_alloc(int pcc_ss_id) return 0; } -/* Check if CPPC revision + num_ent combination is supported */ -static bool is_cppc_supported(int revision, int num_ent) -{ - int expected_num_ent; - - switch (revision) { - case CPPC_V2_REV: - expected_num_ent = CPPC_V2_NUM_ENT; - break; - case CPPC_V3_REV: - expected_num_ent = CPPC_V3_NUM_ENT; - break; - default: - pr_debug("Firmware exports unsupported CPPC revision: %d\n", - revision); - return false; - } - - if (expected_num_ent != num_ent) { - pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n", - num_ent, expected_num_ent, revision); - return false; - } - - return true; -} - /* * An example CPC table looks like the following. * @@ -729,7 +702,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) cpc_obj->type); goto out_free; } - cpc_ptr->num_entries = num_ent; /* Second entry should be revision. 
*/ cpc_obj = &out_obj->package.elements[1]; @@ -740,10 +712,32 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) cpc_obj->type); goto out_free; } - cpc_ptr->version = cpc_rev; - if (!is_cppc_supported(cpc_rev, num_ent)) + if (cpc_rev < CPPC_V2_REV) { + pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev, + pr->id); goto out_free; + } + + /* + * Disregard _CPC if the number of entries in the return package is not + * as expected, but support future revisions being proper supersets of + * v3 and only causing more entries to be returned by _CPC. + */ + if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) || + (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) || + (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) { + pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n", + num_ent, pr->id); + goto out_free; + } + if (cpc_rev > CPPC_V3_REV) { + num_ent = CPPC_V3_NUM_ENT; + cpc_rev = CPPC_V3_REV; + } + + cpc_ptr->num_entries = num_ent; + cpc_ptr->version = cpc_rev; /* Iterate through remaining entries in _CPC */ for (i = 2; i < num_ent; i++) { diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 3f2e5ea9ab6b..4707d1808ca5 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -183,7 +183,6 @@ static struct workqueue_struct *ec_wq; static struct workqueue_struct *ec_query_wq; static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */ -static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */ static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ @@ -1405,24 +1404,16 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval) if (ec->data_addr == 0 || ec->command_addr == 0) return AE_OK; - if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) { - /* - * Always inherit the GPE number setting from the ECDT - * EC. - */ - ec->gpe = boot_ec->gpe; - } else { - /* Get GPE bit assignment (EC events). */ - /* TODO: Add support for _GPE returning a package */ - status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); - if (ACPI_SUCCESS(status)) - ec->gpe = tmp; + /* Get GPE bit assignment (EC events). */ + /* TODO: Add support for _GPE returning a package */ + status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); + if (ACPI_SUCCESS(status)) + ec->gpe = tmp; + /* + * Errors are non-fatal, allowing for ACPI Reduced Hardware + * platforms which use GpioInt instead of GPE. + */ - /* - * Errors are non-fatal, allowing for ACPI Reduced Hardware - * platforms which use GpioInt instead of GPE. - */ - } /* Use the global lock for all EC transactions? */ tmp = 0; acpi_evaluate_integer(handle, "_GLK", NULL, &tmp); @@ -1860,60 +1851,12 @@ static int ec_honor_dsdt_gpe(const struct dmi_system_id *id) return 0; } -/* - * Some DSDTs contain wrong GPE setting.
- * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD - * https://bugzilla.kernel.org/show_bug.cgi?id=195651 - */ -static int ec_honor_ecdt_gpe(const struct dmi_system_id *id) -{ - pr_debug("Detected system needing ignore DSDT GPE setting.\n"); - EC_FLAGS_IGNORE_DSDT_GPE = 1; - return 0; -} - static const struct dmi_system_id ec_dmi_table[] __initconst = { { ec_correct_ecdt, "MSI MS-171F", { DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL}, { - ec_honor_ecdt_gpe, "ASUS FX502VD", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUS FX502VE", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUS GL702VMK", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUS X550VXK", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUS X580VD", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL}, - { /* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */ ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", { DMI_MATCH(DMI_SYS_VENDOR, "HP"), @@ -2180,13 +2123,6 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = { DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"), }, }, - { - .ident = "ThinkPad X1 Carbon 6th", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"), - }, - }, { .ident = "ThinkPad X1 Yoga 3rd", .matches = { diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c index cb73a5d6ea76..b8e4cae390b0 100644 --- a/drivers/acpi/numa/hmat.c +++ b/drivers/acpi/numa/hmat.c @@ -562,17 +562,26 @@ static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b) { struct memory_initiator *ia; struct memory_initiator *ib; - unsigned long *p_nodes = priv; ia = list_entry(a, struct memory_initiator, node); ib = list_entry(b, struct memory_initiator, node); - set_bit(ia->processor_pxm, p_nodes); - set_bit(ib->processor_pxm, p_nodes); - return ia->processor_pxm - ib->processor_pxm; } +static int initiators_to_nodemask(unsigned long *p_nodes) +{ + struct memory_initiator *initiator; + + if (list_empty(&initiators)) + return -ENXIO; + + list_for_each_entry(initiator, &initiators, node) + set_bit(initiator->processor_pxm, p_nodes); + + return 0; +} + static void hmat_register_target_initiators(struct memory_target *target) { static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); @@ -609,7 +618,10 @@ static void hmat_register_target_initiators(struct memory_target *target) * initiators. 
*/ bitmap_zero(p_nodes, MAX_NUMNODES); - list_sort(p_nodes, &initiators, initiator_cmp); + list_sort(NULL, &initiators, initiator_cmp); + if (initiators_to_nodemask(p_nodes) < 0) + return; + if (!access0done) { for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { loc = localities_types[i]; @@ -643,8 +655,9 @@ static void hmat_register_target_initiators(struct memory_target *target) /* Access 1 ignores Generic Initiators */ bitmap_zero(p_nodes, MAX_NUMNODES); - list_sort(p_nodes, &initiators, initiator_cmp); - best = 0; + if (initiators_to_nodemask(p_nodes) < 0) + return; + for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { loc = localities_types[i]; if (!loc) diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index d9fdfeb25198..a495fb351686 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -41,6 +41,8 @@ struct mcfg_fixup { static struct mcfg_fixup mcfg_quirks[] = { /* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */ +#ifdef CONFIG_ARM64 + #define AL_ECAM(table_id, rev, seg, ops) \ { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops } @@ -167,6 +169,7 @@ static struct mcfg_fixup mcfg_quirks[] = { ALTRA_ECAM_QUIRK(1, 13), ALTRA_ECAM_QUIRK(1, 14), ALTRA_ECAM_QUIRK(1, 15), +#endif /* ARM64 */ }; static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 8377c3ed10ff..e5dd87ddc6b3 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -609,7 +609,7 @@ static DEFINE_RAW_SPINLOCK(c3_lock); * @cx: Target state context * @index: index of target state */ -static int acpi_idle_enter_bm(struct cpuidle_driver *drv, +static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv, struct acpi_processor *pr, struct acpi_processor_cx *cx, int index) @@ -666,7 +666,7 @@ static int acpi_idle_enter_bm(struct cpuidle_driver *drv, return index; } -static int acpi_idle_enter(struct cpuidle_device *dev, +static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); @@ -695,7 +695,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev, return index; } -static int acpi_idle_enter_s2idle(struct cpuidle_device *dev, +static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); @@ -1080,6 +1080,11 @@ static int flatten_lpi_states(struct acpi_processor *pr, return 0; } +int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return -EOPNOTSUPP; +} + static int acpi_processor_get_lpi_info(struct acpi_processor *pr) { int ret, i; @@ -1088,6 +1093,11 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) struct acpi_device *d = NULL; struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; + /* make sure our architecture has support */ + ret = acpi_processor_ffh_lpi_probe(pr->id); + if (ret == -EOPNOTSUPP) + return ret; + if (!osc_pc_lpi_support_confirmed) return -EOPNOTSUPP; @@ -1139,11 +1149,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) return 0; } -int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) -{ - return -ENODEV; -} - int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) { return -ENODEV; diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index 6c7d05b37c98..7df0c6e3ba63 100644 --- a/drivers/acpi/processor_thermal.c +++ 
b/drivers/acpi/processor_thermal.c @@ -148,7 +148,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy) unsigned int cpu; for_each_cpu(cpu, policy->related_cpus) { - struct acpi_processor *pr = per_cpu(processors, policy->cpu); + struct acpi_processor *pr = per_cpu(processors, cpu); if (pr) freq_qos_remove_request(&pr->thermal_req); diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 35ecb57514ca..bcfc090ea892 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -155,10 +155,10 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope, return acpi_nondev_subnode_data_ok(handle, link, list, parent); } -static int acpi_add_nondev_subnodes(acpi_handle scope, - const union acpi_object *links, - struct list_head *list, - struct fwnode_handle *parent) +static bool acpi_add_nondev_subnodes(acpi_handle scope, + const union acpi_object *links, + struct list_head *list, + struct fwnode_handle *parent) { bool ret = false; int i; @@ -433,6 +433,16 @@ void acpi_init_properties(struct acpi_device *adev) acpi_extract_apple_properties(adev); } +static void acpi_free_device_properties(struct list_head *list) +{ + struct acpi_device_properties *props, *tmp; + + list_for_each_entry_safe(props, tmp, list, list) { + list_del(&props->list); + kfree(props); + } +} + static void acpi_destroy_nondev_subnodes(struct list_head *list) { struct acpi_data_node *dn, *next; @@ -445,22 +455,18 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list) wait_for_completion(&dn->kobj_done); list_del(&dn->sibling); ACPI_FREE((void *)dn->data.pointer); + acpi_free_device_properties(&dn->data.properties); kfree(dn); } } void acpi_free_properties(struct acpi_device *adev) { - struct acpi_device_properties *props, *tmp; - acpi_destroy_nondev_subnodes(&adev->data.subnodes); ACPI_FREE((void *)adev->data.pointer); adev->data.of_compatible = NULL; adev->data.pointer = NULL; - list_for_each_entry_safe(props, tmp, &adev->data.properties, list) { - list_del(&props->list); - kfree(props); - } + acpi_free_device_properties(&adev->data.properties); } /** diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 503935b1deeb..097a5b5f46ab 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -364,6 +364,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "80E3"), }, }, + { + .callback = init_nvs_save_s3, + .ident = "Lenovo G40-45", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80E1"), + }, + }, /* * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using * the Low Power S0 Idle firmware interface (see @@ -377,6 +385,18 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"), }, }, + /* + * ASUS B1400CEAE hangs on resume from suspend (see + * https://bugzilla.kernel.org/show_bug.cgi?id=215742). 
+ */ + { + .callback = init_default_s3, + .ident = "ASUS B1400CEAE", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"), + }, + }, {}, }; diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index a5cc4f3bb1e3..1d94c4625f36 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -439,18 +439,29 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj, { struct acpi_data_attr *data_attr; void __iomem *base; - ssize_t rc; + ssize_t size; data_attr = container_of(bin_attr, struct acpi_data_attr, attr); + size = data_attr->attr.size; - base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size); + if (offset < 0) + return -EINVAL; + + if (offset >= size) + return 0; + + if (count > size - offset) + count = size - offset; + + base = acpi_os_map_iomem(data_attr->addr, size); if (!base) return -ENOMEM; - rc = memory_read_from_buffer(buf, count, &offset, base, - data_attr->attr.size); - acpi_os_unmap_memory(base, data_attr->attr.size); - return rc; + memcpy_fromio(buf, base + offset, count); + + acpi_os_unmap_iomem(base, size); + + return count; } static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr) diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 7b9793cb55c5..b13713199ad9 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -424,23 +424,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = { .callback = video_detect_force_native, .ident = "Clevo NL5xRU", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), - DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), - }, - }, - { - .callback = video_detect_force_native, - .ident = "Clevo NL5xRU", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), - DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), - }, - }, - { - .callback = video_detect_force_native, - .ident = "Clevo NL5xRU", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), }, }, @@ -463,28 +446,124 @@ static const struct dmi_system_id video_detect_dmi_table[] = { { .callback = video_detect_force_native, .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, + /* + * The TongFang PF5PU1G, PF4NU1F, PF5NU1G, and PF5LUXG/TUXEDO BA15 Gen10, + * Pulse 14/15 Gen1, and Pulse 15 Gen2 have the same problem as the Clevo + * NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description + * above. 
+ */ + { + .callback = video_detect_force_native, + .ident = "TongFang PF5PU1G", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PF5PU1G"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang PF4NU1F", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PF4NU1F"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang PF4NU1F", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), - DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + DMI_MATCH(DMI_BOARD_NAME, "PULSE1401"), }, }, { .callback = video_detect_force_native, - .ident = "Clevo NL5xNU", + .ident = "TongFang PF5NU1G", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), - DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + DMI_MATCH(DMI_BOARD_NAME, "PF5NU1G"), }, }, { .callback = video_detect_force_native, - .ident = "Clevo NL5xNU", + .ident = "TongFang PF5NU1G", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), - DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "PULSE1501"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang PF5LUXG", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"), + }, + }, + /* + * More Tongfang devices with the same issue as the Clevo NL5xRU and + * NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description above. + */ + { + .callback = video_detect_force_native, + .ident = "TongFang GKxNRxx", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GKxNRxx"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GKxNRxx", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GKxNRxx", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GKxNRxx", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GKxNRxx", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GMxNGxx", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxNGxx"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GMxZGxx", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxZGxx"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "TongFang GMxRGxx", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"), }, }, - /* * Desktops which falsely report a backlight and which our heuristics * for this do not catch. 
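The Clevo and TongFang entries above (like the acpi/sleep.c and acpi_video.c entries earlier) all rely on the same kernel mechanism: a sentinel-terminated array of struct dmi_system_id records that dmi_check_system() walks, firing the callback of every entry whose DMI_MATCH() strings all match the firmware's DMI data. A minimal, self-contained sketch of that pattern follows; the vendor/board strings and function names here are illustrative placeholders, not values taken from this patch.

#include <linux/dmi.h>
#include <linux/printk.h>

/* Callback fired for each matching entry; returning non-zero stops the scan. */
static int force_native_backlight(const struct dmi_system_id *id)
{
	pr_info("%s: applying native-backlight quirk\n", id->ident);
	return 0;	/* keep scanning so later entries can also match */
}

/* Sentinel-terminated quirk table; every DMI_MATCH() field must match. */
static const struct dmi_system_id example_quirks[] = {
	{
		.callback = force_native_backlight,
		.ident = "Example Board",	/* placeholder, not from the patch */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_BOARD_NAME, "EXAMPLE1"),
		},
	},
	{ }	/* empty entry terminates the table */
};

static void apply_example_quirks(void)
{
	/* dmi_check_system() returns the number of entries that matched */
	dmi_check_system(example_quirks);
}

Note that DMI_MATCH() is a substring match (DMI_EXACT_MATCH() exists for exact comparison), which is why several of the entries above can drop the vendor match entirely and key on the board name alone.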
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 052a1e590e9f..16e0ba315abf 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -173,8 +173,32 @@ static inline void binder_stats_created(enum binder_stat_types type) atomic_inc(&binder_stats.obj_created[type]); } -struct binder_transaction_log binder_transaction_log; -struct binder_transaction_log binder_transaction_log_failed; +struct binder_transaction_log_entry { + int debug_id; + int debug_id_done; + int call_type; + int from_proc; + int from_thread; + int target_handle; + int to_proc; + int to_thread; + int to_node; + int data_size; + int offsets_size; + int return_error_line; + uint32_t return_error; + uint32_t return_error_param; + char context_name[BINDERFS_MAX_NAME + 1]; +}; + +struct binder_transaction_log { + atomic_t cur; + bool full; + struct binder_transaction_log_entry entry[32]; +}; + +static struct binder_transaction_log binder_transaction_log; +static struct binder_transaction_log binder_transaction_log_failed; static struct binder_transaction_log_entry *binder_transaction_log_add( struct binder_transaction_log *log) @@ -1480,6 +1504,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc, } ret = binder_inc_ref_olocked(ref, strong, target_list); *rdata = ref->data; + if (ret && ref == new_ref) { + /* + * Cleanup the failed reference here as the target + * could now be dead and have already released its + * references by now. Calling on the new reference + * with strong=0 and a tmp_refs will not decrement + * the node. The new_ref gets kfree'd below. + */ + binder_cleanup_ref_olocked(new_ref); + ref = NULL; + } + binder_proc_unlock(proc); if (new_ref && ref != new_ref) /* @@ -1738,15 +1774,21 @@ static void binder_cleanup_transaction(struct binder_transaction *t, /** * binder_get_object() - gets object and checks for valid metadata * @proc: binder_proc owning the buffer + * @u: sender's user pointer to base of buffer * @buffer: binder_buffer that we're parsing. * @offset: offset in the @buffer at which to validate an object. * @object: struct binder_object to read into * - * Return: If there's a valid metadata object at @offset in @buffer, the + * Copy the binder object at the given offset into @object. If @u is + * provided then the copy is from the sender's buffer. If not, then + * it is copied from the target's @buffer. + * + * Return: If there's a valid metadata object at @offset, the * size of that object. Otherwise, it returns zero. The object * is read into the struct binder_object pointed to by @object. */ static size_t binder_get_object(struct binder_proc *proc, + const void __user *u, struct binder_buffer *buffer, unsigned long offset, struct binder_object *object) @@ -1756,10 +1798,16 @@ static size_t binder_get_object(struct binder_proc *proc, size_t object_size = 0; read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); - if (offset > buffer->data_size || read_size < sizeof(*hdr) || - binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, - offset, read_size)) + if (offset > buffer->data_size || read_size < sizeof(*hdr)) return 0; + if (u) { + if (copy_from_user(object, u + offset, read_size)) + return 0; + } else { + if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, + offset, read_size)) + return 0; + } /* Ok, now see if we read a complete object. 
*/ hdr = &object->hdr; @@ -1832,7 +1880,7 @@ static struct binder_buffer_object *binder_validate_ptr( b, buffer_offset, sizeof(object_offset))) return NULL; - object_size = binder_get_object(proc, b, object_offset, object); + object_size = binder_get_object(proc, NULL, b, object_offset, object); if (!object_size || object->hdr.type != BINDER_TYPE_PTR) return NULL; if (object_offsetp) @@ -1897,7 +1945,8 @@ static bool binder_validate_fixup(struct binder_proc *proc, unsigned long buffer_offset; struct binder_object last_object; struct binder_buffer_object *last_bbo; - size_t object_size = binder_get_object(proc, b, last_obj_offset, + size_t object_size = binder_get_object(proc, NULL, b, + last_obj_offset, &last_object); if (object_size != sizeof(*last_bbo)) return false; @@ -1972,7 +2021,7 @@ static void binder_deferred_fd_close(int fd) if (!twcb) return; init_task_work(&twcb->twork, binder_do_fd_close); - __close_fd_get_file(fd, &twcb->file); + close_fd_get_file(fd, &twcb->file); if (twcb->file) { filp_close(twcb->file, current->files); task_work_add(current, &twcb->twork, TWA_RESUME); @@ -2012,7 +2061,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, buffer, buffer_offset, sizeof(object_offset))) - object_size = binder_get_object(proc, buffer, + object_size = binder_get_object(proc, NULL, buffer, object_offset, &object); if (object_size == 0) { pr_err("transaction release %d bad object at offset %lld, size %zd\n", @@ -2353,16 +2402,266 @@ err_fd_not_accepted: return ret; } -static int binder_translate_fd_array(struct binder_fd_array_object *fda, +/** + * struct binder_ptr_fixup - data to be fixed-up in target buffer + * @offset: offset in target buffer to fixup + * @skip_size: bytes to skip in copy (fixup will be written later) + * @fixup_data: data to write at fixup offset + * @node: list node + * + * This is used for the pointer fixup list (pf) which is created and consumed + * during binder_transaction() and is only accessed locally. No + * locking is necessary. + * + * The list is ordered by @offset. + */ +struct binder_ptr_fixup { + binder_size_t offset; + size_t skip_size; + binder_uintptr_t fixup_data; + struct list_head node; +}; + +/** + * struct binder_sg_copy - scatter-gather data to be copied + * @offset: offset in target buffer + * @sender_uaddr: user address in source buffer + * @length: bytes to copy + * @node: list node + * + * This is used for the sg copy list (sgc) which is created and consumed + * during binder_transaction() and is only accessed locally. No + * locking is necessary. + * + * The list is ordered by @offset. + */ +struct binder_sg_copy { + binder_size_t offset; + const void __user *sender_uaddr; + size_t length; + struct list_head node; +}; + +/** + * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data + * @alloc: binder_alloc associated with @buffer + * @buffer: binder buffer in target process + * @sgc_head: list_head of scatter-gather copy list + * @pf_head: list_head of pointer fixup list + * + * Processes all elements of @sgc_head, applying fixups from @pf_head + * and copying the scatter-gather data from the source process' user + * buffer to the target's buffer. It is expected that the list creation + * and processing all occur during binder_transaction() so these lists + * are only accessed in local context.
+ * + * Return: 0=success, else -errno + */ +static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, + struct binder_buffer *buffer, + struct list_head *sgc_head, + struct list_head *pf_head) +{ + int ret = 0; + struct binder_sg_copy *sgc, *tmpsgc; + struct binder_ptr_fixup *tmppf; + struct binder_ptr_fixup *pf = + list_first_entry_or_null(pf_head, struct binder_ptr_fixup, + node); + + list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { + size_t bytes_copied = 0; + + while (bytes_copied < sgc->length) { + size_t copy_size; + size_t bytes_left = sgc->length - bytes_copied; + size_t offset = sgc->offset + bytes_copied; + + /* + * We copy up to the fixup (pointed to by pf) + */ + copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset) + : bytes_left; + if (!ret && copy_size) + ret = binder_alloc_copy_user_to_buffer( + alloc, buffer, + offset, + sgc->sender_uaddr + bytes_copied, + copy_size); + bytes_copied += copy_size; + if (copy_size != bytes_left) { + BUG_ON(!pf); + /* we stopped at a fixup offset */ + if (pf->skip_size) { + /* + * we are just skipping. This is for + * BINDER_TYPE_FDA where the translated + * fds will be fixed up when we get + * to target context. + */ + bytes_copied += pf->skip_size; + } else { + /* apply the fixup indicated by pf */ + if (!ret) + ret = binder_alloc_copy_to_buffer( + alloc, buffer, + pf->offset, + &pf->fixup_data, + sizeof(pf->fixup_data)); + bytes_copied += sizeof(pf->fixup_data); + } + list_del(&pf->node); + kfree(pf); + pf = list_first_entry_or_null(pf_head, + struct binder_ptr_fixup, node); + } + } + list_del(&sgc->node); + kfree(sgc); + } + list_for_each_entry_safe(pf, tmppf, pf_head, node) { + BUG_ON(pf->skip_size == 0); + list_del(&pf->node); + kfree(pf); + } + BUG_ON(!list_empty(sgc_head)); + + return ret > 0 ? -EINVAL : ret; +} + +/** + * binder_cleanup_deferred_txn_lists() - free specified lists + * @sgc_head: list_head of scatter-gather copy list + * @pf_head: list_head of pointer fixup list + * + * Called to clean up @sgc_head and @pf_head if there is an + * error. + */ +static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head, + struct list_head *pf_head) +{ + struct binder_sg_copy *sgc, *tmpsgc; + struct binder_ptr_fixup *pf, *tmppf; + + list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { + list_del(&sgc->node); + kfree(sgc); + } + list_for_each_entry_safe(pf, tmppf, pf_head, node) { + list_del(&pf->node); + kfree(pf); + } +} + +/** + * binder_defer_copy() - queue a scatter-gather buffer for copy + * @sgc_head: list_head of scatter-gather copy list + * @offset: binder buffer offset in target process + * @sender_uaddr: user address in source process + * @length: bytes to copy + * + * Specify a scatter-gather block to be copied. The actual copy must + * be deferred until all the needed fixups are identified and queued. + * Then the copy and fixups are done together so un-translated values + * from the source are never visible in the target buffer. + * + * We are guaranteed that repeated calls to this function will have + * monotonically increasing @offset values so the list will naturally + * be ordered. 
+ * + * Return: 0=success, else -errno + */ +static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset, + const void __user *sender_uaddr, size_t length) +{ + struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL); + + if (!bc) + return -ENOMEM; + + bc->offset = offset; + bc->sender_uaddr = sender_uaddr; + bc->length = length; + INIT_LIST_HEAD(&bc->node); + + /* + * We are guaranteed that the deferred copies are in-order + * so just add to the tail. + */ + list_add_tail(&bc->node, sgc_head); + + return 0; +} + +/** + * binder_add_fixup() - queue a fixup to be applied to sg copy + * @pf_head: list_head of binder ptr fixup list + * @offset: binder buffer offset in target process + * @fixup: bytes to be copied for fixup + * @skip_size: bytes to skip when copying (fixup will be applied later) + * + * Add the specified fixup to a list ordered by @offset. When copying + * the scatter-gather buffers, the fixup will be copied instead of + * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup + * will be applied later (in target process context), so we just skip + * the bytes specified by @skip_size. If @skip_size is 0, we copy the + * value in @fixup. + * + * This function is called *mostly* in @offset order, but there are + * exceptions. Since out-of-order inserts are relatively uncommon, + * we insert the new element by searching backward from the tail of + * the list. + * + * Return: 0=success, else -errno + */ +static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset, + binder_uintptr_t fixup, size_t skip_size) +{ + struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL); + struct binder_ptr_fixup *tmppf; + + if (!pf) + return -ENOMEM; + + pf->offset = offset; + pf->fixup_data = fixup; + pf->skip_size = skip_size; + INIT_LIST_HEAD(&pf->node); + + /* Fixups are *mostly* added in-order, but there are some + * exceptions. Look backwards through list for insertion point. 
+ */ + list_for_each_entry_reverse(tmppf, pf_head, node) { + if (tmppf->offset < pf->offset) { + list_add(&pf->node, &tmppf->node); + return 0; + } + } + /* + * if we get here, then the new offset is the lowest so + * insert at the head + */ + list_add(&pf->node, pf_head); + return 0; +} + +static int binder_translate_fd_array(struct list_head *pf_head, + struct binder_fd_array_object *fda, + const void __user *sender_ubuffer, struct binder_buffer_object *parent, + struct binder_buffer_object *sender_uparent, struct binder_transaction *t, struct binder_thread *thread, struct binder_transaction *in_reply_to) { binder_size_t fdi, fd_buf_size; binder_size_t fda_offset; + const void __user *sender_ufda_base; struct binder_proc *proc = thread->proc; - struct binder_proc *target_proc = t->to_proc; + int ret; + + if (fda->num_fds == 0) + return 0; fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { @@ -2386,19 +2685,25 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, */ fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + fda->parent_offset; - if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) { + sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + + fda->parent_offset; + + if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) || + !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", proc->pid, thread->pid); return -EINVAL; } + ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32)); + if (ret) + return ret; + for (fdi = 0; fdi < fda->num_fds; fdi++) { u32 fd; - int ret; binder_size_t offset = fda_offset + fdi * sizeof(fd); + binder_size_t sender_uoffset = fdi * sizeof(fd); - ret = binder_alloc_copy_from_buffer(&target_proc->alloc, - &fd, t->buffer, - offset, sizeof(fd)); + ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd)); if (!ret) ret = binder_translate_fd(fd, offset, t, thread, in_reply_to); @@ -2408,7 +2713,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, return 0; } -static int binder_fixup_parent(struct binder_transaction *t, +static int binder_fixup_parent(struct list_head *pf_head, + struct binder_transaction *t, struct binder_thread *thread, struct binder_buffer_object *bp, binder_size_t off_start_offset, @@ -2454,14 +2760,57 @@ static int binder_fixup_parent(struct binder_transaction *t, } buffer_offset = bp->parent_offset + (uintptr_t)parent->buffer - (uintptr_t)b->user_data; - if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, - &bp->buffer, sizeof(bp->buffer))) { - binder_user_error("%d:%d got transaction with invalid parent offset\n", - proc->pid, thread->pid); - return -EINVAL; - } + return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0); +} - return 0; +/** + * binder_can_update_transaction() - Can a txn be superseded by an updated one? 
+ @t1: the pending async txn in the frozen process + * @t2: the new async txn to supersede the outdated pending one + * + * Return: true if t2 can supersede t1 + * false if t2 cannot supersede t1 + */ +static bool binder_can_update_transaction(struct binder_transaction *t1, + struct binder_transaction *t2) +{ + if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) != + (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc) + return false; + if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code && + t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid && + t1->buffer->target_node->ptr == t2->buffer->target_node->ptr && + t1->buffer->target_node->cookie == t2->buffer->target_node->cookie) + return true; + return false; +} + +/** + * binder_find_outdated_transaction_ilocked() - Find the outdated transaction + * @t: new async transaction + * @target_list: list to find outdated transaction + * + * Return: the outdated transaction if found + * NULL if no outdated transaction can be found + * + * Requires the proc->inner_lock to be held. + */ +static struct binder_transaction * +binder_find_outdated_transaction_ilocked(struct binder_transaction *t, + struct list_head *target_list) +{ + struct binder_work *w; + + list_for_each_entry(w, target_list, entry) { + struct binder_transaction *t_queued; + + if (w->type != BINDER_WORK_TRANSACTION) + continue; + t_queued = container_of(w, struct binder_transaction, work); + if (binder_can_update_transaction(t_queued, t)) + return t_queued; + } + return NULL; } /** @@ -2490,7 +2839,7 @@ static int binder_proc_transaction(struct binder_transaction *t, struct binder_priority node_prio; bool oneway = !!(t->flags & TF_ONE_WAY); bool pending_async = false; - bool skip = false; + struct binder_transaction *t_outdated = NULL; BUG_ON(!node); binder_node_lock(node); @@ -2518,10 +2867,7 @@ static int binder_proc_transaction(struct binder_transaction *t, return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY; } - trace_android_vh_binder_proc_transaction_entry(proc, t, - &thread, node->debug_id, pending_async, !oneway, &skip); - - if (!thread && !pending_async && !skip) + if (!thread && !pending_async) thread = binder_select_thread_ilocked(proc); trace_android_vh_binder_proc_transaction(current, proc->tsk, @@ -2534,6 +2880,17 @@ static int binder_proc_transaction(struct binder_transaction *t, } else if (!pending_async) { binder_enqueue_work_ilocked(&t->work, &proc->todo); } else { + if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) { + t_outdated = binder_find_outdated_transaction_ilocked(t, + &node->async_todo); + if (t_outdated) { + binder_debug(BINDER_DEBUG_TRANSACTION, + "txn %d supersedes %d\n", + t->debug_id, t_outdated->debug_id); + list_del_init(&t_outdated->work.entry); + proc->outstanding_txns--; + } + } binder_enqueue_work_ilocked(&t->work, &node->async_todo); } @@ -2547,6 +2904,22 @@ static int binder_proc_transaction(struct binder_transaction *t, binder_inner_proc_unlock(proc); binder_node_unlock(node); + /* + * To reduce potential contention, free the outdated transaction and + * buffer after releasing the locks.
+ */ + if (t_outdated) { + struct binder_buffer *buffer = t_outdated->buffer; + + t_outdated->buffer = NULL; + buffer->transaction = NULL; + trace_binder_transaction_update_buffer_release(buffer); + binder_transaction_buffer_release(proc, NULL, buffer, 0, 0); + binder_alloc_free_buf(&proc->alloc, buffer); + kfree(t_outdated); + binder_stats_deleted(BINDER_STAT_TRANSACTION); + } + return 0; } @@ -2605,6 +2978,7 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t off_start_offset, off_end_offset; binder_size_t off_min; binder_size_t sg_buf_offset, sg_buf_end_offset; + binder_size_t user_offset = 0; struct binder_proc *target_proc = NULL; struct binder_thread *target_thread = NULL; struct binder_node *target_node = NULL; @@ -2619,6 +2993,12 @@ static void binder_transaction(struct binder_proc *proc, int t_debug_id = atomic_inc_return(&binder_last_id); char *secctx = NULL; u32 secctx_sz = 0; + struct list_head sgc_head; + struct list_head pf_head; + const void __user *user_buffer = (const void __user *) + (uintptr_t)tr->data.ptr.buffer; + INIT_LIST_HEAD(&sgc_head); + INIT_LIST_HEAD(&pf_head); e = binder_transaction_log_add(&binder_transaction_log); e->debug_id = t_debug_id; @@ -2954,19 +3334,6 @@ static void binder_transaction(struct binder_proc *proc, t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); trace_binder_transaction_alloc_buf(t->buffer); - if (binder_alloc_copy_user_to_buffer( - &target_proc->alloc, - t->buffer, 0, - (const void __user *) - (uintptr_t)tr->data.ptr.buffer, - tr->data_size)) { - binder_user_error("%d:%d got transaction with invalid data ptr\n", - proc->pid, thread->pid); - return_error = BR_FAILED_REPLY; - return_error_param = -EFAULT; - return_error_line = __LINE__; - goto err_copy_data_failed; - } if (binder_alloc_copy_user_to_buffer( &target_proc->alloc, t->buffer, @@ -3011,6 +3378,7 @@ static void binder_transaction(struct binder_proc *proc, size_t object_size; struct binder_object object; binder_size_t object_offset; + binder_size_t copy_size; if (binder_alloc_copy_from_buffer(&target_proc->alloc, &object_offset, @@ -3022,8 +3390,27 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } - object_size = binder_get_object(target_proc, t->buffer, - object_offset, &object); + + /* + * Copy the source user buffer up to the next object + * that will be processed. 
+ */ + copy_size = object_offset - user_offset; + if (copy_size && (user_offset > object_offset || + binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, user_offset, + user_buffer + user_offset, + copy_size))) { + binder_user_error("%d:%d got transaction with invalid data ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + object_size = binder_get_object(target_proc, user_buffer, + t->buffer, object_offset, &object); if (object_size == 0 || object_offset < off_min) { binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", proc->pid, thread->pid, @@ -3035,6 +3422,11 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } + /* + * Set offset to the next buffer fragment to be + * copied + */ + user_offset = object_offset + object_size; hdr = &object.hdr; off_min = object_offset + object_size; @@ -3097,6 +3489,8 @@ static void binder_transaction(struct binder_proc *proc, case BINDER_TYPE_FDA: { struct binder_object ptr_object; binder_size_t parent_offset; + struct binder_object user_object; + size_t user_parent_size; struct binder_fd_array_object *fda = to_binder_fd_array_object(hdr); size_t num_valid = (buffer_offset - off_start_offset) / @@ -3128,11 +3522,35 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_parent; } - ret = binder_translate_fd_array(fda, parent, t, thread, - in_reply_to); - if (ret < 0) { + /* + * We need to read the user version of the parent + * object to get the original user offset + */ + user_parent_size = + binder_get_object(proc, user_buffer, t->buffer, + parent_offset, &user_object); + if (user_parent_size != sizeof(user_object.bbo)) { + binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n", + proc->pid, thread->pid, + user_parent_size, + sizeof(user_object.bbo)); return_error = BR_FAILED_REPLY; - return_error_param = ret; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_parent; + } + ret = binder_translate_fd_array(&pf_head, fda, + user_buffer, parent, + &user_object.bbo, t, + thread, in_reply_to); + if (!ret) + ret = binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, + object_offset, + fda, sizeof(*fda)); + if (ret) { + return_error = BR_FAILED_REPLY; + return_error_param = ret > 0 ? 
-EINVAL : ret; return_error_line = __LINE__; goto err_translate_failed; } @@ -3154,19 +3572,14 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } - if (binder_alloc_copy_user_to_buffer( - &target_proc->alloc, - t->buffer, - sg_buf_offset, - (const void __user *) - (uintptr_t)bp->buffer, - bp->length)) { - binder_user_error("%d:%d got transaction with invalid offsets ptr\n", - proc->pid, thread->pid); - return_error_param = -EFAULT; + ret = binder_defer_copy(&sgc_head, sg_buf_offset, + (const void __user *)(uintptr_t)bp->buffer, + bp->length); + if (ret) { return_error = BR_FAILED_REPLY; + return_error_param = ret; return_error_line = __LINE__; - goto err_copy_data_failed; + goto err_translate_failed; } /* Fixup buffer pointer to target proc address space */ bp->buffer = (uintptr_t) @@ -3175,7 +3588,8 @@ static void binder_transaction(struct binder_proc *proc, num_valid = (buffer_offset - off_start_offset) / sizeof(binder_size_t); - ret = binder_fixup_parent(t, thread, bp, + ret = binder_fixup_parent(&pf_head, t, + thread, bp, off_start_offset, num_valid, last_fixup_obj_off, @@ -3202,6 +3616,30 @@ static void binder_transaction(struct binder_proc *proc, goto err_bad_object_type; } } + /* Done processing objects, copy the rest of the buffer */ + if (binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, user_offset, + user_buffer + user_offset, + tr->data_size - user_offset)) { + binder_user_error("%d:%d got transaction with invalid data ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + + ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer, + &sgc_head, &pf_head); + if (ret) { + binder_user_error("%d:%d got transaction with invalid offsets ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_copy_data_failed; + } if (t->buffer->oneway_spam_suspect) tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT; else @@ -3277,6 +3715,7 @@ err_bad_object_type: err_bad_offset: err_bad_parent: err_copy_data_failed: + binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head); binder_free_txn_fixups(t); trace_binder_transaction_failed_buffer_release(t->buffer); binder_transaction_buffer_release(target_proc, NULL, t->buffer, @@ -4036,10 +4475,6 @@ retry: size_t trsize = sizeof(*trd); binder_inner_proc_lock(proc); - trace_android_vh_binder_select_worklist_ilocked(&list, thread, - proc, wait_for_proc_work); - if (list) - goto skip; if (!binder_worklist_empty_ilocked(&thread->todo)) list = &thread->todo; else if (!binder_worklist_empty_ilocked(&proc->todo) && @@ -4053,7 +4488,7 @@ retry: goto retry; break; } -skip: + if (end - ptr < sizeof(tr) + 4) { binder_inner_proc_unlock(proc); break; @@ -5950,8 +6385,7 @@ static void print_binder_proc_stats(struct seq_file *m, print_binder_stats(m, " ", &proc->stats); } - -int binder_state_show(struct seq_file *m, void *unused) +static int state_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct binder_node *node; @@ -5990,7 +6424,7 @@ int binder_state_show(struct seq_file *m, void *unused) return 0; } -int binder_stats_show(struct seq_file *m, void *unused) +static int stats_show(struct seq_file *m, void *unused) { struct binder_proc *proc; @@ -6006,7 +6440,7 @@ int binder_stats_show(struct seq_file *m, void *unused) return 0; } -int 
binder_transactions_show(struct seq_file *m, void *unused) +static int transactions_show(struct seq_file *m, void *unused) { struct binder_proc *proc; @@ -6062,7 +6496,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m, "\n" : " (incomplete)\n"); } -int binder_transaction_log_show(struct seq_file *m, void *unused) +static int transaction_log_show(struct seq_file *m, void *unused) { struct binder_transaction_log *log = m->private; unsigned int log_cur = atomic_read(&log->cur); @@ -6094,6 +6528,45 @@ const struct file_operations binder_fops = { .release = binder_release, }; +DEFINE_SHOW_ATTRIBUTE(state); +DEFINE_SHOW_ATTRIBUTE(stats); +DEFINE_SHOW_ATTRIBUTE(transactions); +DEFINE_SHOW_ATTRIBUTE(transaction_log); + +const struct binder_debugfs_entry binder_debugfs_entries[] = { + { + .name = "state", + .mode = 0444, + .fops = &state_fops, + .data = NULL, + }, + { + .name = "stats", + .mode = 0444, + .fops = &stats_fops, + .data = NULL, + }, + { + .name = "transactions", + .mode = 0444, + .fops = &transactions_fops, + .data = NULL, + }, + { + .name = "transaction_log", + .mode = 0444, + .fops = &transaction_log_fops, + .data = &binder_transaction_log, + }, + { + .name = "failed_transaction_log", + .mode = 0444, + .fops = &transaction_log_fops, + .data = &binder_transaction_log_failed, + }, + {} /* terminator */ +}; + static int __init init_binder_device(const char *name) { int ret; @@ -6139,36 +6612,18 @@ static int __init binder_init(void) atomic_set(&binder_transaction_log_failed.cur, ~0U); binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); - if (binder_debugfs_dir_entry_root) + if (binder_debugfs_dir_entry_root) { + const struct binder_debugfs_entry *db_entry; + + binder_for_each_debugfs_entry(db_entry) + debugfs_create_file(db_entry->name, + db_entry->mode, + binder_debugfs_dir_entry_root, + db_entry->data, + db_entry->fops); + binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", binder_debugfs_dir_entry_root); - - if (binder_debugfs_dir_entry_root) { - debugfs_create_file("state", - 0444, - binder_debugfs_dir_entry_root, - NULL, - &binder_state_fops); - debugfs_create_file("stats", - 0444, - binder_debugfs_dir_entry_root, - NULL, - &binder_stats_fops); - debugfs_create_file("transactions", - 0444, - binder_debugfs_dir_entry_root, - NULL, - &binder_transactions_fops); - debugfs_create_file("transaction_log", - 0444, - binder_debugfs_dir_entry_root, - &binder_transaction_log, - &binder_transaction_log_fops); - debugfs_create_file("failed_transaction_log", - 0444, - binder_debugfs_dir_entry_root, - &binder_transaction_log_failed, - &binder_transaction_log_fops); } if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) && diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index d30267e08536..447342a878ff 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -213,7 +213,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, mm = alloc->vma_vm_mm; if (mm) { - mmap_read_lock(mm); + mmap_write_lock(mm); vma = alloc->vma; } @@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, trace_binder_alloc_page_end(alloc, index); } if (mm) { - mmap_read_unlock(mm); + mmap_write_unlock(mm); mmput(mm); } return 0; @@ -304,7 +304,7 @@ err_page_ptr_cleared: } err_no_vma: if (mm) { - mmap_read_unlock(mm); + mmap_write_unlock(mm); mmput(mm); } return vma ? 
-ENOMEM : -ESRCH; diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index 93f02395b0d7..00010c2e3e3c 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -107,41 +107,19 @@ static inline int __init init_binderfs(void) } #endif -int binder_stats_show(struct seq_file *m, void *unused); -DEFINE_SHOW_ATTRIBUTE(binder_stats); - -int binder_state_show(struct seq_file *m, void *unused); -DEFINE_SHOW_ATTRIBUTE(binder_state); - -int binder_transactions_show(struct seq_file *m, void *unused); -DEFINE_SHOW_ATTRIBUTE(binder_transactions); - -int binder_transaction_log_show(struct seq_file *m, void *unused); -DEFINE_SHOW_ATTRIBUTE(binder_transaction_log); - -struct binder_transaction_log_entry { - int debug_id; - int debug_id_done; - int call_type; - int from_proc; - int from_thread; - int target_handle; - int to_proc; - int to_thread; - int to_node; - int data_size; - int offsets_size; - int return_error_line; - uint32_t return_error; - uint32_t return_error_param; - char context_name[BINDERFS_MAX_NAME + 1]; +struct binder_debugfs_entry { + const char *name; + umode_t mode; + const struct file_operations *fops; + void *data; }; -struct binder_transaction_log { - atomic_t cur; - bool full; - struct binder_transaction_log_entry entry[32]; -}; +extern const struct binder_debugfs_entry binder_debugfs_entries[]; + +#define binder_for_each_debugfs_entry(entry) \ + for ((entry) = binder_debugfs_entries; \ + (entry)->name; \ + (entry)++) enum binder_stat_types { BINDER_STAT_PROC, @@ -620,6 +598,4 @@ struct binder_object { }; }; -extern struct binder_transaction_log binder_transaction_log; -extern struct binder_transaction_log binder_transaction_log_failed; #endif /* _LINUX_BINDER_INTERNAL_H */ diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h index a70e23716ad0..8c4a6c3774a1 100644 --- a/drivers/android/binder_trace.h +++ b/drivers/android/binder_trace.h @@ -306,6 +306,10 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release, TP_PROTO(struct binder_buffer *buffer), TP_ARGS(buffer)); +DEFINE_EVENT(binder_buffer_class, binder_transaction_update_buffer_release, + TP_PROTO(struct binder_buffer *buffer), + TP_ARGS(buffer)); + TRACE_EVENT(binder_update_page_range, TP_PROTO(struct binder_alloc *alloc, bool allocate, void __user *start, void __user *end), diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 7b4f154f07e6..2e5fbb3adb09 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -584,6 +584,7 @@ out: static int init_binder_logs(struct super_block *sb) { struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir; + const struct binder_debugfs_entry *db_entry; struct binderfs_info *info; int ret = 0; @@ -594,43 +595,15 @@ static int init_binder_logs(struct super_block *sb) goto out; } - dentry = binderfs_create_file(binder_logs_root_dir, "stats", - &binder_stats_fops, NULL); - if (IS_ERR(dentry)) { - ret = PTR_ERR(dentry); - goto out; - } - - dentry = binderfs_create_file(binder_logs_root_dir, "state", - &binder_state_fops, NULL); - if (IS_ERR(dentry)) { - ret = PTR_ERR(dentry); - goto out; - } - - dentry = binderfs_create_file(binder_logs_root_dir, "transactions", - &binder_transactions_fops, NULL); - if (IS_ERR(dentry)) { - ret = PTR_ERR(dentry); - goto out; - } - - dentry = binderfs_create_file(binder_logs_root_dir, - "transaction_log", - &binder_transaction_log_fops, - &binder_transaction_log); - if (IS_ERR(dentry)) { - ret = PTR_ERR(dentry); - 
goto out; - } - - dentry = binderfs_create_file(binder_logs_root_dir, - "failed_transaction_log", - &binder_transaction_log_fops, - &binder_transaction_log_failed); - if (IS_ERR(dentry)) { - ret = PTR_ERR(dentry); - goto out; + binder_for_each_debugfs_entry(db_entry) { + dentry = binderfs_create_file(binder_logs_root_dir, + db_entry->name, + db_entry->fops, + db_entry->data); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); + goto out; + } } proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc"); diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index a6c589b266da..18ef4406879f 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -6,6 +6,11 @@ * Copyright 2020 Google LLC */ +#ifndef __GENKSYMS__ +#include +#include +#endif + #define CREATE_TRACE_POINTS #include #include @@ -37,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -53,7 +59,6 @@ #include #include #include -#include #include #include #include @@ -71,6 +76,7 @@ #include #include #include +#include /* * Export tracepoints that act as a bare tracehook (ie: have no trace event @@ -106,6 +112,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_write_finished); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_rwsem_list_add); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_futex_plist_add); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_sleep_start); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_futex); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_wait_start); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_wait_end); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_wake_traverse_plist); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_wake_this); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_wake_up_q_finish); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_start); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_finish); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_wait_start); @@ -199,6 +211,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_die_kernel_fault); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sea); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_mem_abort); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sp_pc_abort); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setup_dma_ops); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_setup_dma_ops); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_setup_dma_ops); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_alloc_iova); @@ -243,7 +256,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_build_sched_domains); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_mutex_list_add); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_unlock_slowpath); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_unlock_slowpath_end); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_start_check_new_owner); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake_finish); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_undefinstr); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_ptrauth_fault); @@ -259,6 +271,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_commit_creds); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_creds); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_override_creds); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_revert_creds); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_mutex_lock_starttime); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rtmutex_lock_starttime); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rwsem_lock_starttime); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_pcpu_rwsem_starttime); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_x); 
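
The binder hunks above replace five open-coded debugfs_create_file() calls with a sentinel-terminated binder_debugfs_entries[] table that binder_init() and binderfs both walk via binder_for_each_debugfs_entry(). The per-entry fops are generated by DEFINE_SHOW_ATTRIBUTE(); as a sketch, DEFINE_SHOW_ATTRIBUTE(state) expands to roughly the following (per include/linux/seq_file.h):

static int state_open(struct inode *inode, struct file *file)
{
        return single_open(file, state_show, inode->i_private);
}

static const struct file_operations state_fops = {
        .owner          = THIS_MODULE,
        .open           = state_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

Renaming binder_state_show() to the static state_show() is what lets the macro generate the state_fops the table refers to.
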
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_nx); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_ro); @@ -271,7 +287,11 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_mm); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_from_fragment_pool); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exclude_reserved_zone); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_include_reserved_zone); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_slowpath_begin); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_slowpath_end); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_mem); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_mapcount_pages); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_traversal_lruvec); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpci_override_toggling); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_typec_tcpci_chk_contaminant); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_typec_tcpci_get_vbus); @@ -286,10 +306,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_reply); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_trans); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_binder_transaction); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_preset); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_entry); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_end); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_select_worklist_ilocked); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_new_ref); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_del_ref); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_post_init_entity_util_avg); @@ -300,6 +318,11 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_logbuf_pr_cont); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_scan_type); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_swappiness); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_slab_bypass); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_failed_page_trylock); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_trylock_set); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_trylock_clear); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_trylock_get_result); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_page_trylock); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_check_bypass); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_all_pages_bypass); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_drain_all_pages_bypass); @@ -320,7 +343,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_task_fair); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_stat_runtime_rt); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_prepare_update_load_avg_se); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_finish_update_load_avg_se); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_selinux_is_initialized); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_inactive_ratio); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_update_topology_flags_workfn); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_of_i2c_get_board_info); @@ -336,6 +358,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmc_attach_sd); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sdhci_get_cd); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmc_gpio_cd_irqt); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_vmalloc_stack); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_remove_vmalloc_stack); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_stack_hash); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_track_hash); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_vmpressure); @@ -389,7 +412,13 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_udp_recvmsg); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tcp_recvmsg_stat); 
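
Each EXPORT_TRACEPOINT_SYMBOL_GPL() above lets a vendor module attach a probe to the corresponding hook. Vendor hooks are tracepoints underneath, so attaching uses the generated register_trace_*() helper. A minimal sketch; the probe signature here is hypothetical and must in reality match the hook's DECLARE_HOOK() prototype:

static void probe_mmput(void *unused, struct mm_struct *mm)
{
        /* vendor-specific bookkeeping for the mm being released */
}

static int __init vendor_mod_init(void)
{
        /* paired with unregister_trace_android_vh_mmput() on module exit */
        return register_trace_android_vh_mmput(probe_mmput, NULL);
}
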
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_pci_d3_sleep); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kmalloc_slab); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_disable_thermal_cooling_stats); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmap_region); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_update_page_mapcount); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_add_page_to_lrulist); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_del_page_from_lrulist); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_should_be_protected); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mark_page_accessed); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_to_unmap_one); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_id_remove); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_offline); @@ -406,7 +435,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_read_done); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_tlb_conf); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node_memcgs); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ra_tuning_max_page); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_memcg_scan_type); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_handle_pte_fault_end); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_pte_fault_end); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cow_user_page); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swapin_add_anon_rmap); @@ -417,10 +446,15 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_count_pswpin); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_count_pswpout); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_count_swpout_vm_event); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swap_slot_cache_active); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_drain_slots_cache_cpu); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_slots_cache_cpu); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_alloc_swap_slot_cache); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_swap_slot_cache); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_free_swap_slot); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_swap_slot); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_get_swap_page); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_swap_page); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_or_pageout); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_isolated_for_reclaim); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_inactive_is_low); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_snapshot_refaults); @@ -428,5 +462,28 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_account_swap_pages); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_unuse_swap_page); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_init_swap_info_struct); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_swapinfo); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_alloc_si); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_si); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_pages); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_shmem_page_flag); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmput); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_pelt_multiplier); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_reclaim_bypass); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_failure_bypass); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_page_look_around_ref); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around_migrate_page); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_test_clear_look_around_ref); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dma_buf_stats_teardown); +/* + * For type visibility + */ +#ifdef CONFIG_ARM64 +#include +const struct gic_chip_data *GKI_struct_gic_chip_data; +EXPORT_SYMBOL_GPL(GKI_struct_gic_chip_data); 
+#endif + +#include +const struct swap_slots_cache *GKI_struct_swap_slots_cache; +EXPORT_SYMBOL_GPL(GKI_struct_swap_slots_cache); diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index d1f284f0c83d..1ce897356993 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -254,7 +254,7 @@ enum { PCS_7 = 0x94, /* 7+ port PCS (Denverton) */ /* em constants */ - EM_MAX_SLOTS = 8, + EM_MAX_SLOTS = SATA_PMP_MAX_PORTS, EM_MAX_RETRY = 5, /* em_ctl bits */ diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index 388baf528fa8..189f75d53741 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c @@ -1230,4 +1230,4 @@ module_platform_driver(imx_ahci_driver); MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver"); MODULE_AUTHOR("Richard Zhu "); MODULE_LICENSE("GPL"); -MODULE_ALIAS("ahci:imx"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 0910441321f7..64d6da0a5303 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -451,14 +451,24 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev, } } - hpriv->nports = child_nodes = of_get_child_count(dev->of_node); + /* + * Too many sub-nodes most likely means having something wrong with + * the firmware. + */ + child_nodes = of_get_child_count(dev->of_node); + if (child_nodes > AHCI_MAX_PORTS) { + rc = -EINVAL; + goto err_out; + } /* * If no sub-node was found, we still need to set nports to * one in order to be able to use the * ahci_platform_[en|dis]able_[phys|regulators] functions. */ - if (!child_nodes) + if (child_nodes) + hpriv->nports = child_nodes; + else hpriv->nports = 1; hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index d2b544bdc7b5..d13474c6d181 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -3936,6 +3936,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, + /* These specific Pioneer models have LPM issues */ + { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM }, + { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM }, + /* Crucial BX100 SSD 500GB has broken LPM support */ { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM }, @@ -3974,6 +3978,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_NO_DMA_LOG | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | @@ -5472,7 +5479,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, const struct ata_port_info * const * ppi, int n_ports) { - const struct ata_port_info *pi; + const struct ata_port_info *pi = &ata_dummy_port_info; struct ata_host *host; int i, j; @@ -5480,7 +5487,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, if (!host) return NULL; - for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { + for (i = 0, j = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; if (ppi[j]) diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 018ed8736a64..973f4d34d7cd 100644 --- 
a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2131,6 +2131,7 @@ const char *ata_get_cmd_descript(u8 command) { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, + { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" }, { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" }, { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" }, { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index a0e788b64821..f1755efd30a2 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3303,6 +3303,7 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf) case REPORT_LUNS: case REQUEST_SENSE: case SYNCHRONIZE_CACHE: + case SYNCHRONIZE_CACHE_16: case REZERO_UNIT: case SEEK_6: case SEEK_10: @@ -3969,6 +3970,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) return ata_scsi_write_same_xlat; case SYNCHRONIZE_CACHE: + case SYNCHRONIZE_CACHE_16: if (ata_try_flush_cache(dev)) return ata_scsi_flush_xlat; break; @@ -4030,44 +4032,51 @@ void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd) int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev) { + struct ata_port *ap = dev->link->ap; u8 scsi_op = scmd->cmnd[0]; ata_xlat_func_t xlat_func; - int rc = 0; + + /* + * scsi_queue_rq() will defer commands if scsi_host_in_recovery(). + * However, this check is done without holding the ap->lock (a libata + * specific lock), so we can have received an error irq since then, + * therefore we must check if EH is pending, while holding ap->lock. + */ + if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) + return SCSI_MLQUEUE_DEVICE_BUSY; + + if (unlikely(!scmd->cmd_len)) + goto bad_cdb_len; if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { - if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) + if (unlikely(scmd->cmd_len > dev->cdb_len)) goto bad_cdb_len; xlat_func = ata_get_xlat_func(dev, scsi_op); - } else { - if (unlikely(!scmd->cmd_len)) + } else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { + /* relay SCSI command to ATAPI device */ + int len = COMMAND_SIZE(scsi_op); + + if (unlikely(len > scmd->cmd_len || + len > dev->cdb_len || + scmd->cmd_len > ATAPI_CDB_LEN)) goto bad_cdb_len; - xlat_func = NULL; - if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { - /* relay SCSI command to ATAPI device */ - int len = COMMAND_SIZE(scsi_op); - if (unlikely(len > scmd->cmd_len || - len > dev->cdb_len || - scmd->cmd_len > ATAPI_CDB_LEN)) - goto bad_cdb_len; + xlat_func = atapi_xlat; + } else { + /* ATA_16 passthru, treat as an ATA command */ + if (unlikely(scmd->cmd_len > 16)) + goto bad_cdb_len; - xlat_func = atapi_xlat; - } else { - /* ATA_16 passthru, treat as an ATA command */ - if (unlikely(scmd->cmd_len > 16)) - goto bad_cdb_len; - - xlat_func = ata_get_xlat_func(dev, scsi_op); - } + xlat_func = ata_get_xlat_func(dev, scsi_op); } if (xlat_func) - rc = ata_scsi_translate(dev, scmd, xlat_func); - else - ata_scsi_simulate(dev, scmd); + return ata_scsi_translate(dev, scmd, xlat_func); - return rc; + ata_scsi_simulate(dev, scmd); + + return 0; bad_cdb_len: DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", @@ -4215,6 +4224,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) * turning this into a no-op. 
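
Most of the __ata_scsi_queuecmd() change above is refactoring, but the new pflags test closes a real window: scsi_queue_rq() checks scsi_host_in_recovery() without holding ap->lock, so an error interrupt can mark EH pending between that check and command issue. A sketch of the race and the remedy (the re-check runs under ap->lock, which the caller takes):

/*
 *  block layer                           libata interrupt
 *  -----------                           ----------------
 *  scsi_queue_rq()
 *    scsi_host_in_recovery() -> false
 *                                        error IRQ fires
 *                                        ap->pflags |= ATA_PFLAG_EH_PENDING
 *  __ata_scsi_queuecmd()  <-- re-checks under ap->lock
 */
if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS))
        return SCSI_MLQUEUE_DEVICE_BUSY;  /* midlayer requeues after EH */
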
*/ case SYNCHRONIZE_CACHE: + case SYNCHRONIZE_CACHE_16: fallthrough; /* no-op's, complete with success */ diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index 6a40e3c6cf49..31a66fc0c31d 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -196,7 +196,7 @@ static struct { { XFER_PIO_0, "XFER_PIO_0" }, { XFER_PIO_SLOW, "XFER_PIO_SLOW" } }; -ata_bitfield_name_match(xfer,ata_xfer_names) +ata_bitfield_name_search(xfer, ata_xfer_names) /* * ATA Port attributes @@ -301,7 +301,9 @@ int ata_tport_add(struct device *parent, pm_runtime_enable(dev); pm_runtime_forbid(dev); - transport_add_device(dev); + error = transport_add_device(dev); + if (error) + goto tport_transport_add_err; transport_configure_device(dev); error = ata_tlink_add(&ap->link); @@ -312,12 +314,12 @@ int ata_tport_add(struct device *parent, tport_link_err: transport_remove_device(dev); + tport_transport_add_err: device_del(dev); tport_err: transport_destroy_device(dev); put_device(dev); - ata_host_put(ap->host); return error; } @@ -426,7 +428,9 @@ int ata_tlink_add(struct ata_link *link) goto tlink_err; } - transport_add_device(dev); + error = transport_add_device(dev); + if (error) + goto tlink_transport_err; transport_configure_device(dev); ata_for_each_dev(ata_dev, link, ALL) { @@ -441,6 +445,7 @@ int ata_tlink_add(struct ata_link *link) ata_tdev_delete(ata_dev); } transport_remove_device(dev); + tlink_transport_err: device_del(dev); tlink_err: transport_destroy_device(dev); @@ -678,7 +683,13 @@ static int ata_tdev_add(struct ata_device *ata_dev) return error; } - transport_add_device(dev); + error = transport_add_device(dev); + if (error) { + device_del(dev); + ata_tdev_free(ata_dev); + return error; + } + transport_configure_device(dev); return 0; } diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index d91ba47f2fc4..4405d255e3aa 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c @@ -278,9 +278,10 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev) outb(inb(0x1F4) & 0x07, 0x1F4); rt = inb(0x1F3); - rt &= 0x07 << (3 * adev->devno); + rt &= ~(0x07 << (3 * !adev->devno)); if (pio) - rt |= (1 + 3 * pio) << (3 * adev->devno); + rt |= (1 + 3 * pio) << (3 * !adev->devno); + outb(rt, 0x1F3); udelay(100); outb(inb(0x1F2) | 0x01, 0x1F2); diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index b066809ba9a1..c56f4043b0cc 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c @@ -83,6 +83,8 @@ static int marvell_cable_detect(struct ata_port *ap) switch(ap->port_no) { case 0: + if (!ap->ioaddr.bmdma_addr) + return ATA_CBL_PATA_UNK; if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1) return ATA_CBL_PATA40; return ATA_CBL_PATA80; diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index b5a3f710d76d..4cc8a1027888 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -888,12 +888,14 @@ static int octeon_cf_probe(struct platform_device *pdev) int i; res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0); if (!res_dma) { + put_device(&dma_dev->dev); of_node_put(dma_node); return -EINVAL; } cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start, resource_size(res_dma)); if (!cf_port->dma_base) { + put_device(&dma_dev->dev); of_node_put(dma_node); return -EINVAL; } @@ -903,6 +905,7 @@ static int octeon_cf_probe(struct platform_device *pdev) irq = i; irq_handler = octeon_cf_interrupt; } + put_device(&dma_dev->dev); } 
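
The pata_octeon_cf hunks ending just above (the hunk's closing of_node_put() continues below) all enforce one rule: of_find_device_by_node() hands back its device with an elevated reference count, so every exit path taken after the lookup must drop it. The discipline, reduced to a sketch around the probe function's own variables:

dma_dev = of_find_device_by_node(dma_node);     /* takes a reference */
if (dma_dev) {
        res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
        if (!res_dma) {
                put_device(&dma_dev->dev);      /* leaked before the fix */
                of_node_put(dma_node);
                return -EINVAL;
        }
        /* ... map the DMA registers, pick up the IRQ ... */
        put_device(&dma_dev->dev);              /* lookup no longer needed */
}
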
of_node_put(dma_node); } diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index 982fe9112532..464260f66870 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -145,7 +145,11 @@ struct sata_dwc_device { #endif }; -#define SATA_DWC_QCMD_MAX 32 +/* + * Allow one extra special slot for commands and DMA management + * to account for libata internal commands. + */ +#define SATA_DWC_QCMD_MAX (ATA_MAX_QUEUE + 1) struct sata_dwc_device_port { struct sata_dwc_device *hsdev; diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 5f0472c18bcb..82f6f1fbe9e7 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3767,6 +3767,7 @@ static void __exit idt77252_exit(void) card = idt77252_chain; dev = card->atmdev; idt77252_chain = card->next; + del_timer_sync(&card->tst_timer); if (dev->phy->stop) dev->phy->stop(dev); diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index f87f3b2ebd86..321cb3138d4e 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -526,7 +526,7 @@ void update_siblings_masks(unsigned int cpuid) for_each_online_cpu(cpu) { cpu_topo = &cpu_topology[cpu]; - if (cpuid_topo->llc_id == cpu_topo->llc_id) { + if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) { cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); } @@ -607,4 +607,23 @@ void __init init_cpu_topology(void) else if (of_have_populated_dt() && parse_dt_topology()) reset_cpu_topology(); } + +void store_cpu_topology(unsigned int cpuid) +{ + struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; + + if (cpuid_topo->package_id != -1) + goto topology_populated; + + cpuid_topo->thread_id = -1; + cpuid_topo->core_id = cpuid; + cpuid_topo->package_id = cpu_to_node(cpuid); + + pr_debug("CPU%u: package %d core %d thread %d\n", + cpuid, cpuid_topo->package_id, cpuid_topo->core_id, + cpuid_topo->thread_id); + +topology_populated: + update_siblings_masks(cpuid); +} #endif diff --git a/drivers/base/bus.c b/drivers/base/bus.c index a9c23ecebc7c..df85e928b97f 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -621,7 +621,7 @@ int bus_add_driver(struct device_driver *drv) if (drv->bus->p->drivers_autoprobe) { error = driver_attach(drv); if (error) - goto out_unregister; + goto out_del_list; } module_add_driver(drv->owner, drv); @@ -648,6 +648,8 @@ int bus_add_driver(struct device_driver *drv) return 0; +out_del_list: + klist_del(&priv->knode_bus); out_unregister: kobject_put(&priv->kobj); /* drv->p is freed in driver_release() */ diff --git a/drivers/base/core.c b/drivers/base/core.c index 4dbe8276f579..23fce12a5486 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -470,7 +470,8 @@ static void device_link_release_fn(struct work_struct *work) /* Ensure that all references to the link object have been dropped. 
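
The bus_add_driver() fix above is a classic unwind-ordering repair: the driver is put on the bus's klist before driver_attach() runs, so a failed attach must take the driver back off that list instead of jumping straight to kobject_put(). Reduced to its control flow (the klist_add_tail() happens earlier in the unchanged part of the function):

klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
...
error = driver_attach(drv);
if (error)
        goto out_del_list;              /* new label: undo the list add */
...
out_del_list:
        klist_del(&priv->knode_bus);
out_unregister:
        kobject_put(&priv->kobj);       /* drv->p is freed in driver_release() */
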
*/ device_link_synchronize_removal(); - pm_runtime_release_supplier(link, true); + pm_runtime_release_supplier(link); + pm_request_idle(link->supplier); put_device(link->consumer); put_device(link->supplier); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 8f1d6569564c..24dd532c8e5e 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -566,6 +566,18 @@ ssize_t __weak cpu_show_srbds(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_retbleed(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -575,6 +587,8 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); +static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); +static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -586,6 +600,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_tsx_async_abort.attr, &dev_attr_itlb_multihit.attr, &dev_attr_srbds.attr, + &dev_attr_mmio_stale_data.attr, + &dev_attr_retbleed.attr, NULL }; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index b1b8f480a752..020fd562e1d1 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -250,7 +250,6 @@ DEFINE_SHOW_ATTRIBUTE(deferred_devs); int driver_deferred_probe_timeout; EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout); -static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue); static int __init deferred_probe_timeout_setup(char *str) { @@ -302,7 +301,6 @@ static void deferred_probe_timeout_work_func(struct work_struct *work) list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe) dev_info(p->device, "deferred probe pending\n"); mutex_unlock(&deferred_probe_mutex); - wake_up_all(&probe_timeout_waitqueue); } static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func); @@ -706,9 +704,6 @@ int driver_probe_done(void) */ void wait_for_device_probe(void) { - /* wait for probe timeout */ - wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout); - /* wait for the deferred probe workqueue to finish */ flush_work(&deferred_probe_work); @@ -842,6 +837,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data) } else if (ret == -EPROBE_DEFER) { dev_dbg(dev, "Device match requests probe deferral\n"); driver_deferred_probe_add(dev); + /* + * Device can't match with a driver right now, so don't attempt + * to match or bind with other drivers on the bus. 
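
The comment being added above relies on a contract worth spelling out: __device_attach_driver() runs as the callback of bus_for_each_drv(), and that iterator aborts the walk as soon as the callback returns non-zero. Returning -EPROBE_DEFER (instead of swallowing it, as before) therefore stops pointless matching against the remaining drivers. A toy model of the contract, not the real implementation:

static int for_each_drv(int (*fn)(struct device_driver *, void *),
                        struct device_driver **drvs, void *data)
{
        int ret = 0;

        while (*drvs && !ret)
                ret = fn(*drvs++, data);        /* -EPROBE_DEFER stops here */
        return ret;
}
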
+ */ + return ret; } else if (ret < 0) { dev_dbg(dev, "Bus failed to match device: %d\n", ret); return ret; @@ -897,6 +897,7 @@ out_unlock: static int __device_attach(struct device *dev, bool allow_async) { int ret = 0; + bool async = false; device_lock(dev); if (dev->p->dead) { @@ -935,7 +936,7 @@ static int __device_attach(struct device *dev, bool allow_async) */ dev_dbg(dev, "scheduling asynchronous probe\n"); get_device(dev); - async_schedule_dev(__device_attach_async_helper, dev); + async = true; } else { pm_request_idle(dev); } @@ -945,6 +946,8 @@ static int __device_attach(struct device *dev, bool allow_async) } out_unlock: device_unlock(dev); + if (async) + async_schedule_dev(__device_attach_async_helper, dev); return ret; } @@ -1058,6 +1061,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie) static int __driver_attach(struct device *dev, void *data) { struct device_driver *drv = data; + bool async = false; int ret; /* @@ -1077,6 +1081,11 @@ static int __driver_attach(struct device *dev, void *data) } else if (ret == -EPROBE_DEFER) { dev_dbg(dev, "Device match requests probe deferral\n"); driver_deferred_probe_add(dev); + /* + * Driver could not match with device, but may match with + * another device on the bus. + */ + return 0; } else if (ret < 0) { dev_dbg(dev, "Bus failed to match device: %d\n", ret); return ret; @@ -1095,9 +1104,11 @@ static int __driver_attach(struct device *dev, void *data) if (!dev->driver) { get_device(dev); dev->p->async_driver = drv; - async_schedule_dev(__driver_attach_async_helper, dev); + async = true; } device_unlock(dev); + if (async) + async_schedule_dev(__driver_attach_async_helper, dev); return 0; } diff --git a/drivers/base/memory.c b/drivers/base/memory.c index de058d15b33e..49eb14271f28 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -560,10 +560,9 @@ int register_memory(struct memory_block *memory) } ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory, GFP_KERNEL)); - if (ret) { - put_device(&memory->dev); + if (ret) device_unregister(&memory->dev); - } + return ret; } diff --git a/drivers/base/node.c b/drivers/base/node.c index 21965de8538b..5f745c906c33 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -655,6 +655,7 @@ static int register_node(struct node *node, int num) */ void unregister_node(struct node *node) { + compaction_unregister_node(node); hugetlb_unregister_node(node); /* no-op, if memoryless node */ node_remove_accesses(node); node_remove_caches(node); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index b1974d3e54a6..c4e56e058599 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -219,6 +219,9 @@ static void genpd_debug_remove(struct generic_pm_domain *genpd) { struct dentry *d; + if (!genpd_debugfs_dir) + return; + d = debugfs_lookup(genpd->name, genpd_debugfs_dir); debugfs_remove(d); } @@ -1972,6 +1975,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd, genpd->device_count = 0; genpd->max_off_time_ns = -1; genpd->max_off_time_changed = true; + genpd->next_wakeup = KTIME_MAX; genpd->provider = NULL; genpd->has_provider = false; genpd->accounting_time = ktime_get(); @@ -2052,9 +2056,9 @@ static int genpd_remove(struct generic_pm_domain *genpd) kfree(link); } - genpd_debug_remove(genpd); list_del(&genpd->gpd_list_node); genpd_unlock(genpd); + genpd_debug_remove(genpd); cancel_work_sync(&genpd->power_off_work); if (genpd_is_cpu_domain(genpd)) free_cpumask_var(genpd->cpus); @@ -2857,6 +2861,10 @@ static int 
genpd_iterate_idle_states(struct device_node *dn, np = it.node; if (!of_match_node(idle_state_match, np)) continue; + + if (!of_device_is_available(np)) + continue; + if (states) { ret = genpd_parse_state(&states[i], np); if (ret) { diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 157331940488..835a39e84c1d 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -308,13 +308,10 @@ static int rpm_get_suppliers(struct device *dev) /** * pm_runtime_release_supplier - Drop references to device link's supplier. * @link: Target device link. - * @check_idle: Whether or not to check if the supplier device is idle. * - * Drop all runtime PM references associated with @link to its supplier device - * and if @check_idle is set, check if that device is idle (and so it can be - * suspended). + * Drop all runtime PM references associated with @link to its supplier device. */ -void pm_runtime_release_supplier(struct device_link *link, bool check_idle) +void pm_runtime_release_supplier(struct device_link *link) { struct device *supplier = link->supplier; @@ -327,9 +324,6 @@ void pm_runtime_release_supplier(struct device_link *link, bool check_idle) while (refcount_dec_not_one(&link->rpm_active) && atomic_read(&supplier->power.usage_count) > 0) pm_runtime_put_noidle(supplier); - - if (check_idle) - pm_request_idle(supplier); } static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) @@ -337,8 +331,11 @@ static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) struct device_link *link; list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, - device_links_read_lock_held()) - pm_runtime_release_supplier(link, try_to_suspend); + device_links_read_lock_held()) { + pm_runtime_release_supplier(link); + if (try_to_suspend) + pm_request_idle(link->supplier); + } } static void rpm_put_suppliers(struct device *dev) @@ -1776,7 +1773,8 @@ void pm_runtime_drop_link(struct device_link *link) return; pm_runtime_drop_link_count(link->consumer); - pm_runtime_release_supplier(link, true); + pm_runtime_release_supplier(link); + pm_request_idle(link->supplier); } static bool pm_runtime_need_not_resume(struct device *dev) diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 94665037f4a3..72b7a92337b1 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -120,7 +120,11 @@ static unsigned int read_magic_time(void) struct rtc_time time; unsigned int val; - mc146818_get_time(&time); + if (mc146818_get_time(&time) < 0) { + pr_err("Unable to read current time from RTC\n"); + return 0; + } + pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time); val = time.tm_year; /* 100 years */ if (val > 100) diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 87c5c421e0f4..4466f8bdab2e 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -220,6 +220,7 @@ static void regmap_irq_enable(struct irq_data *data) struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); struct regmap *map = d->map; const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); + unsigned int reg = irq_data->reg_offset / map->reg_stride; unsigned int mask, type; type = irq_data->type.type_falling_val | irq_data->type.type_rising_val; @@ -236,14 +237,14 @@ static void regmap_irq_enable(struct irq_data *data) * at the corresponding offset in regmap_irq_set_type(). 
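
The two regmap_irq_enable() changes straddling this point are both about shared registers: d->type_buf[reg] accumulates type bits for every interrupt living in that register, so with type_in_mask set the staged value has to be filtered through this interrupt's own mask before it is cleared from mask_buf. A worked example with made-up bit layouts:

unsigned int type_buf = 0x0f;           /* staged type bits: irq A and irq B */
unsigned int mask_a   = 0x03;           /* bits owned by irq A alone */
unsigned int mask_buf = 0xff;           /* all interrupts currently masked */

mask_buf &= ~(type_buf & mask_a);       /* 0xfc: A unmasked, B untouched */
/* old behaviour, mask_buf &= ~type_buf, would give 0xf0 and unmask B too */
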
*/ if (d->chip->type_in_mask && type) - mask = d->type_buf[irq_data->reg_offset / map->reg_stride]; + mask = d->type_buf[reg] & irq_data->mask; else mask = irq_data->mask; if (d->chip->clear_on_unmask) d->clear_status = true; - d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask; + d->mask_buf[reg] &= ~mask; } static void regmap_irq_disable(struct irq_data *data) diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index f2548049aa0e..40c53632512b 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -39,6 +39,22 @@ config BLK_DEV_FD To compile this driver as a module, choose M here: the module will be called floppy. +config BLK_DEV_FD_RAWCMD + bool "Support for raw floppy disk commands (DEPRECATED)" + depends on BLK_DEV_FD + help + If you want to use actual physical floppies and expect to do + special low-level hardware accesses to them (access and use + non-standard formats, for example), then enable this. + + Note that the code enabled by this option is rarely used and + might be unstable or insecure, and distros should not enable it. + + Note: FDRAWCMD is deprecated and will be removed from the kernel + in the near future. + + If unsure, say N. + config AMIGA_FLOPPY tristate "Amiga floppy support" depends on AMIGA diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 8f879e5c2f67..60b9ca53c0a3 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1644,22 +1644,22 @@ struct sib_info { }; void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib); -extern void notify_resource_state(struct sk_buff *, +extern int notify_resource_state(struct sk_buff *, unsigned int, struct drbd_resource *, struct resource_info *, enum drbd_notification_type); -extern void notify_device_state(struct sk_buff *, +extern int notify_device_state(struct sk_buff *, unsigned int, struct drbd_device *, struct device_info *, enum drbd_notification_type); -extern void notify_connection_state(struct sk_buff *, +extern int notify_connection_state(struct sk_buff *, unsigned int, struct drbd_connection *, struct connection_info *, enum drbd_notification_type); -extern void notify_peer_device_state(struct sk_buff *, +extern int notify_peer_device_state(struct sk_buff *, unsigned int, struct drbd_peer_device *, struct peer_device_info *, diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 65b95aef8dbc..51450f7c81af 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -184,7 +184,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr, unsigned int set_size) { struct drbd_request *r; - struct drbd_request *req = NULL; + struct drbd_request *req = NULL, *tmp = NULL; int expect_epoch = 0; int expect_size = 0; @@ -238,8 +238,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr, * to catch requests being barrier-acked "unexpectedly". * It usually should find the same req again, or some READ preceding it. 
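
The tl_release() fix above (its second loop continues below) hinges on list_prepare_entry(): the first walk only records the matching request in tmp, and list_prepare_entry() then turns either that hit or a NULL into a valid starting point for the deletion-safe walk. The pattern in isolation, with hypothetical types:

struct item *it, *n, *hit = NULL;

list_for_each_entry(it, &queue, node)
        if (it->epoch == expect_epoch) {
                hit = it;
                break;
        }

/* hit == NULL degrades to the list head; the _from walk then visits nothing */
it = list_prepare_entry(hit, &queue, node);
list_for_each_entry_safe_from(it, n, &queue, node) {
        if (it->epoch != expect_epoch)
                break;
        /* "it" may be unlinked here without breaking the iteration */
}
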
*/ list_for_each_entry(req, &connection->transfer_log, tl_requests) - if (req->epoch == expect_epoch) + if (req->epoch == expect_epoch) { + tmp = req; break; + } + req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests); list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) { if (req->epoch != expect_epoch) break; @@ -2717,7 +2720,7 @@ static int init_submitter(struct drbd_device *device) enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor) { struct drbd_resource *resource = adm_ctx->resource; - struct drbd_connection *connection; + struct drbd_connection *connection, *n; struct drbd_device *device; struct drbd_peer_device *peer_device, *tmp_peer_device; struct gendisk *disk; @@ -2836,7 +2839,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig out_idr_remove_vol: idr_remove(&connection->peer_devices, vnr); out_idr_remove_from_resource: - for_each_connection(connection, resource) { + for_each_connection_safe(connection, n, resource) { peer_device = idr_remove(&connection->peer_devices, vnr); if (peer_device) kref_put(&connection->kref, drbd_destroy_connection); @@ -3628,9 +3631,8 @@ const char *cmdname(enum drbd_packet cmd) * when we want to support more than * one PRO_VERSION */ static const char *cmdnames[] = { + [P_DATA] = "Data", - [P_WSAME] = "WriteSame", - [P_TRIM] = "Trim", [P_DATA_REPLY] = "DataReply", [P_RS_DATA_REPLY] = "RSDataReply", [P_BARRIER] = "Barrier", @@ -3641,7 +3643,6 @@ const char *cmdname(enum drbd_packet cmd) [P_DATA_REQUEST] = "DataRequest", [P_RS_DATA_REQUEST] = "RSDataRequest", [P_SYNC_PARAM] = "SyncParam", - [P_SYNC_PARAM89] = "SyncParam89", [P_PROTOCOL] = "ReportProtocol", [P_UUIDS] = "ReportUUIDs", [P_SIZES] = "ReportSizes", @@ -3649,6 +3650,7 @@ const char *cmdname(enum drbd_packet cmd) [P_SYNC_UUID] = "ReportSyncUUID", [P_AUTH_CHALLENGE] = "AuthChallenge", [P_AUTH_RESPONSE] = "AuthResponse", + [P_STATE_CHG_REQ] = "StateChgRequest", [P_PING] = "Ping", [P_PING_ACK] = "PingAck", [P_RECV_ACK] = "RecvAck", @@ -3659,24 +3661,26 @@ const char *cmdname(enum drbd_packet cmd) [P_NEG_DREPLY] = "NegDReply", [P_NEG_RS_DREPLY] = "NegRSDReply", [P_BARRIER_ACK] = "BarrierAck", - [P_STATE_CHG_REQ] = "StateChgRequest", [P_STATE_CHG_REPLY] = "StateChgReply", [P_OV_REQUEST] = "OVRequest", [P_OV_REPLY] = "OVReply", [P_OV_RESULT] = "OVResult", [P_CSUM_RS_REQUEST] = "CsumRSRequest", [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", + [P_SYNC_PARAM89] = "SyncParam89", [P_COMPRESSED_BITMAP] = "CBitmap", [P_DELAY_PROBE] = "DelayProbe", [P_OUT_OF_SYNC] = "OutOfSync", - [P_RETRY_WRITE] = "RetryWrite", [P_RS_CANCEL] = "RSCancel", [P_CONN_ST_CHG_REQ] = "conn_st_chg_req", [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply", [P_RETRY_WRITE] = "retry_write", [P_PROTOCOL_UPDATE] = "protocol_update", + [P_TRIM] = "Trim", [P_RS_THIN_REQ] = "rs_thin_req", [P_RS_DEALLOCATED] = "rs_deallocated", + [P_WSAME] = "WriteSame", + [P_ZEROES] = "Zeroes", /* enum drbd_packet, but not commands - obsoleted flags: * P_MAY_IGNORE diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index bf7de4c7b96c..54f77b4a0b49 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -790,9 +790,11 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info) mutex_lock(&adm_ctx.resource->adm_mutex); if (info->genlhdr->cmd == DRBD_ADM_PRIMARY) - retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate); + retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device, + 
R_PRIMARY, parms.assume_uptodate); else - retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0); + retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device, + R_SECONDARY, 0); mutex_unlock(&adm_ctx.resource->adm_mutex); genl_lock(); @@ -1962,7 +1964,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) drbd_flush_workqueue(&connection->sender_work); rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE); - retcode = rv; /* FIXME: Type mismatch. */ + retcode = (enum drbd_ret_code)rv; drbd_resume_io(device); if (rv < SS_SUCCESS) goto fail; @@ -2687,7 +2689,8 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) } rcu_read_unlock(); - retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE); + retcode = (enum drbd_ret_code)conn_request_state(connection, + NS(conn, C_UNCONNECTED), CS_VERBOSE); conn_reconfig_done(connection); mutex_unlock(&adm_ctx.resource->adm_mutex); @@ -2800,7 +2803,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) mutex_lock(&adm_ctx.resource->adm_mutex); rv = conn_try_disconnect(connection, parms.force_disconnect); if (rv < SS_SUCCESS) - retcode = rv; /* FIXME: Type mismatch. */ + retcode = (enum drbd_ret_code)rv; else retcode = NO_ERROR; mutex_unlock(&adm_ctx.resource->adm_mutex); @@ -4614,7 +4617,7 @@ static int nla_put_notification_header(struct sk_buff *msg, return drbd_notification_header_to_skb(msg, &nh, true); } -void notify_resource_state(struct sk_buff *skb, +int notify_resource_state(struct sk_buff *skb, unsigned int seq, struct drbd_resource *resource, struct resource_info *resource_info, @@ -4656,16 +4659,17 @@ void notify_resource_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } -void notify_device_state(struct sk_buff *skb, +int notify_device_state(struct sk_buff *skb, unsigned int seq, struct drbd_device *device, struct device_info *device_info, @@ -4705,16 +4709,17 @@ void notify_device_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } -void notify_connection_state(struct sk_buff *skb, +int notify_connection_state(struct sk_buff *skb, unsigned int seq, struct drbd_connection *connection, struct connection_info *connection_info, @@ -4754,16 +4759,17 @@ void notify_connection_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } -void notify_peer_device_state(struct sk_buff *skb, +int notify_peer_device_state(struct sk_buff *skb, unsigned int seq, struct drbd_peer_device *peer_device, struct peer_device_info *peer_device_info, @@ -4804,13 +4810,14 @@ void notify_peer_device_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(peer_device, "Error %d while broadcasting event. 
Event seq:%u\n", err, seq); + return err; } void notify_helper(enum drbd_notification_type type, @@ -4861,7 +4868,7 @@ fail: err, seq); } -static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq) +static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq) { struct drbd_genlmsghdr *dh; int err; @@ -4875,11 +4882,12 @@ static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq) if (nla_put_notification_header(skb, NOTIFY_EXISTS)) goto nla_put_failure; genlmsg_end(skb, dh); - return; + return 0; nla_put_failure: nlmsg_free(skb); pr_err("Error %d sending event. Event seq:%u\n", err, seq); + return err; } static void free_state_changes(struct list_head *list) @@ -4906,6 +4914,7 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) unsigned int seq = cb->args[2]; unsigned int n; enum drbd_notification_type flags = 0; + int err = 0; /* There is no need for taking notification_mutex here: it doesn't matter if the initial state events mix with later state chage @@ -4914,32 +4923,32 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) cb->args[5]--; if (cb->args[5] == 1) { - notify_initial_state_done(skb, seq); + err = notify_initial_state_done(skb, seq); goto out; } n = cb->args[4]++; if (cb->args[4] < cb->args[3]) flags |= NOTIFY_CONTINUES; if (n < 1) { - notify_resource_state_change(skb, seq, state_change->resource, + err = notify_resource_state_change(skb, seq, state_change->resource, NOTIFY_EXISTS | flags); goto next; } n--; if (n < state_change->n_connections) { - notify_connection_state_change(skb, seq, &state_change->connections[n], + err = notify_connection_state_change(skb, seq, &state_change->connections[n], NOTIFY_EXISTS | flags); goto next; } n -= state_change->n_connections; if (n < state_change->n_devices) { - notify_device_state_change(skb, seq, &state_change->devices[n], + err = notify_device_state_change(skb, seq, &state_change->devices[n], NOTIFY_EXISTS | flags); goto next; } n -= state_change->n_devices; if (n < state_change->n_devices * state_change->n_connections) { - notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n], + err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n], NOTIFY_EXISTS | flags); goto next; } @@ -4954,7 +4963,10 @@ next: cb->args[4] = 0; } out: - return skb->len; + if (err) + return err; + else + return skb->len; } int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 0067d328f0b5..5fbaea6b77b1 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device, return rv; } -void notify_resource_state_change(struct sk_buff *skb, +int notify_resource_state_change(struct sk_buff *skb, unsigned int seq, struct drbd_resource_state_change *resource_state_change, enum drbd_notification_type type) @@ -1550,10 +1550,10 @@ void notify_resource_state_change(struct sk_buff *skb, .res_susp_fen = resource_state_change->susp_fen[NEW], }; - notify_resource_state(skb, seq, resource, &resource_info, type); + return notify_resource_state(skb, seq, resource, &resource_info, type); } -void notify_connection_state_change(struct sk_buff *skb, +int notify_connection_state_change(struct sk_buff *skb, unsigned int seq, struct drbd_connection_state_change *connection_state_change, enum drbd_notification_type type) @@ -1564,10 
+1564,10 @@ void notify_connection_state_change(struct sk_buff *skb, .conn_role = connection_state_change->peer_role[NEW], }; - notify_connection_state(skb, seq, connection, &connection_info, type); + return notify_connection_state(skb, seq, connection, &connection_info, type); } -void notify_device_state_change(struct sk_buff *skb, +int notify_device_state_change(struct sk_buff *skb, unsigned int seq, struct drbd_device_state_change *device_state_change, enum drbd_notification_type type) @@ -1577,10 +1577,10 @@ void notify_device_state_change(struct sk_buff *skb, .dev_disk_state = device_state_change->disk_state[NEW], }; - notify_device_state(skb, seq, device, &device_info, type); + return notify_device_state(skb, seq, device, &device_info, type); } -void notify_peer_device_state_change(struct sk_buff *skb, +int notify_peer_device_state_change(struct sk_buff *skb, unsigned int seq, struct drbd_peer_device_state_change *p, enum drbd_notification_type type) @@ -1594,7 +1594,7 @@ void notify_peer_device_state_change(struct sk_buff *skb, .peer_resync_susp_dependency = p->resync_susp_dependency[NEW], }; - notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type); + return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type); } static void broadcast_state_change(struct drbd_state_change *state_change) @@ -1602,7 +1602,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change) struct drbd_resource_state_change *resource_state_change = &state_change->resource[0]; bool resource_state_has_changed; unsigned int n_device, n_connection, n_peer_device, n_peer_devices; - void (*last_func)(struct sk_buff *, unsigned int, void *, + int (*last_func)(struct sk_buff *, unsigned int, void *, enum drbd_notification_type) = NULL; void *last_arg = NULL; diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h index ba80f612d6ab..d5b0479bc9a6 100644 --- a/drivers/block/drbd/drbd_state_change.h +++ b/drivers/block/drbd/drbd_state_change.h @@ -44,19 +44,19 @@ extern struct drbd_state_change *remember_old_state(struct drbd_resource *, gfp_ extern void copy_old_to_new_state_change(struct drbd_state_change *); extern void forget_state_change(struct drbd_state_change *); -extern void notify_resource_state_change(struct sk_buff *, +extern int notify_resource_state_change(struct sk_buff *, unsigned int, struct drbd_resource_state_change *, enum drbd_notification_type type); -extern void notify_connection_state_change(struct sk_buff *, +extern int notify_connection_state_change(struct sk_buff *, unsigned int, struct drbd_connection_state_change *, enum drbd_notification_type type); -extern void notify_device_state_change(struct sk_buff *, +extern int notify_device_state_change(struct sk_buff *, unsigned int, struct drbd_device_state_change *, enum drbd_notification_type type); -extern void notify_peer_device_state_change(struct sk_buff *, +extern int notify_peer_device_state_change(struct sk_buff *, unsigned int, struct drbd_peer_device_state_change *, enum drbd_notification_type type); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index aaee15058d18..4ef407a33996 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -509,8 +509,8 @@ static unsigned long fdc_busy; static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); static DECLARE_WAIT_QUEUE_HEAD(command_done); -/* Errors during formatting are counted here. 
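
Taken together, the drbd hunks above convert the whole notify_*() family from void to int for a single consumer: get_initial_state() is a netlink dump callback, and returning skb->len after a failed nla_put quietly truncated the event stream. With int-returning notifiers the error can finally reach the netlink core; the essential shape, condensed from the hunks:

err = notify_resource_state_change(skb, seq, state_change->resource,
                                   NOTIFY_EXISTS | flags);
if (err)
        goto out;
...
out:
        return err ? err : skb->len;    /* was: unconditionally skb->len */
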
*/ -static int format_errors; +/* errors encountered on the current (or last) request */ +static int floppy_errors; /* Format request descriptor. */ static struct format_descr format_req; @@ -530,7 +530,6 @@ static struct format_descr format_req; static char *floppy_track_buffer; static int max_buffer_sectors; -static int *errors; typedef void (*done_f)(int); static const struct cont_t { void (*interrupt)(void); @@ -1455,7 +1454,7 @@ static int interpret_errors(void) if (drive_params[current_drive].flags & FTD_MSG) DPRINT("Over/Underrun - retrying\n"); bad = 0; - } else if (*errors >= drive_params[current_drive].max_errors.reporting) { + } else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) { print_errors(); } if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC) @@ -2095,7 +2094,7 @@ static void bad_flp_intr(void) if (!next_valid_format(current_drive)) return; } - err_count = ++(*errors); + err_count = ++floppy_errors; INFBOUND(write_errors[current_drive].badness, err_count); if (err_count > drive_params[current_drive].max_errors.abort) cont->done(0); @@ -2240,9 +2239,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req) return -EINVAL; } format_req = *tmp_format_req; - format_errors = 0; cont = &format_cont; - errors = &format_errors; + floppy_errors = 0; ret = wait_til_done(redo_format, true); if (ret == -EINTR) return -EINTR; @@ -2721,7 +2719,7 @@ static int make_raw_rw_request(void) */ if (!direct || (indirect * 2 > direct * 3 && - *errors < drive_params[current_drive].max_errors.read_track && + floppy_errors < drive_params[current_drive].max_errors.read_track && ((!probing || (drive_params[current_drive].read_track & (1 << drive_state[current_drive].probed_format)))))) { max_size = blk_rq_sectors(current_req); @@ -2846,10 +2844,11 @@ static int set_next_request(void) current_req = list_first_entry_or_null(&floppy_reqs, struct request, queuelist); if (current_req) { - current_req->error_count = 0; + floppy_errors = 0; list_del_init(&current_req->queuelist); + return 1; } - return current_req != NULL; + return 0; } /* Starts or continues processing request. 
Will automatically unlock the @@ -2908,7 +2907,6 @@ do_request: _floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format]; } else probing = 0; - errors = &(current_req->error_count); tmp = make_raw_rw_request(); if (tmp < 2) { request_done(tmp); @@ -3069,6 +3067,8 @@ static const char *drive_name(int type, int drive) return "(null)"; } +#ifdef CONFIG_BLK_DEV_FD_RAWCMD + /* raw commands */ static void raw_cmd_done(int flag) { @@ -3273,6 +3273,35 @@ static int raw_cmd_ioctl(int cmd, void __user *param) return ret; } +static int floppy_raw_cmd_ioctl(int type, int drive, int cmd, + void __user *param) +{ + int ret; + + pr_warn_once("Note: FDRAWCMD is deprecated and will be removed from the kernel in the near future.\n"); + + if (type) + return -EINVAL; + if (lock_fdc(drive)) + return -EINTR; + set_floppy(drive); + ret = raw_cmd_ioctl(cmd, param); + if (ret == -EINTR) + return -EINTR; + process_fd_request(); + return ret; +} + +#else /* CONFIG_BLK_DEV_FD_RAWCMD */ + +static int floppy_raw_cmd_ioctl(int type, int drive, int cmd, + void __user *param) +{ + return -EOPNOTSUPP; +} + +#endif + static int invalidate_drive(struct block_device *bdev) { /* invalidate the buffer track to force a reread */ @@ -3461,7 +3490,6 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int { int drive = (long)bdev->bd_disk->private_data; int type = ITYPE(drive_state[drive].fd_device); - int i; int ret; int size; union inparam { @@ -3612,16 +3640,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int outparam = &write_errors[drive]; break; case FDRAWCMD: - if (type) - return -EINVAL; - if (lock_fdc(drive)) - return -EINTR; - set_floppy(drive); - i = raw_cmd_ioctl(cmd, (void __user *)param); - if (i == -EINTR) - return -EINTR; - process_fd_request(); - return i; + return floppy_raw_cmd_ioctl(type, drive, cmd, (void __user *)param); case FDTWADDLE: if (lock_fdc(drive)) return -EINTR; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 64994c39a117..538a0764c0f7 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1031,6 +1031,11 @@ loop_set_status_from_info(struct loop_device *lo, lo->lo_offset = info->lo_offset; lo->lo_sizelimit = info->lo_sizelimit; + + /* loff_t vars have been assigned __u64 */ + if (lo->lo_offset < 0 || lo->lo_sizelimit < 0) + return -EOVERFLOW; + memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); lo->lo_file_name[LO_NAME_SIZE-1] = 0; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 59c452fff835..b0d3dadeb964 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -880,11 +880,15 @@ static int wait_for_reconnect(struct nbd_device *nbd) struct nbd_config *config = nbd->config; if (!config->dead_conn_timeout) return 0; - if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) + + if (!wait_event_timeout(config->conn_wait, + test_bit(NBD_RT_DISCONNECTED, + &config->runtime_flags) || + atomic_read(&config->live_connections) > 0, + config->dead_conn_timeout)) return 0; - return wait_event_timeout(config->conn_wait, - atomic_read(&config->live_connections) > 0, - config->dead_conn_timeout) > 0; + + return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags); } static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) @@ -1338,10 +1342,12 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b mutex_unlock(&nbd->config_lock); ret = 
wait_event_interruptible(config->recv_wq, atomic_read(&config->recv_threads) == 0); - if (ret) + if (ret) { sock_shutdown(nbd); - flush_workqueue(nbd->recv_workq); + nbd_clear_que(nbd); + } + flush_workqueue(nbd->recv_workq); mutex_lock(&nbd->config_lock); nbd_bdev_reset(bdev); /* user requested, ignore socket errors */ @@ -1355,7 +1361,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b static void nbd_clear_sock_ioctl(struct nbd_device *nbd, struct block_device *bdev) { - sock_shutdown(nbd); + nbd_clear_sock(nbd); __invalidate_device(bdev, true); nbd_bdev_reset(bdev); if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, @@ -1468,15 +1474,20 @@ static struct nbd_config *nbd_alloc_config(void) { struct nbd_config *config; + if (!try_module_get(THIS_MODULE)) + return ERR_PTR(-ENODEV); + config = kzalloc(sizeof(struct nbd_config), GFP_NOFS); - if (!config) - return NULL; + if (!config) { + module_put(THIS_MODULE); + return ERR_PTR(-ENOMEM); + } + atomic_set(&config->recv_threads, 0); init_waitqueue_head(&config->recv_wq); init_waitqueue_head(&config->conn_wait); config->blksize = NBD_DEF_BLKSIZE; atomic_set(&config->live_connections, 0); - try_module_get(THIS_MODULE); return config; } @@ -1503,12 +1514,13 @@ static int nbd_open(struct block_device *bdev, fmode_t mode) mutex_unlock(&nbd->config_lock); goto out; } - config = nbd->config = nbd_alloc_config(); - if (!config) { - ret = -ENOMEM; + config = nbd_alloc_config(); + if (IS_ERR(config)) { + ret = PTR_ERR(config); mutex_unlock(&nbd->config_lock); goto out; } + nbd->config = config; refcount_set(&nbd->config_refs, 1); refcount_inc(&nbd->refs); mutex_unlock(&nbd->config_lock); @@ -1930,13 +1942,14 @@ again: nbd_put(nbd); return -EINVAL; } - config = nbd->config = nbd_alloc_config(); - if (!nbd->config) { + config = nbd_alloc_config(); + if (IS_ERR(config)) { mutex_unlock(&nbd->config_lock); nbd_put(nbd); printk(KERN_ERR "nbd: couldn't allocate config\n"); - return -ENOMEM; + return PTR_ERR(config); } + nbd->config = config; refcount_set(&nbd->config_refs, 1); set_bit(NBD_RT_BOUND, &config->runtime_flags); @@ -2029,6 +2042,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd) mutex_lock(&nbd->config_lock); nbd_disconnect(nbd); sock_shutdown(nbd); + wake_up(&nbd->config->conn_wait); /* * Make sure recv thread has finished, so it does not drop the last * config ref and try to destroy the workqueue from inside the work @@ -2456,6 +2470,12 @@ static void __exit nbd_cleanup(void) struct nbd_device *nbd; LIST_HEAD(del_list); + /* + * Unregister netlink interface prior to waiting + * for the completion of netlink commands. 
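The nbd_alloc_config() rework above switches to the kernel's ERR_PTR convention so callers can distinguish -ENODEV (module going away) from -ENOMEM, and it takes the module reference before the allocation so every failure path unwinds it. A minimal sketch of the pattern (the foo_* names are invented):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>

struct foo_config { /* ... */ };

static struct foo_config *foo_alloc_config(void)
{
        struct foo_config *config;

        /* Pin the module first; every later failure must drop the pin. */
        if (!try_module_get(THIS_MODULE))
                return ERR_PTR(-ENODEV);

        config = kzalloc(sizeof(*config), GFP_NOFS);
        if (!config) {
                module_put(THIS_MODULE);
                return ERR_PTR(-ENOMEM);
        }
        return config;
}

/* Caller side: propagate the encoded errno instead of assuming -ENOMEM. */
static int foo_open(void)
{
        struct foo_config *config = foo_alloc_config();

        if (IS_ERR(config))
                return PTR_ERR(config);
        /* ... */
        return 0;
}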
+ */ + genl_unregister_family(&nbd_genl_family); + nbd_dbg_close(); mutex_lock(&nbd_index_mutex); @@ -2465,13 +2485,15 @@ static void __exit nbd_cleanup(void) while (!list_empty(&del_list)) { nbd = list_first_entry(&del_list, struct nbd_device, list); list_del_init(&nbd->list); + if (refcount_read(&nbd->config_refs)) + printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n", + refcount_read(&nbd->config_refs)); if (refcount_read(&nbd->refs) != 1) printk(KERN_ERR "nbd: possibly leaking a device\n"); nbd_put(nbd); } idr_destroy(&nbd_index_idr); - genl_unregister_family(&nbd_genl_family); unregister_blkdev(NBD_MAJOR, "nbd"); } diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index bb3686c3869d..c6ba8f9f3f31 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1876,8 +1876,13 @@ static int null_add_dev(struct nullb_device *dev) blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q); mutex_lock(&lock); - nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); - dev->index = nullb->index; + rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); + if (rv < 0) { + mutex_unlock(&lock); + goto out_cleanup_zone; + } + nullb->index = rv; + dev->index = rv; mutex_unlock(&lock); blk_queue_logical_block_size(nullb->q, dev->blocksize); @@ -1889,13 +1894,16 @@ static int null_add_dev(struct nullb_device *dev) rv = null_gendisk_register(nullb); if (rv) - goto out_cleanup_zone; + goto out_ida_free; mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); mutex_unlock(&lock); return 0; + +out_ida_free: + ida_free(&nullb_indexes, nullb->index); out_cleanup_zone: null_free_zoned_dev(dev); out_cleanup_blk_queue: diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 26d25188275a..ac68c845c6cc 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -867,11 +867,12 @@ static int virtblk_probe(struct virtio_device *vdev) blk_queue_io_opt(q, blk_size * opt_io_size); if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) { - q->limits.discard_granularity = blk_size; - virtio_cread(vdev, struct virtio_blk_config, discard_sector_alignment, &v); - q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0; + if (v) + q->limits.discard_granularity = v << SECTOR_SHIFT; + else + q->limits.discard_granularity = blk_size; virtio_cread(vdev, struct virtio_blk_config, max_discard_sectors, &v); diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 040829e2d016..5eff34767b77 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -226,6 +226,9 @@ struct xen_vbd { sector_t size; unsigned int flush_support:1; unsigned int discard_secure:1; + /* Connect-time cached feature_persistent parameter value */ + unsigned int feature_gnt_persistent_parm:1; + /* Persistent grants feature negotiation result */ unsigned int feature_gnt_persistent:1; unsigned int overflow_max_grants:1; }; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 6c5e9373e91c..ddea36295931 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -157,6 +157,11 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif) return 0; } +/* Enable the persistent grants feature. 
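The null_blk hunk above stops storing ida_simple_get()'s return value as an index without checking it: a negative value is an errno, and a successfully allocated ID has to be handed back with ida_free() on any later failure. A minimal sketch of the checked pattern (the my_* names are invented):

#include <linux/idr.h>

static DEFINE_IDA(my_indexes);

struct my_dev {
        int index;
};

static int my_assign_index(struct my_dev *dev)
{
        int rv = ida_simple_get(&my_indexes, 0, 0, GFP_KERNEL);

        if (rv < 0)
                return rv;      /* errno, e.g. -ENOMEM; never use as an index */
        dev->index = rv;
        return 0;
}

static void my_release_index(struct my_dev *dev)
{
        ida_free(&my_indexes, dev->index);      /* error unwind or teardown */
}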
*/ +static bool feature_persistent = true; +module_param(feature_persistent, bool, 0644); +MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature"); + static struct xen_blkif *xen_blkif_alloc(domid_t domid) { struct xen_blkif *blkif; @@ -472,12 +477,6 @@ static void xen_vbd_free(struct xen_vbd *vbd) vbd->bdev = NULL; } -/* Enable the persistent grants feature. */ -static bool feature_persistent = true; -module_param(feature_persistent, bool, 0644); -MODULE_PARM_DESC(feature_persistent, - "Enables the persistent grants feature"); - static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, unsigned major, unsigned minor, int readonly, int cdrom) @@ -523,8 +522,6 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, if (q && blk_queue_secure_erase(q)) vbd->discard_secure = true; - vbd->feature_gnt_persistent = feature_persistent; - pr_debug("Successful creation of handle=%04x (dom=%u)\n", handle, blkif->domid); return 0; @@ -914,7 +911,7 @@ again: xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support); err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", - be->blkif->vbd.feature_gnt_persistent); + be->blkif->vbd.feature_gnt_persistent_parm); if (err) { xenbus_dev_fatal(dev, err, "writing %s/feature-persistent", dev->nodename); @@ -1091,10 +1088,11 @@ static int connect_ring(struct backend_info *be) xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); return -ENOSYS; } - if (blkif->vbd.feature_gnt_persistent) - blkif->vbd.feature_gnt_persistent = - xenbus_read_unsigned(dev->otherend, - "feature-persistent", 0); + + blkif->vbd.feature_gnt_persistent_parm = feature_persistent; + blkif->vbd.feature_gnt_persistent = + blkif->vbd.feature_gnt_persistent_parm && + xenbus_read_unsigned(dev->otherend, "feature-persistent", 0); blkif->vbd.overflow_max_grants = 0; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 47d4bb23d6f3..6f33d62331b1 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -151,6 +151,10 @@ static unsigned int xen_blkif_max_ring_order; module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444); MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring"); +static bool __read_mostly xen_blkif_trusted = true; +module_param_named(trusted, xen_blkif_trusted, bool, 0644); +MODULE_PARM_DESC(trusted, "Is the backend trusted"); + #define BLK_RING_SIZE(info) \ __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages) @@ -207,7 +211,11 @@ struct blkfront_info unsigned int feature_fua:1; unsigned int feature_discard:1; unsigned int feature_secdiscard:1; + /* Connect-time cached feature_persistent parameter */ + unsigned int feature_persistent_parm:1; + /* Persistent grants feature negotiation result */ unsigned int feature_persistent:1; + unsigned int bounce:1; unsigned int discard_granularity; unsigned int discard_alignment; /* Number of 4KB segments handled */ @@ -310,8 +318,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num) if (!gnt_list_entry) goto out_of_memory; - if (info->feature_persistent) { - granted_page = alloc_page(GFP_NOIO); + if (info->bounce) { + granted_page = alloc_page(GFP_NOIO | __GFP_ZERO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; @@ -330,7 +338,7 @@ out_of_memory: list_for_each_entry_safe(gnt_list_entry, n, &rinfo->grants, node) { list_del(&gnt_list_entry->node); - if (info->feature_persistent) + if (info->bounce) 
__free_page(gnt_list_entry->page); kfree(gnt_list_entry); i--; @@ -376,7 +384,7 @@ static struct grant *get_grant(grant_ref_t *gref_head, /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); - if (info->feature_persistent) + if (info->bounce) grant_foreign_access(gnt_list_entry, info); else { /* Grant access to the GFN passed by the caller */ @@ -400,7 +408,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head, /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); - if (!info->feature_persistent) { + if (!info->bounce) { struct page *indirect_page; /* Fetch a pre-allocated page to use for indirect grefs */ @@ -715,7 +723,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri .grant_idx = 0, .segments = NULL, .rinfo = rinfo, - .need_copy = rq_data_dir(req) && info->feature_persistent, + .need_copy = rq_data_dir(req) && info->bounce, }; /* @@ -1035,11 +1043,12 @@ static void xlvbd_flush(struct blkfront_info *info) { blk_queue_write_cache(info->rq, info->feature_flush ? true : false, info->feature_fua ? true : false); - pr_info("blkfront: %s: %s %s %s %s %s\n", + pr_info("blkfront: %s: %s %s %s %s %s %s %s\n", info->gd->disk_name, flush_info(info), "persistent grants:", info->feature_persistent ? "enabled;" : "disabled;", "indirect descriptors:", - info->max_indirect_segments ? "enabled;" : "disabled;"); + info->max_indirect_segments ? "enabled;" : "disabled;", + "bounce buffer:", info->bounce ? "enabled" : "disabled;"); } static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) @@ -1273,7 +1282,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) if (!list_empty(&rinfo->indirect_pages)) { struct page *indirect_page, *n; - BUG_ON(info->feature_persistent); + BUG_ON(info->bounce); list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); @@ -1290,7 +1299,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) 0, 0UL); rinfo->persistent_gnts_c--; } - if (info->feature_persistent) + if (info->bounce) __free_page(persistent_gnt->page); kfree(persistent_gnt); } @@ -1311,7 +1320,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) for (j = 0; j < segs; j++) { persistent_gnt = rinfo->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); - if (info->feature_persistent) + if (info->bounce) __free_page(persistent_gnt->page); kfree(persistent_gnt); } @@ -1501,7 +1510,7 @@ static int blkif_completion(unsigned long *id, data.s = s; num_sg = s->num_sg; - if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { + if (bret->operation == BLKIF_OP_READ && info->bounce) { for_each_sg(s->sg, sg, num_sg, i) { BUG_ON(sg->offset + sg->length > PAGE_SIZE); @@ -1560,7 +1569,7 @@ static int blkif_completion(unsigned long *id, * Add the used indirect page back to the list of * available pages for indirect grefs. 
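Both blkback and blkfront now sample the writable feature_persistent module parameter once, at negotiation time, and act only on the cached copy, so the value published to xenstore and the value the code uses cannot diverge if the sysfs knob flips mid-handshake. A minimal sketch of the idea (the conn_state names are invented):

#include <linux/module.h>

static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);   /* writable at runtime */

struct conn_state {
        unsigned int feature_persistent_parm:1; /* cached at connect time */
        unsigned int feature_persistent:1;      /* negotiation result */
};

static void negotiate_persistent(struct conn_state *st, bool peer_supports)
{
        /* Sample the parameter exactly once per connection... */
        st->feature_persistent_parm = feature_persistent;
        /* ...and let both xenstore and the code see the same value. */
        st->feature_persistent = st->feature_persistent_parm && peer_supports;
}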
*/ - if (!info->feature_persistent) { + if (!info->bounce) { indirect_page = s->indirect_grants[i]->page; list_add(&indirect_page->lru, &rinfo->indirect_pages); } @@ -1753,7 +1762,7 @@ static int setup_blkring(struct xenbus_device *dev, for (i = 0; i < info->nr_ring_pages; i++) rinfo->ring_ref[i] = GRANT_INVALID_REF; - sring = alloc_pages_exact(ring_size, GFP_NOIO); + sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; @@ -1843,6 +1852,12 @@ static void free_info(struct blkfront_info *info) kfree(info); } +/* Enable the persistent grants feature. */ +static bool feature_persistent = true; +module_param(feature_persistent, bool, 0644); +MODULE_PARM_DESC(feature_persistent, + "Enables the persistent grants feature"); + /* Common code used when first setting up, and when resuming. */ static int talk_to_blkback(struct xenbus_device *dev, struct blkfront_info *info) @@ -1857,6 +1872,10 @@ static int talk_to_blkback(struct xenbus_device *dev, if (!info) return -ENODEV; + /* Check if backend is trusted. */ + info->bounce = !xen_blkif_trusted || + !xenbus_read_unsigned(dev->nodename, "trusted", 1); + max_page_order = xenbus_read_unsigned(info->xbdev->otherend, "max-ring-page-order", 0); ring_page_order = min(xen_blkif_max_ring_order, max_page_order); @@ -1930,8 +1949,9 @@ again: message = "writing protocol"; goto abort_transaction; } + info->feature_persistent_parm = feature_persistent; err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", - info->feature_persistent); + info->feature_persistent_parm); if (err) dev_warn(&dev->dev, "writing persistent grants feature to xenbus"); @@ -2006,12 +2026,6 @@ static int negotiate_mq(struct blkfront_info *info) return 0; } -/* Enable the persistent grants feature. */ -static bool feature_persistent = true; -module_param(feature_persistent, bool, 0644); -MODULE_PARM_DESC(feature_persistent, - "Enables the persistent grants feature"); - /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and @@ -2078,8 +2092,6 @@ static int blkfront_probe(struct xenbus_device *dev, info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; - info->feature_persistent = feature_persistent; - /* Front end dir is a number, which is used as the id. 
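A pattern repeated through these blkfront hunks: any page whose grant makes it visible to the backend is now allocated with __GFP_ZERO, so a possibly hostile backend only ever sees data the frontend wrote there deliberately. A minimal sketch of the allocation side:

#include <linux/gfp.h>

/* A page to be granted to the other domain: zeroed so no stale kernel
 * memory leaks across the trust boundary. */
static struct page *alloc_granted_page(void)
{
        return alloc_page(GFP_NOIO | __GFP_ZERO);
}

/* Same reasoning for the multi-page shared ring. */
static void *alloc_shared_ring(size_t ring_size)
{
        return alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO);
}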
*/ info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); @@ -2283,17 +2295,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) if (err) goto out_of_memory; - if (!info->feature_persistent && info->max_indirect_segments) { + if (!info->bounce && info->max_indirect_segments) { /* - * We are using indirect descriptors but not persistent - * grants, we need to allocate a set of pages that can be + * We are using indirect descriptors but don't have a bounce + * buffer, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info); BUG_ON(!list_empty(&rinfo->indirect_pages)); for (i = 0; i < num; i++) { - struct page *indirect_page = alloc_page(GFP_KERNEL); + struct page *indirect_page = alloc_page(GFP_KERNEL | + __GFP_ZERO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &rinfo->indirect_pages); @@ -2382,10 +2395,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0)) blkfront_setup_discard(info); - if (info->feature_persistent) + if (info->feature_persistent_parm) info->feature_persistent = !!xenbus_read_unsigned(info->xbdev->otherend, "feature-persistent", 0); + if (info->feature_persistent) + info->bounce = true; indirect_segments = xenbus_read_unsigned(info->xbdev->otherend, "feature-max-indirect-segments", 0); @@ -2759,6 +2774,13 @@ static void blkfront_delay_work(struct work_struct *work) struct blkfront_info *info; bool need_schedule_work = false; + /* + * Note that when using bounce buffers but not persistent grants + * there's no need to run blkfront_delay_work because grants are + * revoked in blkif_completion or else an error is reported and the + * connection is closed. + */ + mutex_lock(&blkfront_mutex); list_for_each_entry(info, &info_list, info_list) { diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 33e3b76c4fa9..b08650417bf0 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -61,12 +61,6 @@ static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp) bool zcomp_available_algorithm(const char *comp) { - int i; - - i = sysfs_match_string(backends, comp); - if (i >= 0) - return true; - /* * Crypto does not ignore a trailing new line symbol, * so make sure you don't supply a string containing @@ -215,6 +209,11 @@ struct zcomp *zcomp_create(const char *compress) struct zcomp *comp; int error; + /* + * Crypto API will execute /sbin/modprobe if the compression module + * is not loaded yet. We must do it here, otherwise we are about to + * call /sbin/modprobe under CPU hot-plug lock. 
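The comment being added above records an ordering rule: checking the algorithm may load the compression module via /sbin/modprobe, and that must happen before zcomp takes the CPU hot-plug lock for its per-CPU stream registration. A rough sketch of the safe ordering, assuming crypto_has_comp() as the availability check and an invented registration helper:

#include <linux/crypto.h>
#include <linux/err.h>

struct zcomp;   /* opaque here */

static struct zcomp *my_zcomp_create(const char *name)
{
        /* May invoke /sbin/modprobe: keep it outside any hot-plug lock. */
        if (!crypto_has_comp(name, 0, 0))
                return ERR_PTR(-EINVAL);

        /* Per-CPU stream registration takes cpus_read_lock() internally. */
        return my_register_cpuhp_streams(name); /* hypothetical helper */
}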
+ */ if (!zcomp_available_algorithm(compress)) return ERR_PTR(-EINVAL); diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 1b9743b7f2ef..d263eac784da 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -401,6 +401,8 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { { 0x6606, "BCM4345C5" }, /* 003.006.006 */ { 0x230f, "BCM4356A2" }, /* 001.003.015 */ { 0x220e, "BCM20702A1" }, /* 001.002.014 */ + { 0x420d, "BCM4349B1" }, /* 002.002.013 */ + { 0x420e, "BCM4349B1" }, /* 002.002.014 */ { 0x4217, "BCM4329B1" }, /* 002.002.023 */ { 0x6106, "BCM4359C0" }, /* 003.001.006 */ { 0x4106, "BCM4335A0" }, /* 002.001.006 */ diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 538232b4c42a..54001ad5de9f 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -399,6 +399,18 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0bda, 0xc822), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + /* Realtek 8852CE Bluetooth devices */ + { USB_DEVICE(0x04ca, 0x4007), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04c5, 0x1675), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0cb8, 0xc558), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3587), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + /* Realtek Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), .driver_info = BTUSB_REALTEK }, @@ -416,6 +428,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, /* Additional Realtek 8723AE Bluetooth devices */ { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK }, @@ -1818,6 +1833,11 @@ static int btusb_setup_csr(struct hci_dev *hdev) rp = (struct hci_rp_read_local_version *)skb->data; + bt_dev_info(hdev, "CSR: Setting up dongle with HCI ver=%u rev=%04x; LMP ver=%u subver=%04x; manufacturer=%u", + le16_to_cpu(rp->hci_ver), le16_to_cpu(rp->hci_rev), + le16_to_cpu(rp->lmp_ver), le16_to_cpu(rp->lmp_subver), + le16_to_cpu(rp->manufacturer)); + /* Detect a wide host of Chinese controllers that aren't CSR. * * Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891 @@ -2801,6 +2821,7 @@ enum { enum { BTMTK_WMT_INVALID, BTMTK_WMT_PATCH_UNDONE, + BTMTK_WMT_PATCH_PROGRESS, BTMTK_WMT_PATCH_DONE, BTMTK_WMT_ON_UNDONE, BTMTK_WMT_ON_DONE, @@ -2816,7 +2837,7 @@ struct btmtk_wmt_hdr { struct btmtk_hci_wmt_cmd { struct btmtk_wmt_hdr hdr; - u8 data[256]; + u8 data[]; } __packed; struct btmtk_hci_wmt_evt { @@ -2919,7 +2940,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb) * to generate the event. Otherwise, the WMT event cannot return from * the device successfully. 
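Replacing the fixed data[256] with a flexible array member lets the follow-on hunk size the WMT command exactly and move it off the stack. A minimal sketch of the idiom with the kernel's overflow-safe struct_size() helper (the my_* names are invented):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_hdr {
        u8 op;
        __le16 dlen;
} __packed;

struct my_cmd {
        struct my_hdr hdr;
        u8 data[];                      /* flexible array member */
} __packed;

static struct my_cmd *my_cmd_alloc(const void *payload, size_t dlen)
{
        /* struct_size() checks the header + payload addition for overflow. */
        struct my_cmd *wc = kzalloc(struct_size(wc, data, dlen), GFP_KERNEL);

        if (!wc)
                return NULL;
        memcpy(wc->data, payload, dlen);
        return wc;
}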
*/ - udelay(100); + udelay(500); usb_anchor_urb(urb, &data->ctrl_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); @@ -2995,7 +3016,7 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev, struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; u32 hlen, status = BTMTK_WMT_INVALID; struct btmtk_hci_wmt_evt *wmt_evt; - struct btmtk_hci_wmt_cmd wc; + struct btmtk_hci_wmt_cmd *wc; struct btmtk_wmt_hdr *hdr; int err; @@ -3004,24 +3025,42 @@ if (hlen > 255) return -EINVAL; - hdr = (struct btmtk_wmt_hdr *)&wc; + wc = kzalloc(hlen, GFP_KERNEL); + if (!wc) + return -ENOMEM; + + hdr = &wc->hdr; hdr->dir = 1; hdr->op = wmt_params->op; hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); hdr->flag = wmt_params->flag; - memcpy(wc.data, wmt_params->data, wmt_params->dlen); + memcpy(wc->data, wmt_params->data, wmt_params->dlen); set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc); + /* WMT cmd/event does not follow the generic HCI cmd/event handling; + * the control pipe has to be polled continuously until the host has + * received the WMT event. Explicitly acquire a PM counter on the USB + * interface so that it cannot be auto-suspended while a WMT cmd/event + * is in progress. + */ + err = usb_autopm_get_interface(data->intf); + if (err < 0) + goto err_free_wc; + + err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); if (err < 0) { clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - return err; + usb_autopm_put_interface(data->intf); + goto err_free_wc; } /* Submit control IN URB on demand to process the WMT event */ err = btusb_mtk_submit_wmt_recv_urb(hdev); + + usb_autopm_put_interface(data->intf); + if (err < 0) return err; @@ -3039,13 +3078,14 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev, if (err == -EINTR) { bt_dev_err(hdev, "Execution of wmt command interrupted"); clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - return err; + goto err_free_wc; } if (err) { bt_dev_err(hdev, "Execution of wmt command timed out"); clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - return -ETIMEDOUT; + err = -ETIMEDOUT; + goto err_free_wc; } /* Parse and handle the return WMT event */ @@ -3081,7 +3121,8 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev, err_free_skb: kfree_skb(data->evt_skb); data->evt_skb = NULL; - +err_free_wc: + kfree(wc); return err; } @@ -3223,9 +3264,9 @@ err_free_buf: return err; } -static int btusb_mtk_id_get(struct btusb_data *data, u32 *id) +static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id) { - return btusb_mtk_reg_read(data, 0x80000008, id); + return btusb_mtk_reg_read(data, reg, id); } static int btusb_mtk_setup(struct hci_dev *hdev) @@ -3243,7 +3284,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev) calltime = ktime_get(); - err = btusb_mtk_id_get(data, &dev_id); + err = btusb_mtk_id_get(data, 0x80000008, &dev_id); if (err < 0) { bt_dev_err(hdev, "Failed to get device id (%d)", err); return err; } diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 3de0ce5386ac..b57e2e42a912 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -1490,8 +1490,10 @@ static const struct of_device_id bcm_bluetooth_of_match[] = { { .compatible = "brcm,bcm4345c5" }, { .compatible = "brcm,bcm4330-bt" }, { .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data }, + { .compatible = "brcm,bcm4349-bt", .data = &bcm43438_device_data }, { .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data }, { .compatible =
"brcm,bcm4335a0" }, + { .compatible = "infineon,cyw55572-bt" }, { }, }; MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match); diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index b20a40fab83e..d5d2feef6c52 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -1214,7 +1214,11 @@ static struct platform_driver intel_driver = { int __init intel_init(void) { - platform_driver_register(&intel_driver); + int err; + + err = platform_driver_register(&intel_driver); + if (err) + return err; return hci_uart_register_proto(&intel_proto); } diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 8ff9f4482907..12218d388869 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -495,6 +495,11 @@ static int hci_uart_tty_open(struct tty_struct *tty) BT_ERR("Can't allocate control structure"); return -ENFILE; } + if (percpu_init_rwsem(&hu->proto_lock)) { + BT_ERR("Can't allocate semaphore structure"); + kfree(hu); + return -ENOMEM; + } tty->disc_data = hu; hu->tty = tty; @@ -507,8 +512,6 @@ static int hci_uart_tty_open(struct tty_struct *tty) INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); - percpu_init_rwsem(&hu->proto_lock); - /* Flush any pending characters in the driver */ tty_driver_flush_buffer(tty); diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index dc7ee5dd2eec..eea18aed17f8 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -689,9 +689,9 @@ static int qca_close(struct hci_uart *hu) skb_queue_purge(&qca->tx_wait_q); skb_queue_purge(&qca->txq); skb_queue_purge(&qca->rx_memdump_q); - del_timer(&qca->tx_idle_timer); - del_timer(&qca->wake_retrans_timer); destroy_workqueue(qca->workqueue); + del_timer_sync(&qca->tx_idle_timer); + del_timer_sync(&qca->wake_retrans_timer); qca->hu = NULL; kfree_skb(qca->rx_skb); diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index e9a44ab3812d..f2e2e553d4de 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -301,11 +301,12 @@ int hci_uart_register_device(struct hci_uart *hu, serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops); + if (percpu_init_rwsem(&hu->proto_lock)) + return -ENOMEM; + err = serdev_device_open(hu->serdev); if (err) - return err; - - percpu_init_rwsem(&hu->proto_lock); + goto err_rwsem; err = p->open(hu); if (err) @@ -375,6 +376,8 @@ err_alloc: p->close(hu); err_open: serdev_device_close(hu->serdev); +err_rwsem: + percpu_free_rwsem(&hu->proto_lock); return err; } EXPORT_SYMBOL_GPL(hci_uart_register_device); @@ -396,5 +399,6 @@ void hci_uart_unregister_device(struct hci_uart *hu) clear_bit(HCI_UART_PROTO_READY, &hu->flags); serdev_device_close(hu->serdev); } + percpu_free_rwsem(&hu->proto_lock); } EXPORT_SYMBOL_GPL(hci_uart_unregister_device); diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c index 378f5d62a991..e7eaa8784fee 100644 --- a/drivers/bus/hisi_lpc.c +++ b/drivers/bus/hisi_lpc.c @@ -503,13 +503,13 @@ static int hisi_lpc_acpi_probe(struct device *hostdev) { struct acpi_device *adev = ACPI_COMPANION(hostdev); struct acpi_device *child; + struct platform_device *pdev; int ret; /* Only consider the children of the host */ list_for_each_entry(child, &adev->children, node) { const char *hid = acpi_device_hid(child); const struct hisi_lpc_acpi_cell *cell; - struct platform_device *pdev; const struct resource *res; bool found = false; int num_res; @@ -571,22 +571,24 
@@ static int hisi_lpc_acpi_probe(struct device *hostdev) ret = platform_device_add_resources(pdev, res, num_res); if (ret) - goto fail; + goto fail_put_device; ret = platform_device_add_data(pdev, cell->pdata, cell->pdata_size); if (ret) - goto fail; + goto fail_put_device; ret = platform_device_add(pdev); if (ret) - goto fail; + goto fail_put_device; acpi_device_set_enumerated(child); } return 0; +fail_put_device: + platform_device_put(pdev); fail: hisi_lpc_acpi_remove(hostdev); return ret; diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 1bb00a959c67..f8c29b888e6b 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -224,6 +224,8 @@ static struct sunxi_rsb_device *sunxi_rsb_device_create(struct sunxi_rsb *rsb, dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev)); + return rdev; + err_device_add: put_device(&rdev->dev); @@ -266,6 +268,9 @@ EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register); /* common code that starts a transfer */ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) { + u32 int_mask, status; + bool timeout; + if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) { dev_dbg(rsb->dev, "RSB transfer still in progress\n"); return -EBUSY; @@ -273,13 +278,23 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) reinit_completion(&rsb->complete); - writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER, - rsb->regs + RSB_INTE); + int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER; + writel(int_mask, rsb->regs + RSB_INTE); writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB, rsb->regs + RSB_CTRL); - if (!wait_for_completion_io_timeout(&rsb->complete, - msecs_to_jiffies(100))) { + if (irqs_disabled()) { + timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS, + status, (status & int_mask), + 10, 100000); + writel(status, rsb->regs + RSB_INTS); + } else { + timeout = !wait_for_completion_io_timeout(&rsb->complete, + msecs_to_jiffies(100)); + status = rsb->status; + } + + if (timeout) { dev_dbg(rsb->dev, "RSB timeout\n"); /* abort the transfer */ @@ -291,18 +306,18 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) return -ETIMEDOUT; } - if (rsb->status & RSB_INTS_LOAD_BSY) { + if (status & RSB_INTS_LOAD_BSY) { dev_dbg(rsb->dev, "RSB busy\n"); return -EBUSY; } - if (rsb->status & RSB_INTS_TRANS_ERR) { - if (rsb->status & RSB_INTS_TRANS_ERR_ACK) { + if (status & RSB_INTS_TRANS_ERR) { + if (status & RSB_INTS_TRANS_ERR_ACK) { dev_dbg(rsb->dev, "RSB slave nack\n"); return -EINVAL; } - if (rsb->status & RSB_INTS_TRANS_ERR_DATA) { + if (status & RSB_INTS_TRANS_ERR_DATA) { dev_dbg(rsb->dev, "RSB transfer data error\n"); return -EIO; } diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 18f0650c5d40..4ee20be76508 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -3130,13 +3130,27 @@ static int sysc_check_disabled_devices(struct sysc *ddata) */ static int sysc_check_active_timer(struct sysc *ddata) { + int error; + if (ddata->cap->type != TI_SYSC_OMAP2_TIMER && ddata->cap->type != TI_SYSC_OMAP4_TIMER) return 0; + /* + * Quirk for omap3 beagleboard revision A to B4 to use gpt12. + * Revision C and later are fixed with commit 23885389dbbb ("ARM: + * dts: Fix timer regression for beagleboard revision c"). This all + * can be dropped if we stop supporting old beagleboard revisions + * A to B4 at some point. 
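readl_poll_timeout_atomic(), which the sunxi-rsb hunk above falls back to when irqs_disabled(), busy-waits with udelay() rather than sleeping, so it is usable where wait_for_completion() is not (for example a PMIC transfer issued from a poweroff handler). A minimal usage sketch (the register and bit names are invented):

#include <linux/bits.h>
#include <linux/iopoll.h>

#define MY_STATUS_REG   0x1c
#define MY_DONE_BIT     BIT(0)

static int my_wait_done_atomic(void __iomem *base)
{
        u32 status;

        /* Sample every 10 us for up to 100 ms; returns 0 once the
         * condition holds, -ETIMEDOUT otherwise. Never sleeps. */
        return readl_poll_timeout_atomic(base + MY_STATUS_REG, status,
                                         status & MY_DONE_BIT, 10, 100000);
}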
+ */ + if (sysc_soc->soc == SOC_3430) + error = -ENXIO; + else + error = -EBUSY; + if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) && (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)) - return -ENXIO; + return error; return 0; } @@ -3277,7 +3291,9 @@ static int sysc_remove(struct platform_device *pdev) struct sysc *ddata = platform_get_drvdata(pdev); int error; - cancel_delayed_work_sync(&ddata->idle_work); + /* Device can still be enabled, see deferred idle quirk in probe */ + if (cancel_delayed_work_sync(&ddata->idle_work)) + ti_sysc_idle(&ddata->idle_work.work); error = pm_runtime_get_sync(ddata->dev); if (error < 0) { diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index d229a2d0c017..b4e65d1ede26 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -471,28 +471,41 @@ config ADI and SSM (Silicon Secured Memory). Intended consumers of this driver include crash and makedumpfile. -endmenu - config RANDOM_TRUST_CPU - bool "Trust the CPU manufacturer to initialize Linux's CRNG" + bool "Initialize RNG using CPU RNG instructions" + default y depends on ARCH_RANDOM - default n help - Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or - RDRAND, IBM for the S390 and Power PC architectures) is trustworthy - for the purposes of initializing Linux's CRNG. Since this is not - something that can be independently audited, this amounts to trusting - that CPU manufacturer (perhaps with the insistence or mandate - of a Nation State's intelligence or law enforcement agencies) - has not installed a hidden back door to compromise the CPU's - random number generation facilities. This can also be configured - at boot with "random.trust_cpu=on/off". + Initialize the RNG using random numbers supplied by the CPU's + RNG instructions (e.g. RDRAND), if supported and available. These + random numbers are never used directly, but are rather hashed into + the main input pool, and this happens regardless of whether or not + this option is enabled. Instead, this option controls whether + they are credited and hence can initialize the RNG. Additionally, + other sources of randomness are always used, regardless of this + setting. Enabling this implies trusting that the CPU can supply high + quality and non-backdoored random numbers. + + Say Y here unless you have reason to mistrust your CPU or believe + its RNG facilities may be faulty. This may also be configured at + boot time with "random.trust_cpu=on/off". config RANDOM_TRUST_BOOTLOADER - bool "Trust the bootloader to initialize Linux's CRNG" + bool "Initialize RNG using bootloader-supplied seed" + default y help - Some bootloaders can provide entropy to increase the kernel's initial - device randomness. Say Y here to assume the entropy provided by the - booloader is trustworthy so it will be added to the kernel's entropy - pool. Otherwise, say N here so it will be regarded as device input that - only mixes the entropy pool. + Initialize the RNG using a seed supplied by the bootloader or boot + environment (e.g. EFI or a bootloader-generated device tree). This + seed is not used directly, but is rather hashed into the main input + pool, and this happens regardless of whether or not this option is + enabled. Instead, this option controls whether the seed is credited + and hence can initialize the RNG. Additionally, other sources of + randomness are always used, regardless of this setting. Enabling + this implies trusting that the bootloader can supply high quality and + non-backdoored seeds.
+ + Say Y here unless you have reason to mistrust your bootloader or + believe its RNG facilities may be faulty. This may also be configured + at boot time with "random.trust_bootloader=on/off". + +endmenu diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 8c1c47dd9f46..5749998feaa4 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index 61c844baf26e..9b182e5bfa87 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c @@ -272,13 +272,6 @@ static int imx_rngc_probe(struct platform_device *pdev) goto err; } - ret = devm_request_irq(&pdev->dev, - irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); - if (ret) { - dev_err(rngc->dev, "Can't get interrupt working.\n"); - goto err; - } - init_completion(&rngc->rng_op_done); rngc->rng.name = pdev->name; @@ -292,6 +285,13 @@ static int imx_rngc_probe(struct platform_device *pdev) imx_rngc_irq_mask_clear(rngc); + ret = devm_request_irq(&pdev->dev, + irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); + if (ret) { + dev_err(rngc->dev, "Can't get interrupt working.\n"); + return ret; + } + if (self_test) { ret = imx_rngc_self_test(rngc); if (ret) { diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index e0d77fa048fb..f06e4f95114f 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -92,7 +92,7 @@ static int __maybe_unused omap_rom_rng_runtime_resume(struct device *dev) r = ddata->rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT); if (r != 0) { - clk_disable(ddata->clk); + clk_disable_unprepare(ddata->clk); dev_err(dev, "HW init failed: %d\n", r); return -EIO; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 8f147274f826..05e7339752ac 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -11,8 +11,8 @@ * Copyright 2002 MontaVista Software Inc. */ -#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: " -#define dev_fmt pr_fmt +#define pr_fmt(fmt) "IPMI message handler: " fmt +#define dev_fmt(fmt) pr_fmt(fmt) #include #include diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 3de679723648..477139749513 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -840,6 +840,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, break; case SSIF_GETTING_EVENTS: + if (!msg) { + /* Should never happen, but just in case. */ + dev_warn(&ssif_info->client->dev, + "No message set while getting events\n"); + ipmi_ssif_unlock_cond(ssif_info, flags); + break; + } + if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { /* Error getting event, probably done. */ msg->done(msg); @@ -864,6 +872,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, break; case SSIF_GETTING_MESSAGES: + if (!msg) { + /* Should never happen, but just in case. */ + dev_warn(&ssif_info->client->dev, + "No message set while getting messages\n"); + ipmi_ssif_unlock_cond(ssif_info, flags); + break; + } + if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { /* Error getting event, probably done. 
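The ipmi_msghandler hunk above restores the conventional pr_fmt shape: the macro must splice the prefix ahead of the caller's format string, and dev_fmt must accept fmt as a parameter for the expansion to work. A minimal sketch of the convention:

/* Define before any include that pulls in printk.h / dev_printk.h. */
#define pr_fmt(fmt) "mydrv: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)

#include <linux/printk.h>

static void announce(void)
{
        pr_info("ready\n");             /* logs "mydrv: ready" */
}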
*/ msg->done(msg); @@ -887,6 +903,13 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, deliver_recv_msg(ssif_info, msg); } break; + + default: + /* Should never happen, but just in case. */ + dev_warn(&ssif_info->client->dev, + "Invalid state in message done handling: %d\n", + ssif_info->ssif_state); + ipmi_ssif_unlock_cond(ssif_info, flags); } flags = ipmi_ssif_lock_cond(ssif_info, &oflags); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 94c2b556cf97..7d483c332348 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -981,8 +981,8 @@ static const struct memdev { #endif [5] = { "zero", 0666, &zero_fops, 0 }, [7] = { "full", 0666, &full_fops, 0 }, - [8] = { "random", 0666, &random_fops, 0 }, - [9] = { "urandom", 0666, &urandom_fops, 0 }, + [8] = { "random", 0666, &random_fops, FMODE_NOWAIT }, + [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT }, #ifdef CONFIG_PRINTK [11] = { "kmsg", 0644, &kmsg_fops, 0 }, #endif diff --git a/drivers/char/random.c b/drivers/char/random.c index 5f541c946559..56bcf9633751 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1,310 +1,26 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* - * random.c -- A strong random number generator - * - * Copyright (C) 2017 Jason A. Donenfeld . All - * Rights Reserved. - * + * Copyright (C) 2017-2022 Jason A. Donenfeld . All Rights Reserved. * Copyright Matt Mackall , 2003, 2004, 2005 + * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved. * - * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All - * rights reserved. + * This driver produces cryptographically secure pseudorandom data. It is divided + * into roughly six sections, each with a section header: * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, and the entire permission notice in its entirety, - * including the disclaimer of warranties. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. + * - Initialization and readiness waiting. + * - Fast key erasure RNG, the "crng". + * - Entropy accumulation and extraction routines. + * - Entropy collection routines. + * - Userspace reader/writer interfaces. + * - Sysctl interface. * - * ALTERNATIVELY, this product may be distributed under the terms of - * the GNU General Public License, in which case the provisions of the GPL are - * required INSTEAD OF the above restrictions. (This clause is - * necessary due to a potential bad interaction between the GPL and - * the restrictions contained in a BSD-style copyright.) - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF - * WHICH ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT - * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - */ - -/* - * (now, with legal B.S. out of the way.....) - * - * This routine gathers environmental noise from device drivers, etc., - * and returns good random numbers, suitable for cryptographic use. - * Besides the obvious cryptographic uses, these numbers are also good - * for seeding TCP sequence numbers, and other places where it is - * desirable to have numbers which are not only random, but hard to - * predict by an attacker. - * - * Theory of operation - * =================== - * - * Computers are very predictable devices. Hence it is extremely hard - * to produce truly random numbers on a computer --- as opposed to - * pseudo-random numbers, which can easily generated by using a - * algorithm. Unfortunately, it is very easy for attackers to guess - * the sequence of pseudo-random number generators, and for some - * applications this is not acceptable. So instead, we must try to - * gather "environmental noise" from the computer's environment, which - * must be hard for outside attackers to observe, and use that to - * generate random numbers. In a Unix environment, this is best done - * from inside the kernel. - * - * Sources of randomness from the environment include inter-keyboard - * timings, inter-interrupt timings from some interrupts, and other - * events which are both (a) non-deterministic and (b) hard for an - * outside observer to measure. Randomness from these sources are - * added to an "entropy pool", which is mixed using a CRC-like function. - * This is not cryptographically strong, but it is adequate assuming - * the randomness is not chosen maliciously, and it is fast enough that - * the overhead of doing it on every interrupt is very reasonable. - * As random bytes are mixed into the entropy pool, the routines keep - * an *estimate* of how many bits of randomness have been stored into - * the random number generator's internal state. - * - * When random bytes are desired, they are obtained by taking the SHA - * hash of the contents of the "entropy pool". The SHA hash avoids - * exposing the internal state of the entropy pool. It is believed to - * be computationally infeasible to derive any useful information - * about the input of SHA from its output. Even if it is possible to - * analyze SHA in some clever way, as long as the amount of data - * returned from the generator is less than the inherent entropy in - * the pool, the output data is totally unpredictable. For this - * reason, the routine decreases its internal estimate of how many - * bits of "true randomness" are contained in the entropy pool as it - * outputs random numbers. - * - * If this estimate goes to zero, the routine can still generate - * random numbers; however, an attacker may (at least in theory) be - * able to infer the future output of the generator from prior - * outputs. This requires successful cryptanalysis of SHA, which is - * not believed to be feasible, but there is a remote possibility. 
- * Nonetheless, these numbers should be useful for the vast majority - * of purposes. - * - * Exported interfaces ---- output - * =============================== - * - * There are four exported interfaces; two for use within the kernel, - * and two or use from userspace. - * - * Exported interfaces ---- userspace output - * ----------------------------------------- - * - * The userspace interfaces are two character devices /dev/random and - * /dev/urandom. /dev/random is suitable for use when very high - * quality randomness is desired (for example, for key generation or - * one-time pads), as it will only return a maximum of the number of - * bits of randomness (as estimated by the random number generator) - * contained in the entropy pool. - * - * The /dev/urandom device does not have this limit, and will return - * as many bytes as are requested. As more and more random bytes are - * requested without giving time for the entropy pool to recharge, - * this will result in random numbers that are merely cryptographically - * strong. For many applications, however, this is acceptable. - * - * Exported interfaces ---- kernel output - * -------------------------------------- - * - * The primary kernel interface is - * - * void get_random_bytes(void *buf, int nbytes); - * - * This interface will return the requested number of random bytes, - * and place it in the requested buffer. This is equivalent to a - * read from /dev/urandom. - * - * For less critical applications, there are the functions: - * - * u32 get_random_u32() - * u64 get_random_u64() - * unsigned int get_random_int() - * unsigned long get_random_long() - * - * These are produced by a cryptographic RNG seeded from get_random_bytes, - * and so do not deplete the entropy pool as much. These are recommended - * for most in-kernel operations *if the result is going to be stored in - * the kernel*. - * - * Specifically, the get_random_int() family do not attempt to do - * "anti-backtracking". If you capture the state of the kernel (e.g. - * by snapshotting the VM), you can figure out previous get_random_int() - * return values. But if the value is stored in the kernel anyway, - * this is not a problem. - * - * It *is* safe to expose get_random_int() output to attackers (e.g. as - * network cookies); given outputs 1..n, it's not feasible to predict - * outputs 0 or n+1. The only concern is an attacker who breaks into - * the kernel later; the get_random_int() engine is not reseeded as - * often as the get_random_bytes() one. - * - * get_random_bytes() is needed for keys that need to stay secret after - * they are erased from the kernel. For example, any key that will - * be wrapped and stored encrypted. And session encryption keys: we'd - * like to know that after the session is closed and the keys erased, - * the plaintext is unrecoverable to someone who recorded the ciphertext. - * - * But for network ports/cookies, stack canaries, PRNG seeds, address - * space layout randomization, session *authentication* keys, or other - * applications where the sensitive data is stored in the kernel in - * plaintext for as long as it's sensitive, the get_random_int() family - * is just fine. - * - * Consider ASLR. We want to keep the address space secret from an - * outside attacker while the process is running, but once the address - * space is torn down, it's of no use to an attacker any more. 
And it's - * stored in kernel data structures as long as it's alive, so worrying - * about an attacker's ability to extrapolate it from the get_random_int() - * CRNG is silly. - * - * Even some cryptographic keys are safe to generate with get_random_int(). - * In particular, keys for SipHash are generally fine. Here, knowledge - * of the key authorizes you to do something to a kernel object (inject - * packets to a network connection, or flood a hash table), and the - * key is stored with the object being protected. Once it goes away, - * we no longer care if anyone knows the key. - * - * prandom_u32() - * ------------- - * - * For even weaker applications, see the pseudorandom generator - * prandom_u32(), prandom_max(), and prandom_bytes(). If the random - * numbers aren't security-critical at all, these are *far* cheaper. - * Useful for self-tests, random error simulation, randomized backoffs, - * and any other application where you trust that nobody is trying to - * maliciously mess with you by guessing the "random" numbers. - * - * Exported interfaces ---- input - * ============================== - * - * The current exported interfaces for gathering environmental noise - * from the devices are: - * - * void add_device_randomness(const void *buf, unsigned int size); - * void add_input_randomness(unsigned int type, unsigned int code, - * unsigned int value); - * void add_interrupt_randomness(int irq, int irq_flags); - * void add_disk_randomness(struct gendisk *disk); - * - * add_device_randomness() is for adding data to the random pool that - * is likely to differ between two devices (or possibly even per boot). - * This would be things like MAC addresses or serial numbers, or the - * read-out of the RTC. This does *not* add any actual entropy to the - * pool, but it initializes the pool to different values for devices - * that might otherwise be identical and have very little entropy - * available to them (particularly common in the embedded world). - * - * add_input_randomness() uses the input layer interrupt timing, as well as - * the event type information from the hardware. - * - * add_interrupt_randomness() uses the interrupt timing as random - * inputs to the entropy pool. Using the cycle counters and the irq source - * as inputs, it feeds the randomness roughly once a second. - * - * add_disk_randomness() uses what amounts to the seek time of block - * layer request events, on a per-disk_devt basis, as input to the - * entropy pool. Note that high-speed solid state drives with very low - * seek times do not make for good sources of entropy, as their seek - * times are usually fairly consistent. - * - * All of these routines try to estimate how many bits of randomness a - * particular randomness source. They do this by keeping track of the - * first and second order deltas of the event timings. - * - * Ensuring unpredictability at system startup - * ============================================ - * - * When any operating system starts up, it will go through a sequence - * of actions that are fairly predictable by an adversary, especially - * if the start-up does not involve interaction with a human operator. - * This reduces the actual number of bits of unpredictability in the - * entropy pool below the value in entropy_count. In order to - * counteract this effect, it helps to carry information in the - * entropy pool across shut-downs and start-ups. 
To do this, put the - * following lines an appropriate script which is run during the boot - * sequence: - * - * echo "Initializing random number generator..." - * random_seed=/var/run/random-seed - * # Carry a random seed from start-up to start-up - * # Load and then save the whole entropy pool - * if [ -f $random_seed ]; then - * cat $random_seed >/dev/urandom - * else - * touch $random_seed - * fi - * chmod 600 $random_seed - * dd if=/dev/urandom of=$random_seed count=1 bs=512 - * - * and the following lines in an appropriate script which is run as - * the system is shutdown: - * - * # Carry a random seed from shut-down to start-up - * # Save the whole entropy pool - * echo "Saving random seed..." - * random_seed=/var/run/random-seed - * touch $random_seed - * chmod 600 $random_seed - * dd if=/dev/urandom of=$random_seed count=1 bs=512 - * - * For example, on most modern systems using the System V init - * scripts, such code fragments would be found in - * /etc/rc.d/init.d/random. On older Linux systems, the correct script - * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0. - * - * Effectively, these commands cause the contents of the entropy pool - * to be saved at shut-down time and reloaded into the entropy pool at - * start-up. (The 'dd' in the addition to the bootup script is to - * make sure that /etc/random-seed is different for every start-up, - * even if the system crashes without executing rc.0.) Even with - * complete knowledge of the start-up activities, predicting the state - * of the entropy pool requires knowledge of the previous history of - * the system. - * - * Configuring the /dev/random driver under Linux - * ============================================== - * - * The /dev/random driver under Linux uses minor numbers 8 and 9 of - * the /dev/mem major number (#1). So if your system does not have - * /dev/random and /dev/urandom created already, they can be created - * by using the commands: - * - * mknod /dev/random c 1 8 - * mknod /dev/urandom c 1 9 - * - * Acknowledgements: - * ================= - * - * Ideas for constructing this random number generator were derived - * from Pretty Good Privacy's random number generator, and from private - * discussions with Phil Karn. Colin Plumb provided a faster random - * number generator, which speed up the mixing function of the entropy - * pool, taken from PGPfone. Dale Worley has also contributed many - * useful ideas and suggestions to improve this driver. - * - * Any flaws in the design are solely my responsibility, and should - * not be attributed to the Phil, Colin, or any of authors of PGP. - * - * Further background information on this topic may be obtained from - * RFC 1750, "Randomness Recommendations for Security", by Donald - * Eastlake, Steve Crocker, and Jeff Schiller. + * The high level overview is that there is one input pool, into which + * various pieces of data are hashed. Prior to initialization, some of that + * data is then "credited" as having a certain number of bits of entropy. + * When enough bits of entropy are available, the hash is finalized and + * handed as a key to a stream cipher that expands it indefinitely for + * various consumers. This key is periodically refreshed as the various + * entropy collectors, described below, add data to the input pool. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -327,7 +43,6 @@ #include #include #include -#include #include #include #include @@ -335,804 +50,806 @@ #include #include #include -#include -#include - -#include #include +#include +#include +#include +#include +#include #include #include #include -#define CREATE_TRACE_POINTS -#include - -/* #define ADD_INTERRUPT_BENCH */ - -/* - * Configuration information - */ -#define INPUT_POOL_SHIFT 12 -#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5)) -#define OUTPUT_POOL_SHIFT 10 -#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5)) -#define EXTRACT_SIZE 10 - - -#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long)) - -/* - * To allow fractional bits to be tracked, the entropy_count field is - * denominated in units of 1/8th bits. - * - * 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in - * credit_entropy_bits() needs to be 64 bits wide. - */ -#define ENTROPY_SHIFT 3 -#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT) - -/* - * If the entropy count falls under this number of bits, then we - * should wake up processes which are selecting or polling on write - * access to /dev/random. - */ -static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; - -/* - * Originally, we used a primitive polynomial of degree .poolwords - * over GF(2). The taps for various sizes are defined below. They - * were chosen to be evenly spaced except for the last tap, which is 1 - * to get the twisting happening as fast as possible. - * - * For the purposes of better mixing, we use the CRC-32 polynomial as - * well to make a (modified) twisted Generalized Feedback Shift - * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR - * generators. ACM Transactions on Modeling and Computer Simulation - * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted - * GFSR generators II. ACM Transactions on Modeling and Computer - * Simulation 4:254-266) - * - * Thanks to Colin Plumb for suggesting this. - * - * The mixing operation is much less sensitive than the output hash, - * where we use SHA-1. All that we want of mixing operation is that - * it be a good non-cryptographic hash; i.e. it not produce collisions - * when fed "random" data of the sort we expect to see. As long as - * the pool state differs for different inputs, we have preserved the - * input entropy and done a good job. The fact that an intelligent - * attacker can construct inputs that will produce controlled - * alterations to the pool's state is not important because we don't - * consider such inputs to contribute any randomness. The only - * property we need with respect to them is that the attacker can't - * increase his/her knowledge of the pool's state. Since all - * additions are reversible (knowing the final state and the input, - * you can reconstruct the initial state), if an attacker has any - * uncertainty about the initial state, he/she can only shuffle that - * uncertainty about, but never cause any collisions (which would - * decrease the uncertainty). - * - * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and - * Videau in their paper, "The Linux Pseudorandom Number Generator - * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their - * paper, they point out that we are not using a true Twisted GFSR, - * since Matsumoto & Kurita used a trinomial feedback polynomial (that - * is, with only three taps, instead of the six that we are using). 
- * As a result, the resulting polynomial is neither primitive nor - * irreducible, and hence does not have a maximal period over - * GF(2**32). They suggest a slight change to the generator - * polynomial which improves the resulting TGFSR polynomial to be - * irreducible, which we have made here. - */ -static const struct poolinfo { - int poolbitshift, poolwords, poolbytes, poolfracbits; -#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5) - int tap1, tap2, tap3, tap4, tap5; -} poolinfo_table[] = { - /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */ - /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */ - { S(128), 104, 76, 51, 25, 1 }, -}; - -/* - * Static global variables - */ -static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); -static struct fasync_struct *fasync; - -static DEFINE_SPINLOCK(random_ready_list_lock); -static LIST_HEAD(random_ready_list); - -struct crng_state { - __u32 state[16]; - unsigned long init_time; - spinlock_t lock; -}; - -static struct crng_state primary_crng = { - .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock), -}; - -/* - * crng_init = 0 --> Uninitialized - * 1 --> Initialized - * 2 --> Initialized from input_pool - * - * crng_init is protected by primary_crng->lock, and only increases - * its value (from 0->1->2). - */ -static int crng_init = 0; -static bool crng_need_final_init = false; -#define crng_ready() (likely(crng_init > 1)) -static int crng_init_cnt = 0; -static unsigned long crng_global_init_time = 0; -#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE) -static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]); -static void _crng_backtrack_protect(struct crng_state *crng, - __u8 tmp[CHACHA_BLOCK_SIZE], int used); -static void process_random_ready_list(void); -static void _get_random_bytes(void *buf, int nbytes); - -static struct ratelimit_state unseeded_warning = - RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); -static struct ratelimit_state urandom_warning = - RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); - -static int ratelimit_disable __read_mostly; - -module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); -MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); - -/********************************************************************** - * - * OS independent entropy store. Here are the functions which handle - * storing entropy in an entropy pool. 
- * - **********************************************************************/ - -struct entropy_store; -struct entropy_store { - /* read-only data: */ - const struct poolinfo *poolinfo; - __u32 *pool; - const char *name; - - /* read-write data: */ - spinlock_t lock; - unsigned short add_ptr; - unsigned short input_rotate; - int entropy_count; - unsigned int initialized:1; - unsigned int last_data_init:1; - __u8 last_data[EXTRACT_SIZE]; -}; - -static ssize_t extract_entropy(struct entropy_store *r, void *buf, - size_t nbytes, int min, int rsvd); -static ssize_t _extract_entropy(struct entropy_store *r, void *buf, - size_t nbytes, int fips); - -static void crng_reseed(struct crng_state *crng, struct entropy_store *r); -static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy; - -static struct entropy_store input_pool = { - .poolinfo = &poolinfo_table[0], - .name = "input", - .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), - .pool = input_pool_data -}; - -static __u32 const twist_table[8] = { - 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, - 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; - -/* - * This function adds bytes into the entropy "pool". It does not - * update the entropy estimate. The caller should call - * credit_entropy_bits if this is appropriate. - * - * The pool is stirred with a primitive polynomial of the appropriate - * degree, and then twisted. We twist by three bits at a time because - * it's cheap to do so and helps slightly in the expected case where - * the entropy is concentrated in the low-order bits. - */ -static void _mix_pool_bytes(struct entropy_store *r, const void *in, - int nbytes) -{ - unsigned long i, tap1, tap2, tap3, tap4, tap5; - int input_rotate; - int wordmask = r->poolinfo->poolwords - 1; - const char *bytes = in; - __u32 w; - - tap1 = r->poolinfo->tap1; - tap2 = r->poolinfo->tap2; - tap3 = r->poolinfo->tap3; - tap4 = r->poolinfo->tap4; - tap5 = r->poolinfo->tap5; - - input_rotate = r->input_rotate; - i = r->add_ptr; - - /* mix one byte at a time to simplify size handling and churn faster */ - while (nbytes--) { - w = rol32(*bytes++, input_rotate); - i = (i - 1) & wordmask; - - /* XOR in the various taps */ - w ^= r->pool[i]; - w ^= r->pool[(i + tap1) & wordmask]; - w ^= r->pool[(i + tap2) & wordmask]; - w ^= r->pool[(i + tap3) & wordmask]; - w ^= r->pool[(i + tap4) & wordmask]; - w ^= r->pool[(i + tap5) & wordmask]; - - /* Mix the result back in with a twist */ - r->pool[i] = (w >> 3) ^ twist_table[w & 7]; - - /* - * Normally, we add 7 bits of rotation to the pool. - * At the beginning of the pool, add an extra 7 bits - * rotation, so that successive passes spread the - * input bits across the pool evenly. - */ - input_rotate = (input_rotate + (i ? 7 : 14)) & 31; - } - - r->input_rotate = input_rotate; - r->add_ptr = i; -} - -static void __mix_pool_bytes(struct entropy_store *r, const void *in, - int nbytes) -{ - trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_); - _mix_pool_bytes(r, in, nbytes); -} - -static void mix_pool_bytes(struct entropy_store *r, const void *in, - int nbytes) -{ - unsigned long flags; - - trace_mix_pool_bytes(r->name, nbytes, _RET_IP_); - spin_lock_irqsave(&r->lock, flags); - _mix_pool_bytes(r, in, nbytes); - spin_unlock_irqrestore(&r->lock, flags); -} - -struct fast_pool { - __u32 pool[4]; - unsigned long last; - unsigned short reg_idx; - unsigned char count; -}; - -/* - * This is a fast mixing routine used by the interrupt randomness - * collector. 
It's hardcoded for an 128 bit pool and assumes that any - * locks that might be needed are taken by the caller. - */ -static void fast_mix(struct fast_pool *f) -{ - __u32 a = f->pool[0], b = f->pool[1]; - __u32 c = f->pool[2], d = f->pool[3]; - - a += b; c += d; - b = rol32(b, 6); d = rol32(d, 27); - d ^= a; b ^= c; - - a += b; c += d; - b = rol32(b, 16); d = rol32(d, 14); - d ^= a; b ^= c; - - a += b; c += d; - b = rol32(b, 6); d = rol32(d, 27); - d ^= a; b ^= c; - - a += b; c += d; - b = rol32(b, 16); d = rol32(d, 14); - d ^= a; b ^= c; - - f->pool[0] = a; f->pool[1] = b; - f->pool[2] = c; f->pool[3] = d; - f->count++; -} - -static void process_random_ready_list(void) -{ - unsigned long flags; - struct random_ready_callback *rdy, *tmp; - - spin_lock_irqsave(&random_ready_list_lock, flags); - list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) { - struct module *owner = rdy->owner; - - list_del_init(&rdy->list); - rdy->func(rdy); - module_put(owner); - } - spin_unlock_irqrestore(&random_ready_list_lock, flags); -} - -/* - * Credit (or debit) the entropy store with n bits of entropy. - * Use credit_entropy_bits_safe() if the value comes from userspace - * or otherwise should be checked for extreme values. - */ -static void credit_entropy_bits(struct entropy_store *r, int nbits) -{ - int entropy_count, orig, has_initialized = 0; - const int pool_size = r->poolinfo->poolfracbits; - int nfrac = nbits << ENTROPY_SHIFT; - - if (!nbits) - return; - -retry: - entropy_count = orig = READ_ONCE(r->entropy_count); - if (nfrac < 0) { - /* Debit */ - entropy_count += nfrac; - } else { - /* - * Credit: we have to account for the possibility of - * overwriting already present entropy. Even in the - * ideal case of pure Shannon entropy, new contributions - * approach the full value asymptotically: - * - * entropy <- entropy + (pool_size - entropy) * - * (1 - exp(-add_entropy/pool_size)) - * - * For add_entropy <= pool_size/2 then - * (1 - exp(-add_entropy/pool_size)) >= - * (add_entropy/pool_size)*0.7869... - * so we can approximate the exponential with - * 3/4*add_entropy/pool_size and still be on the - * safe side by adding at most pool_size/2 at a time. - * - * The use of pool_size-2 in the while statement is to - * prevent rounding artifacts from making the loop - * arbitrarily long; this limits the loop to log2(pool_size)*2 - * turns no matter how large nbits is. 
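The crediting arithmetic being deleted here is easier to see in floating point. A small sketch (build with -lm), assuming an illustrative pool of 4096 fractional bits and credits of 512 at a time, compares the exact asymptotic rule against the 3/4 underestimate the comment derives:

#include <math.h>
#include <stdio.h>

/*
 * Toy comparison of the exact asymptotic credit
 *   entropy += (pool - entropy) * (1 - exp(-add/pool))
 * with the 3/4 * add/pool underestimate used by the removed code
 * (the kernel does this in fixed point; plain doubles here).
 */
int main(void)
{
	double pool = 4096.0, exact = 0.0, approx = 0.0, add = 512.0;
	int i;

	for (i = 1; i <= 8; i++) {
		exact  += (pool - exact) * (1.0 - exp(-add / pool));
		approx += (pool - approx) * (add / pool) * 0.75;
		printf("credit %d: exact %.1f  approx %.1f\n", i, exact, approx);
	}
	return 0;	/* approx always trails exact, keeping the estimate on the safe side */
}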
- */ - int pnfrac = nfrac; - const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2; - /* The +2 corresponds to the /4 in the denominator */ - - do { - unsigned int anfrac = min(pnfrac, pool_size/2); - unsigned int add = - ((pool_size - entropy_count)*anfrac*3) >> s; - - entropy_count += add; - pnfrac -= anfrac; - } while (unlikely(entropy_count < pool_size-2 && pnfrac)); - } - - if (WARN_ON(entropy_count < 0)) { - pr_warn("negative entropy/overflow: pool %s count %d\n", - r->name, entropy_count); - entropy_count = 0; - } else if (entropy_count > pool_size) - entropy_count = pool_size; - if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) - goto retry; - - if (has_initialized) { - r->initialized = 1; - kill_fasync(&fasync, SIGIO, POLL_IN); - } - - trace_credit_entropy_bits(r->name, nbits, - entropy_count >> ENTROPY_SHIFT, _RET_IP_); - - if (r == &input_pool) { - int entropy_bits = entropy_count >> ENTROPY_SHIFT; - - if (crng_init < 2) { - if (entropy_bits < 128) - return; - crng_reseed(&primary_crng, r); - entropy_bits = ENTROPY_BITS(r); - } - } -} - -static int credit_entropy_bits_safe(struct entropy_store *r, int nbits) -{ - const int nbits_max = r->poolinfo->poolwords * 32; - - if (nbits < 0) - return -EINVAL; - - /* Cap the value to avoid overflows */ - nbits = min(nbits, nbits_max); - - credit_entropy_bits(r, nbits); - return 0; -} +// GKI: Keep this header to retain the original CRC that previously used the +// random.h tracepoints. +#include /********************************************************************* * - * CRNG using CHACHA20 + * Initialization and readiness waiting. + * + * Much of the RNG infrastructure is devoted to various dependencies + * being able to wait until the RNG has collected enough entropy and + * is ready for safe consumption. * *********************************************************************/ -#define CRNG_RESEED_INTERVAL (300*HZ) - -static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); - -#ifdef CONFIG_NUMA /* - * Hack to deal with crazy userspace progams when they are all trying - * to access /dev/urandom in parallel. The programs are almost - * certainly doing something terribly wrong, but we'll work around - * their brain damage. + * crng_init is protected by base_crng->lock, and only increases + * its value (from empty->early->ready). */ -static struct crng_state **crng_node_pool __read_mostly; +static enum { + CRNG_EMPTY = 0, /* Little to no entropy collected */ + CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */ + CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */ +} crng_init __read_mostly = CRNG_EMPTY; +#define crng_ready() (likely(crng_init >= CRNG_READY)) +/* Various types of waiters for crng_init->CRNG_READY transition. */ +static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); +static struct fasync_struct *fasync; +static DEFINE_SPINLOCK(random_ready_chain_lock); +static RAW_NOTIFIER_HEAD(random_ready_chain); + +/* Control how we warn userspace. */ +static struct ratelimit_state urandom_warning = + RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE); +static int ratelimit_disable __read_mostly = + IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM); +module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); +MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); + +/* + * Returns whether or not the input pool has been seeded and thus guaranteed + * to supply cryptographically secure random numbers. 
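Typical in-kernel use of that guarantee is to block until readiness before drawing key material. A sketch of a hypothetical module doing so; the "demo" naming is illustrative, but both calls are the real APIs from this file:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/random.h>

/* Block (interruptibly) until the pool is ready, then draw a key. */
static int __init demo_init(void)
{
	u8 key[32];
	int ret;

	ret = wait_for_random_bytes();
	if (ret)
		return ret;	/* -ERESTARTSYS: interrupted by a signal */

	get_random_bytes(key, sizeof(key));
	pr_info("demo: drew %zu seeded random bytes\n", sizeof(key));
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");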
This applies to: the + * /dev/urandom device, the get_random_bytes function, and the get_random_{u32, + * u64,int,long} family of functions. + * + * Returns: true if the input pool has been seeded. + * false if the input pool has not been seeded. + */ +bool rng_is_initialized(void) +{ + return crng_ready(); +} +EXPORT_SYMBOL(rng_is_initialized); + +/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */ +static void try_to_generate_entropy(void); + +/* + * Wait for the input pool to be seeded and thus guaranteed to supply + * cryptographically secure random numbers. This applies to: the /dev/urandom + * device, the get_random_bytes function, and the get_random_{u32,u64,int,long} + * family of functions. Using any of these functions without first calling + * this function forfeits the guarantee of security. + * + * Returns: 0 if the input pool has been seeded. + * -ERESTARTSYS if the function was interrupted by a signal. + */ +int wait_for_random_bytes(void) +{ + while (!crng_ready()) { + int ret; + + try_to_generate_entropy(); + ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ); + if (ret) + return ret > 0 ? 0 : ret; + } + return 0; +} +EXPORT_SYMBOL(wait_for_random_bytes); + +/* + * Add a callback function that will be invoked when the input + * pool is initialised. + * + * returns: 0 if callback is successfully added + * -EALREADY if pool is already initialised (callback not called) + */ +int __cold register_random_ready_notifier(struct notifier_block *nb) +{ + unsigned long flags; + int ret = -EALREADY; + + if (crng_ready()) + return ret; + + spin_lock_irqsave(&random_ready_chain_lock, flags); + if (!crng_ready()) + ret = raw_notifier_chain_register(&random_ready_chain, nb); + spin_unlock_irqrestore(&random_ready_chain_lock, flags); + return ret; +} + +/* + * Delete a previously registered readiness callback function. + */ +int __cold unregister_random_ready_notifier(struct notifier_block *nb) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&random_ready_chain_lock, flags); + ret = raw_notifier_chain_unregister(&random_ready_chain, nb); + spin_unlock_irqrestore(&random_ready_chain_lock, flags); + return ret; +} + +static void process_oldschool_random_ready_list(void); +static void __cold process_random_ready_list(void) +{ + unsigned long flags; + + spin_lock_irqsave(&random_ready_chain_lock, flags); + raw_notifier_call_chain(&random_ready_chain, 0, NULL); + spin_unlock_irqrestore(&random_ready_chain_lock, flags); + + process_oldschool_random_ready_list(); +} + +#define warn_unseeded_randomness() \ + if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \ + printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \ + __func__, (void *)_RET_IP_, crng_init) + + +/********************************************************************* + * + * Fast key erasure RNG, the "crng". + * + * These functions expand entropy from the entropy extractor into + * long streams for external consumption using the "fast key erasure" + * RNG described at . + * + * There are a few exported interfaces for use by other drivers: + * + * void get_random_bytes(void *buf, size_t len) + * u32 get_random_u32() + * u64 get_random_u64() + * unsigned int get_random_int() + * unsigned long get_random_long() + * + * These interfaces will return the requested number of random bytes + * into the given buffer or as a return value. This is equivalent to + * a read from /dev/urandom.
The u32, u64, int, and long family of + * functions may be higher performance for one-off random integers, + * because they do a bit of buffering and do not invoke reseeding + * until the buffer is emptied. + * + *********************************************************************/ + +enum { + CRNG_RESEED_START_INTERVAL = HZ, + CRNG_RESEED_INTERVAL = 60 * HZ +}; + +static struct { + u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long)); + unsigned long birth; + unsigned long generation; + spinlock_t lock; +} base_crng = { + .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock) +}; + +struct crng { + u8 key[CHACHA_KEY_SIZE]; + unsigned long generation; + local_lock_t lock; +}; + +static DEFINE_PER_CPU(struct crng, crngs) = { + .generation = ULONG_MAX, + .lock = INIT_LOCAL_LOCK(crngs.lock), +}; + +/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */ +static void extract_entropy(void *buf, size_t len); + +/* This extracts a new crng key from the input pool. */ +static void crng_reseed(void) +{ + unsigned long flags; + unsigned long next_gen; + u8 key[CHACHA_KEY_SIZE]; + + extract_entropy(key, sizeof(key)); + + /* + * We copy the new key into the base_crng, overwriting the old one, + * and update the generation counter. We avoid hitting ULONG_MAX, + * because the per-cpu crngs are initialized to ULONG_MAX, so this + * forces new CPUs that come online to always initialize. + */ + spin_lock_irqsave(&base_crng.lock, flags); + memcpy(base_crng.key, key, sizeof(base_crng.key)); + next_gen = base_crng.generation + 1; + if (next_gen == ULONG_MAX) + ++next_gen; + WRITE_ONCE(base_crng.generation, next_gen); + WRITE_ONCE(base_crng.birth, jiffies); + if (!crng_ready()) + crng_init = CRNG_READY; + spin_unlock_irqrestore(&base_crng.lock, flags); + memzero_explicit(key, sizeof(key)); +} + +/* + * This generates a ChaCha block using the provided key, and then + * immediately overwrites that key with half the block. It returns + * the resultant ChaCha state to the user, along with the second + * half of the block containing 32 bytes of random data that may + * be used; random_data_len may not be greater than 32. + * + * The returned ChaCha state contains within it a copy of the old + * key value, at index 4, so the state should always be zeroed out + * immediately after use in order to maintain forward secrecy. + * If the state cannot be erased in a timely manner, then it is + * safer to set the random_data parameter to &chacha_state[4] so + * that this function overwrites it before returning. + */ +static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], + u32 chacha_state[CHACHA_STATE_WORDS], + u8 *random_data, size_t random_data_len) +{ + u8 first_block[CHACHA_BLOCK_SIZE]; + + BUG_ON(random_data_len > 32); + + chacha_init_consts(chacha_state); + memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE); + memset(&chacha_state[12], 0, sizeof(u32) * 4); + chacha20_block(chacha_state, first_block); + + memcpy(key, first_block, CHACHA_KEY_SIZE); + memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len); + memzero_explicit(first_block, sizeof(first_block)); +} + +/* + * Return whether the crng seed is considered to be sufficiently old + * that a reseeding is needed. This happens if the last reseeding + * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval + * proportional to the uptime.
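Fast key erasure itself is compact enough to model in userspace. In this toy, a splitmix64 expansion stands in for the ChaCha20 block function and the key is 16 bytes rather than CHACHA_KEY_SIZE; only the ratcheting structure mirrors crng_fast_key_erasure() above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_LEN 16
#define BLOCK_LEN (2 * KEY_LEN)

/* Toy PRF: expand the first 8 key bytes into one "block" (ChaCha20 in the kernel). */
static void toy_block(const uint8_t key[KEY_LEN], uint8_t block[BLOCK_LEN])
{
	uint64_t x;
	size_t i;

	memcpy(&x, key, sizeof(x));
	for (i = 0; i < BLOCK_LEN; i += sizeof(uint64_t)) {
		uint64_t z = (x += 0x9e3779b97f4a7c15ULL);
		z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9ULL;
		z = (z ^ (z >> 27)) * 0x94d049bb133111ebULL;
		z ^= z >> 31;
		memcpy(block + i, &z, sizeof(z));
	}
}

/* len must be <= KEY_LEN in this toy. */
static void fast_key_erasure(uint8_t key[KEY_LEN], uint8_t *out, size_t len)
{
	uint8_t block[BLOCK_LEN];

	toy_block(key, block);
	memcpy(key, block, KEY_LEN);		/* ratchet: the old key is gone */
	memcpy(out, block + KEY_LEN, len);	/* the remainder is output */
	memset(block, 0, sizeof(block));	/* the kernel uses memzero_explicit() */
}

int main(void)
{
	uint8_t key[KEY_LEN] = "0123456789abcde";
	uint8_t out[KEY_LEN];
	size_t i;

	fast_key_erasure(key, out, sizeof(out));
	for (i = 0; i < sizeof(out); i++)
		printf("%02x", out[i]);
	printf("\n");
	return 0;
}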
+ */ +static bool crng_has_old_seed(void) +{ + static bool early_boot = true; + unsigned long interval = CRNG_RESEED_INTERVAL; + + if (unlikely(READ_ONCE(early_boot))) { + time64_t uptime = ktime_get_seconds(); + if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2) + WRITE_ONCE(early_boot, false); + else + interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL, + (unsigned int)uptime / 2 * HZ); + } + return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval); +} + +/* + * This function returns a ChaCha state that you may use for generating + * random data. It also returns up to 32 bytes on its own of random data + * that may be used; random_data_len may not be greater than 32. + */ +static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], + u8 *random_data, size_t random_data_len) +{ + unsigned long flags; + struct crng *crng; + + BUG_ON(random_data_len > 32); + + /* + * For the fast path, we check whether we're ready, unlocked first, and + * then re-check once locked later. In the case where we're really not + * ready, we do fast key erasure with the base_crng directly, extracting + * when crng_init is CRNG_EMPTY. + */ + if (!crng_ready()) { + bool ready; + + spin_lock_irqsave(&base_crng.lock, flags); + ready = crng_ready(); + if (!ready) { + if (crng_init == CRNG_EMPTY) + extract_entropy(base_crng.key, sizeof(base_crng.key)); + crng_fast_key_erasure(base_crng.key, chacha_state, + random_data, random_data_len); + } + spin_unlock_irqrestore(&base_crng.lock, flags); + if (!ready) + return; + } + + /* + * If the base_crng is old enough, we reseed, which in turn bumps the + * generation counter that we check below. + */ + if (unlikely(crng_has_old_seed())) + crng_reseed(); + + local_lock_irqsave(&crngs.lock, flags); + crng = raw_cpu_ptr(&crngs); + + /* + * If our per-cpu crng is older than the base_crng, then it means + * somebody reseeded the base_crng. In that case, we do fast key + * erasure on the base_crng, and use its output as the new key + * for our per-cpu crng. This brings us up to date with base_crng. + */ + if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) { + spin_lock(&base_crng.lock); + crng_fast_key_erasure(base_crng.key, chacha_state, + crng->key, sizeof(crng->key)); + crng->generation = base_crng.generation; + spin_unlock(&base_crng.lock); + } + + /* + * Finally, when we've made it this far, our per-cpu crng has an up + * to date key, and we can do fast key erasure with it to produce + * some random data and a ChaCha state for the caller. All other + * branches of this function are "unlikely", so most of the time we + * should wind up here immediately. + */ + crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len); + local_unlock_irqrestore(&crngs.lock, flags); +} + +static void _get_random_bytes(void *buf, size_t len) +{ + u32 chacha_state[CHACHA_STATE_WORDS]; + u8 tmp[CHACHA_BLOCK_SIZE]; + size_t first_block_len; + + if (!len) + return; + + first_block_len = min_t(size_t, 32, len); + crng_make_state(chacha_state, buf, first_block_len); + len -= first_block_len; + buf += first_block_len; + + while (len) { + if (len < CHACHA_BLOCK_SIZE) { + chacha20_block(chacha_state, tmp); + memcpy(buf, tmp, len); + memzero_explicit(tmp, sizeof(tmp)); + break; + } + + chacha20_block(chacha_state, buf); + if (unlikely(chacha_state[12] == 0)) + ++chacha_state[13]; + len -= CHACHA_BLOCK_SIZE; + buf += CHACHA_BLOCK_SIZE; + } + + memzero_explicit(chacha_state, sizeof(chacha_state)); +} + +/* + * This function is the exported kernel interface. 
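The generation-counter handshake that crng_make_state() performs can be reduced to a few lines. A toy model, with arbitrary key arithmetic standing in for extract_entropy() and fast key erasure; note the first-use state is primed with UINT64_MAX, just as the per-cpu crngs are primed with ULONG_MAX:

#include <stdint.h>
#include <stdio.h>

/*
 * When the base key is reseeded its generation bumps; each per-CPU
 * state notices the mismatch on next use and rekeys from the base.
 */
struct state { uint64_t key, generation; };

static struct state base = { .key = 1, .generation = 0 };

static void reseed_base(void)
{
	base.key += 0x9e3779b97f4a7c15ULL;	/* stand-in for extract_entropy() */
	base.generation++;
}

static void use_percpu(struct state *pcpu, const char *name)
{
	if (pcpu->generation != base.generation) {
		pcpu->key = base.key ^ 0xabcdef;	/* stand-in for fast key erasure */
		pcpu->generation = base.generation;
		printf("%s: rekeyed at generation %llu\n",
		       name, (unsigned long long)pcpu->generation);
	} else {
		printf("%s: key still fresh\n", name);
	}
}

int main(void)
{
	struct state cpu0 = { .generation = UINT64_MAX };	/* forces first-use rekey */

	use_percpu(&cpu0, "cpu0");
	use_percpu(&cpu0, "cpu0");
	reseed_base();
	use_percpu(&cpu0, "cpu0");
	return 0;
}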
It returns some + * number of good random numbers, suitable for key generation, seeding + * TCP sequence numbers, etc. It does not rely on the hardware random + * number generator. For random bytes direct from the hardware RNG + * (when available), use get_random_bytes_arch(). In order to ensure + * that the randomness provided by this function is okay, the function + * wait_for_random_bytes() should be called and return 0 at least once + * at any point prior. + */ +void get_random_bytes(void *buf, int len) +{ + warn_unseeded_randomness(); + _get_random_bytes(buf, len); +} +EXPORT_SYMBOL(get_random_bytes); + +static ssize_t get_random_bytes_user(struct iov_iter *iter) +{ + u32 chacha_state[CHACHA_STATE_WORDS]; + u8 block[CHACHA_BLOCK_SIZE]; + size_t ret = 0, copied; + + if (unlikely(!iov_iter_count(iter))) + return 0; + + /* + * Immediately overwrite the ChaCha key at index 4 with random + * bytes, in case userspace causes copy_to_iter() below to sleep + * forever, so that we still retain forward secrecy in that case. + */ + crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); + /* + * However, if we're doing a read of len <= 32, we don't need to + * use chacha_state after, so we can simply return those bytes to + * the user directly. + */ + if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) { + ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter); + goto out_zero_chacha; + } + + for (;;) { + chacha20_block(chacha_state, block); + if (unlikely(chacha_state[12] == 0)) + ++chacha_state[13]; + + copied = copy_to_iter(block, sizeof(block), iter); + ret += copied; + if (!iov_iter_count(iter) || copied != sizeof(block)) + break; + + BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0); + if (ret % PAGE_SIZE == 0) { + if (signal_pending(current)) + break; + cond_resched(); + } + } + + memzero_explicit(block, sizeof(block)); +out_zero_chacha: + memzero_explicit(chacha_state, sizeof(chacha_state)); + return ret ? ret : -EFAULT; +} + +/* + * Batched entropy returns random integers. The quality of the random + * number is as good as /dev/urandom. In order to ensure that the randomness + * provided by this function is okay, the function wait_for_random_bytes() + * should be called and return 0 at least once at any point prior. + */ + +#define DEFINE_BATCHED_ENTROPY(type) \ +struct batch_ ##type { \ + /* \ + * We make this 1.5x a ChaCha block, so that we get the \ + * remaining 32 bytes from fast key erasure, plus one full \ + * block from the detached ChaCha state. We can increase \ + * the size of this later if needed so long as we keep the \ + * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
\ + */ \ + type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \ + local_lock_t lock; \ + unsigned long generation; \ + unsigned int position; \ +}; \ + \ +static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \ + .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \ + .position = UINT_MAX \ +}; \ + \ +type get_random_ ##type(void) \ +{ \ + type ret; \ + unsigned long flags; \ + struct batch_ ##type *batch; \ + unsigned long next_gen; \ + \ + warn_unseeded_randomness(); \ + \ + if (!crng_ready()) { \ + _get_random_bytes(&ret, sizeof(ret)); \ + return ret; \ + } \ + \ + local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \ + batch = raw_cpu_ptr(&batched_entropy_##type); \ + \ + next_gen = READ_ONCE(base_crng.generation); \ + if (batch->position >= ARRAY_SIZE(batch->entropy) || \ + next_gen != batch->generation) { \ + _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \ + batch->position = 0; \ + batch->generation = next_gen; \ + } \ + \ + ret = batch->entropy[batch->position]; \ + batch->entropy[batch->position] = 0; \ + ++batch->position; \ + local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \ + return ret; \ +} \ +EXPORT_SYMBOL(get_random_ ##type); + +DEFINE_BATCHED_ENTROPY(u64) +DEFINE_BATCHED_ENTROPY(u32) + +#ifdef CONFIG_SMP +/* + * This function is called when the CPU is coming up, with entry + * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP. + */ +int __cold random_prepare_cpu(unsigned int cpu) +{ + /* + * When the cpu comes back online, immediately invalidate both + * the per-cpu crng and all batches, so that we serve fresh + * randomness. + */ + per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX; + per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX; + per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX; + return 0; +} #endif -static void invalidate_batched_entropy(void); -static void numa_crng_init(void); +/* + * This function will use the architecture-specific hardware random + * number generator if it is available. It is not recommended for + * use. Use get_random_bytes() instead. It returns the number of + * bytes filled in. + */ +int __must_check get_random_bytes_arch(void *buf, int len) +{ + size_t left = len; + u8 *p = buf; + + while (left) { + unsigned long v; + size_t block_len = min_t(size_t, left, sizeof(unsigned long)); + + if (!arch_get_random_long(&v)) + break; + + memcpy(p, &v, block_len); + p += block_len; + left -= block_len; + } + + return len - left; +} +EXPORT_SYMBOL(get_random_bytes_arch); + + +/********************************************************************** + * + * Entropy accumulation and extraction routines. 
+ * + * Callers may add entropy via: + * + * static void mix_pool_bytes(const void *buf, size_t len) + * + * After which, if added entropy should be credited: + * + * static void credit_init_bits(size_t bits) + * + * Finally, extract entropy via: + * + * static void extract_entropy(void *buf, size_t len) + * + **********************************************************************/ + +enum { + POOL_BITS = BLAKE2S_HASH_SIZE * 8, + POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */ + POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */ +}; + +static struct { + struct blake2s_state hash; + spinlock_t lock; + unsigned int init_bits; +} input_pool = { + .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE), + BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4, + BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 }, + .hash.outlen = BLAKE2S_HASH_SIZE, + .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), +}; + +static void _mix_pool_bytes(const void *buf, size_t len) +{ + blake2s_update(&input_pool.hash, buf, len); +} + +/* + * This function adds bytes into the input pool. It does not + * update the initialization bit counter; the caller should call + * credit_init_bits if this is appropriate. + */ +static void mix_pool_bytes(const void *buf, size_t len) +{ + unsigned long flags; + + spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(buf, len); + spin_unlock_irqrestore(&input_pool.lock, flags); +} + +/* + * This is an HKDF-like construction for using the hashed collected entropy + * as a PRF key, that's then expanded block-by-block. + */ +static void extract_entropy(void *buf, size_t len) +{ + unsigned long flags; + u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE]; + struct { + unsigned long rdseed[32 / sizeof(long)]; + size_t counter; + } block; + size_t i; + + for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) { + if (!arch_get_random_seed_long(&block.rdseed[i]) && + !arch_get_random_long(&block.rdseed[i])) + block.rdseed[i] = random_get_entropy(); + } + + spin_lock_irqsave(&input_pool.lock, flags); + + /* seed = HASHPRF(last_key, entropy_input) */ + blake2s_final(&input_pool.hash, seed); + + /* next_key = HASHPRF(seed, RDSEED || 0) */ + block.counter = 0; + blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed)); + blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key)); + + spin_unlock_irqrestore(&input_pool.lock, flags); + memzero_explicit(next_key, sizeof(next_key)); + + while (len) { + i = min_t(size_t, len, BLAKE2S_HASH_SIZE); + /* output = HASHPRF(seed, RDSEED || ++counter) */ + ++block.counter; + blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed)); + len -= i; + buf += i; + } + + memzero_explicit(seed, sizeof(seed)); + memzero_explicit(&block, sizeof(block)); +} + +#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits) + +static void __cold _credit_init_bits(size_t bits) +{ + unsigned int new, orig, add; + unsigned long flags; + + if (!bits) + return; + + add = min_t(size_t, bits, POOL_BITS); + + do { + orig = READ_ONCE(input_pool.init_bits); + new = min_t(unsigned int, POOL_BITS, orig + add); + } while (cmpxchg(&input_pool.init_bits, orig, new) != orig); + + if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) { + crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. 
*/ + process_random_ready_list(); + wake_up_interruptible(&crng_init_wait); + kill_fasync(&fasync, SIGIO, POLL_IN); + pr_notice("crng init done\n"); + if (urandom_warning.missed) + pr_notice("%d urandom warning(s) missed due to ratelimiting\n", + urandom_warning.missed); + } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) { + spin_lock_irqsave(&base_crng.lock, flags); + /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */ + if (crng_init == CRNG_EMPTY) { + extract_entropy(base_crng.key, sizeof(base_crng.key)); + crng_init = CRNG_EARLY; + } + spin_unlock_irqrestore(&base_crng.lock, flags); + } +} + + +/********************************************************************** + * + * Entropy collection routines. + * + * The following exported functions are used for pushing entropy into + * the above entropy accumulation routines: + * + * void add_device_randomness(const void *buf, size_t len); + * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy); + * void add_bootloader_randomness(const void *buf, size_t len); + * void add_interrupt_randomness(int irq); + * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value); + * void add_disk_randomness(struct gendisk *disk); + * + * add_device_randomness() adds data to the input pool that + * is likely to differ between two devices (or possibly even per boot). + * This would be things like MAC addresses or serial numbers, or the + * read-out of the RTC. This does *not* credit any actual entropy to + * the pool, but it initializes the pool to different values for devices + * that might otherwise be identical and have very little entropy + * available to them (particularly common in the embedded world). + * + * add_hwgenerator_randomness() is for true hardware RNGs, and will credit + * entropy as specified by the caller. If the entropy pool is full it will + * block until more entropy is needed. + * + * add_bootloader_randomness() is called by bootloader drivers, such as EFI + * and device tree, and credits its input depending on whether or not the + * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set. + * + * add_interrupt_randomness() uses the interrupt timing as random + * inputs to the entropy pool. Using the cycle counters and the irq source + * as inputs, it feeds the input pool roughly once a second or after 64 + * interrupts, crediting 1 bit of entropy for whichever comes first. + * + * add_input_randomness() uses the input layer interrupt timing, as well + * as the event type information from the hardware. + * + * add_disk_randomness() uses what amounts to the seek time of block + * layer request events, on a per-disk_devt basis, as input to the + * entropy pool. Note that high-speed solid state drives with very low + * seek times do not make for good sources of entropy, as their seek + * times are usually fairly consistent. + * + * The last two routines try to estimate how many bits of entropy + * to credit. They do this by keeping track of the first and second + * order deltas of the event timings. 
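As a concrete example of the first routine in the list above, a hypothetical network driver might push its hardware address into the pool at probe time. add_device_randomness() is the real API from this file; the function name and setting are illustrative:

#include <linux/if_ether.h>
#include <linux/random.h>

/*
 * Hypothetical driver sketch: feed the NIC's MAC address into the
 * input pool. It differentiates otherwise-identical embedded devices
 * but, as documented above, credits no entropy.
 */
static void demo_seed_from_mac(const u8 *mac)
{
	add_device_randomness(mac, ETH_ALEN);
}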
+ * + **********************************************************************/ static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); +static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER); static int __init parse_trust_cpu(char *arg) { return kstrtobool(arg, &trust_cpu); } +static int __init parse_trust_bootloader(char *arg) +{ + return kstrtobool(arg, &trust_bootloader); +} early_param("random.trust_cpu", parse_trust_cpu); +early_param("random.trust_bootloader", parse_trust_bootloader); -static bool crng_init_try_arch(struct crng_state *crng) +/* + * The first collection of entropy occurs at system boot while interrupts + * are still turned off. Here we push in latent entropy, RDSEED, a timestamp, + * utsname(), and the command line. Depending on the above configuration knob, + * RDSEED may be considered sufficient for initialization. Note that much + * earlier setup may already have pushed entropy into the input pool by the + * time we get here. + */ +int __init random_init(const char *command_line) { - int i; - bool arch_init = true; - unsigned long rv; + ktime_t now = ktime_get_real(); + unsigned int i, arch_bytes; + unsigned long entropy; - for (i = 4; i < 16; i++) { - if (!arch_get_random_seed_long(&rv) && - !arch_get_random_long(&rv)) { - rv = random_get_entropy(); - arch_init = false; - } - crng->state[i] ^= rv; - } - - return arch_init; -} - -static bool __init crng_init_try_arch_early(struct crng_state *crng) -{ - int i; - bool arch_init = true; - unsigned long rv; - - for (i = 4; i < 16; i++) { - if (!arch_get_random_seed_long_early(&rv) && - !arch_get_random_long_early(&rv)) { - rv = random_get_entropy(); - arch_init = false; - } - crng->state[i] ^= rv; - } - - return arch_init; -} - -static void __maybe_unused crng_initialize_secondary(struct crng_state *crng) -{ - chacha_init_consts(crng->state); - _get_random_bytes(&crng->state[4], sizeof(__u32) * 12); - crng_init_try_arch(crng); - crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; -} - -static void __init crng_initialize_primary(struct crng_state *crng) -{ - chacha_init_consts(crng->state); - _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0); - if (crng_init_try_arch_early(crng) && trust_cpu) { - invalidate_batched_entropy(); - numa_crng_init(); - crng_init = 2; - pr_notice("crng done (trusting CPU's manufacturer)\n"); - } - crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; -} - -static void crng_finalize_init(struct crng_state *crng) -{ - if (crng != &primary_crng || crng_init >= 2) - return; - if (!system_wq) { - /* We can't call numa_crng_init until we have workqueues, - * so mark this for processing later. 
*/ - crng_need_final_init = true; - return; - } - - invalidate_batched_entropy(); - numa_crng_init(); - crng_init = 2; - process_random_ready_list(); - wake_up_interruptible(&crng_init_wait); - kill_fasync(&fasync, SIGIO, POLL_IN); - pr_notice("crng init done\n"); - if (unseeded_warning.missed) { - pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n", - unseeded_warning.missed); - unseeded_warning.missed = 0; - } - if (urandom_warning.missed) { - pr_notice("%d urandom warning(s) missed due to ratelimiting\n", - urandom_warning.missed); - urandom_warning.missed = 0; - } -} - -#ifdef CONFIG_NUMA -static void do_numa_crng_init(struct work_struct *work) -{ - int i; - struct crng_state *crng; - struct crng_state **pool; - - pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); - for_each_online_node(i) { - crng = kmalloc_node(sizeof(struct crng_state), - GFP_KERNEL | __GFP_NOFAIL, i); - spin_lock_init(&crng->lock); - crng_initialize_secondary(crng); - pool[i] = crng; - } - /* pairs with READ_ONCE() in select_crng() */ - if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) { - for_each_node(i) - kfree(pool[i]); - kfree(pool); - } -} - -static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init); - -static void numa_crng_init(void) -{ - schedule_work(&numa_crng_init_work); -} - -static struct crng_state *select_crng(void) -{ - struct crng_state **pool; - int nid = numa_node_id(); - - /* pairs with cmpxchg_release() in do_numa_crng_init() */ - pool = READ_ONCE(crng_node_pool); - if (pool && pool[nid]) - return pool[nid]; - - return &primary_crng; -} -#else -static void numa_crng_init(void) {} - -static struct crng_state *select_crng(void) -{ - return &primary_crng; -} +#if defined(LATENT_ENTROPY_PLUGIN) + static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy; + _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed)); #endif -/* - * crng_fast_load() can be called by code in the interrupt service - * path. So we can't afford to dilly-dally. Returns the number of - * bytes processed from cp. - */ -static size_t crng_fast_load(const char *cp, size_t len) -{ - unsigned long flags; - char *p; - size_t ret = 0; - - if (!spin_trylock_irqsave(&primary_crng.lock, flags)) - return 0; - if (crng_init != 0) { - spin_unlock_irqrestore(&primary_crng.lock, flags); - return 0; - } - p = (unsigned char *) &primary_crng.state[4]; - while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) { - p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp; - cp++; crng_init_cnt++; len--; ret++; - } - spin_unlock_irqrestore(&primary_crng.lock, flags); - if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { - invalidate_batched_entropy(); - crng_init = 1; - pr_notice("fast init done\n"); - } - return ret; -} - -/* - * crng_slow_load() is called by add_device_randomness, which has two - * attributes. (1) We can't trust the buffer passed to it is - * guaranteed to be unpredictable (so it might not have any entropy at - * all), and (2) it doesn't have the performance constraints of - * crng_fast_load(). - * - * So we do something more comprehensive which is guaranteed to touch - * all of the primary_crng's state, and which uses a LFSR with a - * period of 255 as part of the mixing algorithm. Finally, we do - * *not* advance crng_init_cnt since buffer we may get may be something - * like a fixed DMI table (for example), which might very well be - * unique to the machine, but is otherwise unvarying. 
- */ -static int crng_slow_load(const char *cp, size_t len) -{ - unsigned long flags; - static unsigned char lfsr = 1; - unsigned char tmp; - unsigned i, max = CHACHA_KEY_SIZE; - const char * src_buf = cp; - char * dest_buf = (char *) &primary_crng.state[4]; - - if (!spin_trylock_irqsave(&primary_crng.lock, flags)) - return 0; - if (crng_init != 0) { - spin_unlock_irqrestore(&primary_crng.lock, flags); - return 0; - } - if (len > max) - max = len; - - for (i = 0; i < max ; i++) { - tmp = lfsr; - lfsr >>= 1; - if (tmp & 1) - lfsr ^= 0xE1; - tmp = dest_buf[i % CHACHA_KEY_SIZE]; - dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr; - lfsr += (tmp << 3) | (tmp >> 5); - } - spin_unlock_irqrestore(&primary_crng.lock, flags); - return 1; -} - -static void crng_reseed(struct crng_state *crng, struct entropy_store *r) -{ - unsigned long flags; - int i, num; - union { - __u8 block[CHACHA_BLOCK_SIZE]; - __u32 key[8]; - } buf; - - if (r) { - num = extract_entropy(r, &buf, 32, 16, 0); - if (num == 0) - return; - } else { - _extract_crng(&primary_crng, buf.block); - _crng_backtrack_protect(&primary_crng, buf.block, - CHACHA_KEY_SIZE); - } - spin_lock_irqsave(&crng->lock, flags); - for (i = 0; i < 8; i++) { - unsigned long rv; - if (!arch_get_random_seed_long(&rv) && - !arch_get_random_long(&rv)) - rv = random_get_entropy(); - crng->state[i+4] ^= buf.key[i] ^ rv; - } - memzero_explicit(&buf, sizeof(buf)); - WRITE_ONCE(crng->init_time, jiffies); - spin_unlock_irqrestore(&crng->lock, flags); - crng_finalize_init(crng); -} - -static void _extract_crng(struct crng_state *crng, - __u8 out[CHACHA_BLOCK_SIZE]) -{ - unsigned long v, flags, init_time; - - if (crng_ready()) { - init_time = READ_ONCE(crng->init_time); - if (time_after(READ_ONCE(crng_global_init_time), init_time) || - time_after(jiffies, init_time + CRNG_RESEED_INTERVAL)) - crng_reseed(crng, crng == &primary_crng ? - &input_pool : NULL); - } - spin_lock_irqsave(&crng->lock, flags); - if (arch_get_random_long(&v)) - crng->state[14] ^= v; - chacha20_block(&crng->state[0], out); - if (crng->state[12] == 0) - crng->state[13]++; - spin_unlock_irqrestore(&crng->lock, flags); -} - -static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE]) -{ - _extract_crng(select_crng(), out); -} - -/* - * Use the leftover bytes from the CRNG block output (if there is - * enough) to mutate the CRNG key to provide backtracking protection. 
- */ -static void _crng_backtrack_protect(struct crng_state *crng, - __u8 tmp[CHACHA_BLOCK_SIZE], int used) -{ - unsigned long flags; - __u32 *s, *d; - int i; - - used = round_up(used, sizeof(__u32)); - if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) { - extract_crng(tmp); - used = 0; - } - spin_lock_irqsave(&crng->lock, flags); - s = (__u32 *) &tmp[used]; - d = &crng->state[4]; - for (i=0; i < 8; i++) - *d++ ^= *s++; - spin_unlock_irqrestore(&crng->lock, flags); -} - -static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used) -{ - _crng_backtrack_protect(select_crng(), tmp, used); -} - -static ssize_t extract_crng_user(void __user *buf, size_t nbytes) -{ - ssize_t ret = 0, i = CHACHA_BLOCK_SIZE; - __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); - int large_request = (nbytes > 256); - - while (nbytes) { - if (large_request && need_resched()) { - if (signal_pending(current)) { - if (ret == 0) - ret = -ERESTARTSYS; - break; - } - schedule(); + for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE; + i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) { + if (!arch_get_random_seed_long_early(&entropy) && + !arch_get_random_long_early(&entropy)) { + entropy = random_get_entropy(); + arch_bytes -= sizeof(entropy); } - - extract_crng(tmp); - i = min_t(int, nbytes, CHACHA_BLOCK_SIZE); - if (copy_to_user(buf, tmp, i)) { - ret = -EFAULT; - break; - } - - nbytes -= i; - buf += i; - ret += i; + _mix_pool_bytes(&entropy, sizeof(entropy)); } - crng_backtrack_protect(tmp, i); + _mix_pool_bytes(&now, sizeof(now)); + _mix_pool_bytes(utsname(), sizeof(*(utsname()))); + _mix_pool_bytes(command_line, strlen(command_line)); + add_latent_entropy(); - /* Wipe data just written to memory */ - memzero_explicit(tmp, sizeof(tmp)); + if (crng_ready()) + crng_reseed(); + else if (trust_cpu) + credit_init_bits(arch_bytes * 8); - return ret; + return 0; } - -/********************************************************************* - * - * Entropy input management - * - *********************************************************************/ - -/* There is one of these per entropy source */ -struct timer_rand_state { - cycles_t last_time; - long last_delta, last_delta2; -}; - -#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }; - /* * Add device- or boot-specific data to the input pool to help * initialize it. @@ -1141,57 +858,212 @@ struct timer_rand_state { * the entropy pool having similar initial state across largely * identical devices. */ -void add_device_randomness(const void *buf, unsigned int size) +void add_device_randomness(const void *buf, unsigned int len) { - unsigned long time = random_get_entropy() ^ jiffies; + unsigned long entropy = random_get_entropy(); unsigned long flags; - if (!crng_ready() && size) - crng_slow_load(buf, size); - - trace_add_device_randomness(size, _RET_IP_); spin_lock_irqsave(&input_pool.lock, flags); - _mix_pool_bytes(&input_pool, buf, size); - _mix_pool_bytes(&input_pool, &time, sizeof(time)); + _mix_pool_bytes(&entropy, sizeof(entropy)); + _mix_pool_bytes(buf, len); spin_unlock_irqrestore(&input_pool.lock, flags); } EXPORT_SYMBOL(add_device_randomness); -static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE; +/* + * Interface for in-kernel drivers of true hardware RNGs. + * Those devices may produce endless random bits and will be throttled + * when our pool is full. 
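A userspace analogue of the boot-time seeding in random_init() above makes the crediting policy visible: everything gets mixed, but only the words that came from a (here, pretend) hardware source are counted. All names and constants below are illustrative; FNV-1a again stands in for the pool hash:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/utsname.h>
#include <time.h>

static uint64_t pool = 0xcbf29ce484222325ULL;
static unsigned int init_bits;

static void mix(const void *buf, size_t len)
{
	const unsigned char *p = buf;
	while (len--)
		pool = (pool ^ *p++) * 0x100000001b3ULL;
}

int main(int argc, char **argv)
{
	struct utsname u;
	struct timespec now;
	uint64_t hw = 0x123456789abcdef0ULL;	/* pretend RDSEED result */

	clock_gettime(CLOCK_REALTIME, &now);
	uname(&u);

	mix(&now, sizeof(now));			/* timestamp: mixed, not credited */
	mix(&u, sizeof(u));			/* machine identity: mixed, not credited */
	if (argc > 0)
		mix(argv[0], strlen(argv[0]));	/* stand-in for the command line */
	mix(&hw, sizeof(hw));
	init_bits += 8 * sizeof(hw);		/* only the hardware words get credit */

	printf("pool %016llx, credited %u bits\n",
	       (unsigned long long)pool, init_bits);
	return 0;
}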
+ */ +void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy) +{ + mix_pool_bytes(buf, len); + credit_init_bits(entropy); + + /* + * Throttle writing to once every CRNG_RESEED_INTERVAL, unless + * we're not yet initialized. + */ + if (!kthread_should_stop() && crng_ready()) + schedule_timeout_interruptible(CRNG_RESEED_INTERVAL); +} +EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); + +/* + * Handle random seed passed by bootloader, and credit it if + * CONFIG_RANDOM_TRUST_BOOTLOADER is set. + */ +void __cold add_bootloader_randomness(const void *buf, size_t len) +{ + mix_pool_bytes(buf, len); + if (trust_bootloader) + credit_init_bits(len * 8); +} +EXPORT_SYMBOL_GPL(add_bootloader_randomness); + +struct fast_pool { + unsigned long pool[4]; + unsigned long last; + unsigned int count; + struct timer_list mix; +}; + +static void mix_interrupt_randomness(struct timer_list *work); + +static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { +#ifdef CONFIG_64BIT +#define FASTMIX_PERM SIPHASH_PERMUTATION + .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }, +#else +#define FASTMIX_PERM HSIPHASH_PERMUTATION + .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }, +#endif + .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0) +}; + +/* + * This is [Half]SipHash-1-x, starting from an empty key. Because + * the key is fixed, it assumes that its inputs are non-malicious, + * and therefore this has no security on its own. s represents the + * four-word SipHash state, while v represents a two-word input. + */ +static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2) +{ + s[3] ^= v1; + FASTMIX_PERM(s[0], s[1], s[2], s[3]); + s[0] ^= v1; + s[3] ^= v2; + FASTMIX_PERM(s[0], s[1], s[2], s[3]); + s[0] ^= v2; +} + +#ifdef CONFIG_SMP +/* + * This function is called when the CPU has just come online, with + * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE. + */ +int __cold random_online_cpu(unsigned int cpu) +{ + /* + * During CPU shutdown and before CPU onlining, add_interrupt_ + * randomness() may schedule mix_interrupt_randomness(), and + * set the MIX_INFLIGHT flag. However, because the worker can + * be scheduled on a different CPU during this period, that + * flag will never be cleared. For that reason, we zero out + * the flag here, which runs just after workqueues are onlined + * for the CPU again. This also has the effect of setting the + * irq randomness count to zero so that new accumulated irqs + * are fresh. + */ + per_cpu_ptr(&irq_randomness, cpu)->count = 0; + return 0; +} +#endif + +static void mix_interrupt_randomness(struct timer_list *work) +{ + struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix); + /* + * The size of the copied stack pool is explicitly 2 longs so that we + * only ever ingest half of the siphash output each time, retaining + * the other half as the next "key" that carries over. The entropy is + * supposed to be sufficiently dispersed between bits so on average + * we don't wind up "losing" some. + */ + unsigned long pool[2]; + unsigned int count; + + /* Check to see if we're running on the wrong CPU due to hotplug. */ + local_irq_disable(); + if (fast_pool != this_cpu_ptr(&irq_randomness)) { + local_irq_enable(); + return; + } + + /* + * Copy the pool to the stack so that the mixer always has a + * consistent view, before we reenable irqs again. 
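The [Half]SipHash-1-x construction used by the new fast_mix() above is small enough to reproduce in userspace. This sketch uses the 64-bit permutation and the standard SipHash initialization constants; the two sample inputs stand in for the cycle-counter and irq^RIP words fed in by add_interrupt_randomness():

#include <stdint.h>
#include <stdio.h>

#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))

/* The standard SipHash permutation, matching the kernel's SIPHASH_PERMUTATION. */
#define SIPROUND(v0, v1, v2, v3) do { \
	v0 += v1; v1 = ROTL(v1, 13); v1 ^= v0; v0 = ROTL(v0, 32); \
	v2 += v3; v3 = ROTL(v3, 16); v3 ^= v2; \
	v0 += v3; v3 = ROTL(v3, 21); v3 ^= v0; \
	v2 += v1; v1 = ROTL(v1, 17); v1 ^= v2; v2 = ROTL(v2, 32); \
} while (0)

/* SipHash-1-x ingestion of a two-word input, as in fast_mix() above. */
static void fast_mix(uint64_t s[4], uint64_t v1, uint64_t v2)
{
	s[3] ^= v1;
	SIPROUND(s[0], s[1], s[2], s[3]);
	s[0] ^= v1;
	s[3] ^= v2;
	SIPROUND(s[0], s[1], s[2], s[3]);
	s[0] ^= v2;
}

int main(void)
{
	/* SipHash initialization constants ("somepseudorandomlygeneratedbytes"). */
	uint64_t s[4] = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL,
			  0x6c7967656e657261ULL, 0x7465646279746573ULL };

	fast_mix(s, 0xdeadbeefULL /* cycle counter */, 42 /* irq ^ RIP */);
	printf("state: %016llx %016llx %016llx %016llx\n",
	       (unsigned long long)s[0], (unsigned long long)s[1],
	       (unsigned long long)s[2], (unsigned long long)s[3]);
	return 0;
}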
+ */ + memcpy(pool, fast_pool->pool, sizeof(pool)); + count = fast_pool->count; + fast_pool->count = 0; + fast_pool->last = jiffies; + local_irq_enable(); + + mix_pool_bytes(pool, sizeof(pool)); + credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8)); + + memzero_explicit(pool, sizeof(pool)); +} + +void add_interrupt_randomness(int irq) +{ + enum { MIX_INFLIGHT = 1U << 31 }; + unsigned long entropy = random_get_entropy(); + struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); + struct pt_regs *regs = get_irq_regs(); + unsigned int new_count; + + fast_mix(fast_pool->pool, entropy, + (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq)); + new_count = ++fast_pool->count; + + if (new_count & MIX_INFLIGHT) + return; + + if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ)) + return; + + fast_pool->count |= MIX_INFLIGHT; + if (!timer_pending(&fast_pool->mix)) { + fast_pool->mix.expires = jiffies; + add_timer_on(&fast_pool->mix, raw_smp_processor_id()); + } +} +EXPORT_SYMBOL_GPL(add_interrupt_randomness); + +/* There is one of these per entropy source */ +struct timer_rand_state { + unsigned long last_time; + long last_delta, last_delta2; +}; /* * This function adds entropy to the entropy "pool" by using timing - * delays. It uses the timer_rand_state structure to make an estimate - * of how many bits of entropy this call has added to the pool. - * - * The number "num" is also added to the pool - it should somehow describe - * the type of event which just happened. This is currently 0-255 for - * keyboard scan codes, and 256 upwards for interrupts. - * + * delays. It uses the timer_rand_state structure to make an estimate + * of how many bits of entropy this call has added to the pool. The + * value "num" is also added to the pool; it should somehow describe + * the type of event that just happened. */ -static void add_timer_randomness(struct timer_rand_state *state, unsigned num) +static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) { - struct entropy_store *r; - struct { - long jiffies; - unsigned cycles; - unsigned num; - } sample; + unsigned long entropy = random_get_entropy(), now = jiffies, flags; long delta, delta2, delta3; + unsigned int bits; - sample.jiffies = jiffies; - sample.cycles = random_get_entropy(); - sample.num = num; - r = &input_pool; - mix_pool_bytes(r, &sample, sizeof(sample)); + /* + * If we're in a hard IRQ, add_interrupt_randomness() will be called + * sometime after, so mix into the fast pool. + */ + if (in_irq()) { + fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num); + } else { + spin_lock_irqsave(&input_pool.lock, flags); + _mix_pool_bytes(&entropy, sizeof(entropy)); + _mix_pool_bytes(&num, sizeof(num)); + spin_unlock_irqrestore(&input_pool.lock, flags); + } + + if (crng_ready()) + return; /* * Calculate number of bits of randomness we probably added. * We take into account the first, second and third-order deltas * in order to make our estimate. */ - delta = sample.jiffies - READ_ONCE(state->last_time); - WRITE_ONCE(state->last_time, sample.jiffies); + delta = now - READ_ONCE(state->last_time); + WRITE_ONCE(state->last_time, now); delta2 = delta - READ_ONCE(state->last_delta); WRITE_ONCE(state->last_delta, delta); @@ -1211,392 +1083,65 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) delta = delta3; /* - * delta is now minimum absolute delta. - * Round down by 1 bit on general principles, - * and limit entropy estimate to 12 bits. 
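The delta-based estimator that add_timer_randomness() keeps (with the cap moving from 12 to 11 bits in this hunk) can be exercised in isolation. A rough userspace rendition with made-up event times; fls_long() is a portable stand-in for the kernel's fls():

#include <stdio.h>
#include <stdlib.h>

static int fls_long(long x)
{
	int r = 0;
	unsigned long v = (unsigned long)x;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

static long last_time, last_delta, last_delta2;

static unsigned int estimate_bits(long now)
{
	long delta = now - last_time;
	long delta2 = delta - last_delta;
	long delta3 = delta2 - last_delta2;

	last_time = now;
	last_delta = delta;
	last_delta2 = delta2;

	/* Take the minimum of the absolute first/second/third-order deltas. */
	delta = labs(delta);
	delta2 = labs(delta2);
	delta3 = labs(delta3);
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/* Round down by one bit and cap at 11 bits, as in the new code. */
	delta = fls_long(delta >> 1);
	return delta < 11 ? (unsigned int)delta : 11;
}

int main(void)
{
	long t[] = { 100, 228, 420, 1000 };	/* fake jiffies of events */
	size_t i;

	for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		printf("event at %ld: credit %u bits\n", t[i], estimate_bits(t[i]));
	return 0;
}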
+ * delta is now minimum absolute delta. Round down by 1 bit + * on general principles, and limit entropy estimate to 11 bits. */ - credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); + bits = min(fls(delta >> 1), 11); + + /* + * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness() + * will run after this, which uses a different crediting scheme of 1 bit + * per every 64 interrupts. In order to let that function do accounting + * close to the one in this function, we credit a full 64/64 bit per bit, + * and then subtract one to account for the extra one added. + */ + if (in_irq()) + this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1; + else + _credit_init_bits(bits); } -void add_input_randomness(unsigned int type, unsigned int code, - unsigned int value) +void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) { static unsigned char last_value; + static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES }; - /* ignore autorepeat and the like */ + /* Ignore autorepeat and the like. */ if (value == last_value) return; last_value = value; add_timer_randomness(&input_timer_state, (type << 4) ^ code ^ (code >> 4) ^ value); - trace_add_input_randomness(ENTROPY_BITS(&input_pool)); } EXPORT_SYMBOL_GPL(add_input_randomness); -static DEFINE_PER_CPU(struct fast_pool, irq_randomness); - -#ifdef ADD_INTERRUPT_BENCH -static unsigned long avg_cycles, avg_deviation; - -#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */ -#define FIXED_1_2 (1 << (AVG_SHIFT-1)) - -static void add_interrupt_bench(cycles_t start) -{ - long delta = random_get_entropy() - start; - - /* Use a weighted moving average */ - delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT); - avg_cycles += delta; - /* And average deviation */ - delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT); - avg_deviation += delta; -} -#else -#define add_interrupt_bench(x) -#endif - -static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) -{ - __u32 *ptr = (__u32 *) regs; - unsigned int idx; - - if (regs == NULL) - return 0; - idx = READ_ONCE(f->reg_idx); - if (idx >= sizeof(struct pt_regs) / sizeof(__u32)) - idx = 0; - ptr += idx++; - WRITE_ONCE(f->reg_idx, idx); - return *ptr; -} - -void add_interrupt_randomness(int irq, int irq_flags) -{ - struct entropy_store *r; - struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); - struct pt_regs *regs = get_irq_regs(); - unsigned long now = jiffies; - cycles_t cycles = random_get_entropy(); - __u32 c_high, j_high; - __u64 ip; - unsigned long seed; - int credit = 0; - - if (cycles == 0) - cycles = get_reg(fast_pool, regs); - c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; - j_high = (sizeof(now) > 4) ? now >> 32 : 0; - fast_pool->pool[0] ^= cycles ^ j_high ^ irq; - fast_pool->pool[1] ^= now ^ c_high; - ip = regs ? instruction_pointer(regs) : _RET_IP_; - fast_pool->pool[2] ^= ip; - fast_pool->pool[3] ^= (sizeof(ip) > 4) ? 
ip >> 32 : - get_reg(fast_pool, regs); - - fast_mix(fast_pool); - add_interrupt_bench(cycles); - - if (unlikely(crng_init == 0)) { - if ((fast_pool->count >= 64) && - crng_fast_load((char *) fast_pool->pool, - sizeof(fast_pool->pool)) > 0) { - fast_pool->count = 0; - fast_pool->last = now; - } - return; - } - - if ((fast_pool->count < 64) && - !time_after(now, fast_pool->last + HZ)) - return; - - r = &input_pool; - if (!spin_trylock(&r->lock)) - return; - - fast_pool->last = now; - __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool)); - - /* - * If we have architectural seed generator, produce a seed and - * add it to the pool. For the sake of paranoia don't let the - * architectural seed generator dominate the input from the - * interrupt noise. - */ - if (arch_get_random_seed_long(&seed)) { - __mix_pool_bytes(r, &seed, sizeof(seed)); - credit = 1; - } - spin_unlock(&r->lock); - - fast_pool->count = 0; - - /* award one bit for the contents of the fast pool */ - credit_entropy_bits(r, credit + 1); -} -EXPORT_SYMBOL_GPL(add_interrupt_randomness); - #ifdef CONFIG_BLOCK void add_disk_randomness(struct gendisk *disk) { if (!disk || !disk->random) return; - /* first major is 1, so we get >= 0x200 here */ + /* First major is 1, so we get >= 0x200 here. */ add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); - trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool)); } EXPORT_SYMBOL_GPL(add_disk_randomness); -#endif -/********************************************************************* - * - * Entropy extraction routines - * - *********************************************************************/ - -/* - * This function decides how many bytes to actually take from the - * given pool, and also debits the entropy count accordingly. - */ -static size_t account(struct entropy_store *r, size_t nbytes, int min, - int reserved) +void __cold rand_initialize_disk(struct gendisk *disk) { - int entropy_count, orig, have_bytes; - size_t ibytes, nfrac; - - BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); - - /* Can we pull enough? */ -retry: - entropy_count = orig = READ_ONCE(r->entropy_count); - ibytes = nbytes; - /* never pull more than available */ - have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); - - if ((have_bytes -= reserved) < 0) - have_bytes = 0; - ibytes = min_t(size_t, ibytes, have_bytes); - if (ibytes < min) - ibytes = 0; - - if (WARN_ON(entropy_count < 0)) { - pr_warn("negative entropy count: pool %s count %d\n", - r->name, entropy_count); - entropy_count = 0; - } - nfrac = ibytes << (ENTROPY_SHIFT + 3); - if ((size_t) entropy_count > nfrac) - entropy_count -= nfrac; - else - entropy_count = 0; - - if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) - goto retry; - - trace_debit_entropy(r->name, 8 * ibytes); - if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) { - wake_up_interruptible(&random_write_wait); - kill_fasync(&fasync, SIGIO, POLL_OUT); - } - - return ibytes; -} - -/* - * This function does the actual extraction for extract_entropy and - * extract_entropy_user. - * - * Note: we assume that .poolwords is a multiple of 16 words. 
- */ -static void extract_buf(struct entropy_store *r, __u8 *out) -{ - int i; - union { - __u32 w[5]; - unsigned long l[LONGS(20)]; - } hash; - __u32 workspace[SHA1_WORKSPACE_WORDS]; - unsigned long flags; + struct timer_rand_state *state; /* - * If we have an architectural hardware random number - * generator, use it for SHA's initial vector + * If kzalloc returns null, we just won't use that entropy + * source. */ - sha1_init(hash.w); - for (i = 0; i < LONGS(20); i++) { - unsigned long v; - if (!arch_get_random_long(&v)) - break; - hash.l[i] = v; + state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); + if (state) { + state->last_time = INITIAL_JIFFIES; + disk->random = state; } - - /* Generate a hash across the pool, 16 words (512 bits) at a time */ - spin_lock_irqsave(&r->lock, flags); - for (i = 0; i < r->poolinfo->poolwords; i += 16) - sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace); - - /* - * We mix the hash back into the pool to prevent backtracking - * attacks (where the attacker knows the state of the pool - * plus the current outputs, and attempts to find previous - * ouputs), unless the hash function can be inverted. By - * mixing at least a SHA1 worth of hash data back, we make - * brute-forcing the feedback as hard as brute-forcing the - * hash. - */ - __mix_pool_bytes(r, hash.w, sizeof(hash.w)); - spin_unlock_irqrestore(&r->lock, flags); - - memzero_explicit(workspace, sizeof(workspace)); - - /* - * In case the hash function has some recognizable output - * pattern, we fold it in half. Thus, we always feed back - * twice as much data as we output. - */ - hash.w[0] ^= hash.w[3]; - hash.w[1] ^= hash.w[4]; - hash.w[2] ^= rol32(hash.w[2], 16); - - memcpy(out, &hash, EXTRACT_SIZE); - memzero_explicit(&hash, sizeof(hash)); } - -static ssize_t _extract_entropy(struct entropy_store *r, void *buf, - size_t nbytes, int fips) -{ - ssize_t ret = 0, i; - __u8 tmp[EXTRACT_SIZE]; - unsigned long flags; - - while (nbytes) { - extract_buf(r, tmp); - - if (fips) { - spin_lock_irqsave(&r->lock, flags); - if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) - panic("Hardware RNG duplicated output!\n"); - memcpy(r->last_data, tmp, EXTRACT_SIZE); - spin_unlock_irqrestore(&r->lock, flags); - } - i = min_t(int, nbytes, EXTRACT_SIZE); - memcpy(buf, tmp, i); - nbytes -= i; - buf += i; - ret += i; - } - - /* Wipe data just returned from memory */ - memzero_explicit(tmp, sizeof(tmp)); - - return ret; -} - -/* - * This function extracts randomness from the "entropy pool", and - * returns it in a buffer. - * - * The min parameter specifies the minimum amount we can pull before - * failing to avoid races that defeat catastrophic reseeding while the - * reserved parameter indicates how much entropy we must leave in the - * pool after each pull to avoid starving other readers. 
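The fold at the end of the removed extract_buf() above deserves a concrete look: the 160-bit SHA-1 state is XOR-folded so that at most half of it is ever emitted, making the visible output harder to invert. A standalone sketch with illustrative values (not the kernel's code):

#include <stdio.h>
#include <stdint.h>

static uint32_t rol32(uint32_t w, unsigned int s)
{
	return (w << s) | (w >> (32 - s));
}

int main(void)
{
	/* Stand-in for a computed SHA-1 state (5 x 32-bit words). */
	uint32_t h[5] = { 0x67452301, 0xefcdab89, 0x98badcfe,
			  0x10325476, 0xc3d2e1f0 };

	/* The legacy fold: pairs of words are XORed together so the
	 * 160-bit state yields at most 96 visible bits... */
	h[0] ^= h[3];
	h[1] ^= h[4];
	h[2] ^= rol32(h[2], 16);

	/* ...of which the kernel copied only EXTRACT_SIZE (10) bytes. */
	printf("folded: %08x %08x %08x (first 80 bits used)\n",
	       h[0], h[1], h[2]);
	return 0;
}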
- */ -static ssize_t extract_entropy(struct entropy_store *r, void *buf, - size_t nbytes, int min, int reserved) -{ - __u8 tmp[EXTRACT_SIZE]; - unsigned long flags; - - /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */ - if (fips_enabled) { - spin_lock_irqsave(&r->lock, flags); - if (!r->last_data_init) { - r->last_data_init = 1; - spin_unlock_irqrestore(&r->lock, flags); - trace_extract_entropy(r->name, EXTRACT_SIZE, - ENTROPY_BITS(r), _RET_IP_); - extract_buf(r, tmp); - spin_lock_irqsave(&r->lock, flags); - memcpy(r->last_data, tmp, EXTRACT_SIZE); - } - spin_unlock_irqrestore(&r->lock, flags); - } - - trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_); - nbytes = account(r, nbytes, min, reserved); - - return _extract_entropy(r, buf, nbytes, fips_enabled); -} - -#define warn_unseeded_randomness(previous) \ - _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous)) - -static void _warn_unseeded_randomness(const char *func_name, void *caller, - void **previous) -{ -#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM - const bool print_once = false; -#else - static bool print_once __read_mostly; #endif - if (print_once || - crng_ready() || - (previous && (caller == READ_ONCE(*previous)))) - return; - WRITE_ONCE(*previous, caller); -#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM - print_once = true; -#endif - if (__ratelimit(&unseeded_warning)) - printk_deferred(KERN_NOTICE "random: %s called from %pS " - "with crng_init=%d\n", func_name, caller, - crng_init); -} - -/* - * This function is the exported kernel interface. It returns some - * number of good random numbers, suitable for key generation, seeding - * TCP sequence numbers, etc. It does not rely on the hardware random - * number generator. For random bytes direct from the hardware RNG - * (when available), use get_random_bytes_arch(). In order to ensure - * that the randomness provided by this function is okay, the function - * wait_for_random_bytes() should be called and return 0 at least once - * at any point prior. - */ -static void _get_random_bytes(void *buf, int nbytes) -{ - __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); - - trace_get_random_bytes(nbytes, _RET_IP_); - - while (nbytes >= CHACHA_BLOCK_SIZE) { - extract_crng(buf); - buf += CHACHA_BLOCK_SIZE; - nbytes -= CHACHA_BLOCK_SIZE; - } - - if (nbytes > 0) { - extract_crng(tmp); - memcpy(buf, tmp, nbytes); - crng_backtrack_protect(tmp, nbytes); - } else - crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE); - memzero_explicit(tmp, sizeof(tmp)); -} - -void get_random_bytes(void *buf, int nbytes) -{ - static void *previous; - - warn_unseeded_randomness(&previous); - _get_random_bytes(buf, nbytes); -} -EXPORT_SYMBOL(get_random_bytes); - - /* * Each time the timer fires, we expect that we got an unpredictable * jump in the cycle counter. Even if the timer is running on another @@ -1610,93 +1155,401 @@ EXPORT_SYMBOL(get_random_bytes); * * So the re-arming always happens in the entropy loop itself. */ -static void entropy_timer(struct timer_list *t) +static void __cold entropy_timer(struct timer_list *t) { - credit_entropy_bits(&input_pool, 1); + credit_init_bits(1); } /* * If we have an actual cycle counter, see if we can * generate enough entropy with timing noise */ -static void try_to_generate_entropy(void) +static void __cold try_to_generate_entropy(void) { struct { - unsigned long now; + unsigned long entropy; struct timer_list timer; } stack; - stack.now = random_get_entropy(); + stack.entropy = random_get_entropy(); /* Slow counter - or none. 
Don't even bother */ - if (stack.now == random_get_entropy()) + if (stack.entropy == random_get_entropy()) return; timer_setup_on_stack(&stack.timer, entropy_timer, 0); - while (!crng_ready()) { + while (!crng_ready() && !signal_pending(current)) { if (!timer_pending(&stack.timer)) - mod_timer(&stack.timer, jiffies+1); - mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now)); + mod_timer(&stack.timer, jiffies + 1); + mix_pool_bytes(&stack.entropy, sizeof(stack.entropy)); schedule(); - stack.now = random_get_entropy(); + stack.entropy = random_get_entropy(); } del_timer_sync(&stack.timer); destroy_timer_on_stack(&stack.timer); - mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now)); + mix_pool_bytes(&stack.entropy, sizeof(stack.entropy)); } -/* - * Wait for the urandom pool to be seeded and thus guaranteed to supply - * cryptographically secure random numbers. This applies to: the /dev/urandom - * device, the get_random_bytes function, and the get_random_{u32,u64,int,long} - * family of functions. Using any of these functions without first calling - * this function forfeits the guarantee of security. + +/********************************************************************** * - * Returns: 0 if the urandom pool has been seeded. - * -ERESTARTSYS if the function was interrupted by a signal. - */ -int wait_for_random_bytes(void) + * Userspace reader/writer interfaces. + * + * getrandom(2) is the primary modern interface into the RNG and should + * be used in preference to anything else. + * + * Reading from /dev/random has the same functionality as calling + * getrandom(2) with flags=0. In earlier versions, however, it had + * vastly different semantics and should therefore be avoided, to + * prevent backwards compatibility issues. + * + * Reading from /dev/urandom has the same functionality as calling + * getrandom(2) with flags=GRND_INSECURE. Because it does not block + * waiting for the RNG to be ready, it should not be used. + * + * Writing to either /dev/random or /dev/urandom adds entropy to + * the input pool but does not credit it. + * + * Polling on /dev/random indicates when the RNG is initialized, on + * the read side, and when it wants new entropy, on the write side. + * + * Both /dev/random and /dev/urandom have the same set of ioctls for + * adding entropy, getting the entropy count, zeroing the count, and + * reseeding the crng. + * + **********************************************************************/ + +SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags) { - if (likely(crng_ready())) + struct iov_iter iter; + struct iovec iov; + int ret; + + if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) + return -EINVAL; + + /* + * Requesting insecure and blocking randomness at the same time makes + * no sense. + */ + if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM)) + return -EINVAL; + + if (!crng_ready() && !(flags & GRND_INSECURE)) { + if (flags & GRND_NONBLOCK) + return -EAGAIN; + ret = wait_for_random_bytes(); + if (unlikely(ret)) + return ret; + } + + ret = import_single_range(READ, ubuf, len, &iov, &iter); + if (unlikely(ret)) + return ret; + return get_random_bytes_user(&iter); +} + +static __poll_t random_poll(struct file *file, poll_table *wait) +{ + poll_wait(file, &crng_init_wait, wait); + return crng_ready() ? 
EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM; +} + +static ssize_t write_pool_user(struct iov_iter *iter) +{ + u8 block[BLAKE2S_BLOCK_SIZE]; + ssize_t ret = 0; + size_t copied; + + if (unlikely(!iov_iter_count(iter))) return 0; - do { - int ret; - ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ); - if (ret) - return ret > 0 ? 0 : ret; + for (;;) { + copied = copy_from_iter(block, sizeof(block), iter); + ret += copied; + mix_pool_bytes(block, copied); + if (!iov_iter_count(iter) || copied != sizeof(block)) + break; - try_to_generate_entropy(); - } while (!crng_ready()); + BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0); + if (ret % PAGE_SIZE == 0) { + if (signal_pending(current)) + break; + cond_resched(); + } + } - return 0; + memzero_explicit(block, sizeof(block)); + return ret ? ret : -EFAULT; } -EXPORT_SYMBOL(wait_for_random_bytes); -/* - * Returns whether or not the urandom pool has been seeded and thus guaranteed - * to supply cryptographically secure random numbers. This applies to: the - * /dev/urandom device, the get_random_bytes function, and the get_random_{u32, - * ,u64,int,long} family of functions. - * - * Returns: true if the urandom pool has been seeded. - * false if the urandom pool has not been seeded. - */ -bool rng_is_initialized(void) +static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter) { - return crng_ready(); + return write_pool_user(iter); } -EXPORT_SYMBOL(rng_is_initialized); + +static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter) +{ + static int maxwarn = 10; + + if (!crng_ready()) { + if (!ratelimit_disable && maxwarn <= 0) + ++urandom_warning.missed; + else if (ratelimit_disable || __ratelimit(&urandom_warning)) { + --maxwarn; + pr_notice("%s: uninitialized urandom read (%zu bytes read)\n", + current->comm, iov_iter_count(iter)); + } + } + + return get_random_bytes_user(iter); +} + +static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter) +{ + int ret; + + if (!crng_ready() && + ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) || + (kiocb->ki_filp->f_flags & O_NONBLOCK))) + return -EAGAIN; + + ret = wait_for_random_bytes(); + if (ret != 0) + return ret; + return get_random_bytes_user(iter); +} + +static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) +{ + int __user *p = (int __user *)arg; + int ent_count; + + switch (cmd) { + case RNDGETENTCNT: + /* Inherently racy, no point locking. */ + if (put_user(input_pool.init_bits, p)) + return -EFAULT; + return 0; + case RNDADDTOENTCNT: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (get_user(ent_count, p)) + return -EFAULT; + if (ent_count < 0) + return -EINVAL; + credit_init_bits(ent_count); + return 0; + case RNDADDENTROPY: { + struct iov_iter iter; + struct iovec iov; + ssize_t ret; + int len; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (get_user(ent_count, p++)) + return -EFAULT; + if (ent_count < 0) + return -EINVAL; + if (get_user(len, p++)) + return -EFAULT; + ret = import_single_range(WRITE, p, len, &iov, &iter); + if (unlikely(ret)) + return ret; + ret = write_pool_user(&iter); + if (unlikely(ret < 0)) + return ret; + /* Since we're crediting, enforce that it was all written into the pool. */ + if (unlikely(ret != len)) + return -EFAULT; + credit_init_bits(ent_count); + return 0; + } + case RNDZAPENTCNT: + case RNDCLEARPOOL: + /* No longer has any effect. 
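For the RNDADDENTROPY path just shown, a privileged userspace caller supplies a struct rand_pool_info from <linux/random.h>. A minimal sketch (needs CAP_SYS_ADMIN; the seed bytes and sizes here are placeholders, and error handling is trimmed):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/random.h>

int main(void)
{
	/* rand_pool_info ends in a zero-length array, so reserve the
	 * payload space directly behind it. */
	struct {
		struct rand_pool_info info;
		unsigned char data[64];
	} req;
	int fd = open("/dev/random", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(req.data, 0xA5, sizeof(req.data)); /* stand-in seed bytes */
	req.info.entropy_count = 64;              /* bits to credit */
	req.info.buf_size = sizeof(req.data);     /* bytes to mix in */

	/* Mixes the buffer into the input pool and credits init bits;
	 * per the handler above, a short write now fails with EFAULT
	 * instead of silently crediting unmixed entropy. */
	if (ioctl(fd, RNDADDENTROPY, &req) < 0)
		perror("RNDADDENTROPY");
	close(fd);
	return 0;
}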
*/ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return 0; + case RNDRESEEDCRNG: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (!crng_ready()) + return -ENODATA; + crng_reseed(); + return 0; + default: + return -EINVAL; + } +} + +static int random_fasync(int fd, struct file *filp, int on) +{ + return fasync_helper(fd, filp, on, &fasync); +} + +const struct file_operations random_fops = { + .read_iter = random_read_iter, + .write_iter = random_write_iter, + .poll = random_poll, + .unlocked_ioctl = random_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .fasync = random_fasync, + .llseek = noop_llseek, + .splice_read = generic_file_splice_read, + .splice_write = iter_file_splice_write, +}; + +const struct file_operations urandom_fops = { + .read_iter = urandom_read_iter, + .write_iter = random_write_iter, + .unlocked_ioctl = random_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .fasync = random_fasync, + .llseek = noop_llseek, + .splice_read = generic_file_splice_read, + .splice_write = iter_file_splice_write, +}; + + +/******************************************************************** + * + * Sysctl interface. + * + * These are partly unused legacy knobs with dummy values to not break + * userspace and partly still useful things. They are usually accessible + * in /proc/sys/kernel/random/ and are as follows: + * + * - boot_id - a UUID representing the current boot. + * + * - uuid - a random UUID, different each time the file is read. + * + * - poolsize - the number of bits of entropy that the input pool can + * hold, tied to the POOL_BITS constant. + * + * - entropy_avail - the number of bits of entropy currently in the + * input pool. Always <= poolsize. + * + * - write_wakeup_threshold - the amount of entropy in the input pool + * below which write polls to /dev/random will unblock, requesting + * more entropy, tied to the POOL_READY_BITS constant. It is writable + * to avoid breaking old userspaces, but writing to it does not + * change any behavior of the RNG. + * + * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL. + * It is writable to avoid breaking old userspaces, but writing + * to it does not change any behavior of the RNG. + * + ********************************************************************/ + +#ifdef CONFIG_SYSCTL + +#include + +static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ; +static int sysctl_random_write_wakeup_bits = POOL_READY_BITS; +static int sysctl_poolsize = POOL_BITS; +static u8 sysctl_bootid[UUID_SIZE]; /* - * Add a callback function that will be invoked when the nonblocking - * pool is initialised. - * - * returns: 0 if callback is successfully added - * -EALREADY if pool is already initialised (callback not called) - * -ENOENT if module for callback is not alive + * This function is used to return both the bootid UUID, and random + * UUID. The difference is in whether table->data is NULL; if it is, + * then a new UUID is generated and returned to the user. 
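The behavior proc_do_uuid() implements is easy to observe from userspace; a small sketch:

#include <stdio.h>

static void show(const char *path)
{
	char line[64];
	FILE *f = fopen(path, "r");

	if (f && fgets(line, sizeof(line), f))
		printf("%s: %s", path, line);
	if (f)
		fclose(f);
}

int main(void)
{
	show("/proc/sys/kernel/random/boot_id"); /* stable until reboot */
	show("/proc/sys/kernel/random/uuid");    /* differs...          */
	show("/proc/sys/kernel/random/uuid");    /* ...on every read    */
	return 0;
}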
*/ +static int proc_do_uuid(struct ctl_table *table, int write, void *buf, + size_t *lenp, loff_t *ppos) +{ + u8 tmp_uuid[UUID_SIZE], *uuid; + char uuid_string[UUID_STRING_LEN + 1]; + struct ctl_table fake_table = { + .data = uuid_string, + .maxlen = UUID_STRING_LEN + }; + + if (write) + return -EPERM; + + uuid = table->data; + if (!uuid) { + uuid = tmp_uuid; + generate_random_uuid(uuid); + } else { + static DEFINE_SPINLOCK(bootid_spinlock); + + spin_lock(&bootid_spinlock); + if (!uuid[8]) + generate_random_uuid(uuid); + spin_unlock(&bootid_spinlock); + } + + snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid); + return proc_dostring(&fake_table, 0, buf, lenp, ppos); +} + +/* The same as proc_dointvec, but writes don't change anything. */ +static int proc_do_rointvec(struct ctl_table *table, int write, void *buf, + size_t *lenp, loff_t *ppos) +{ + return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos); +} + +extern struct ctl_table random_table[]; +struct ctl_table random_table[] = { + { + .procname = "poolsize", + .data = &sysctl_poolsize, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_dointvec, + }, + { + .procname = "entropy_avail", + .data = &input_pool.init_bits, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_dointvec, + }, + { + .procname = "write_wakeup_threshold", + .data = &sysctl_random_write_wakeup_bits, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_do_rointvec, + }, + { + .procname = "urandom_min_reseed_secs", + .data = &sysctl_random_min_urandom_seed, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_do_rointvec, + }, + { + .procname = "boot_id", + .data = &sysctl_bootid, + .mode = 0444, + .proc_handler = proc_do_uuid, + }, + { + .procname = "uuid", + .mode = 0444, + .proc_handler = proc_do_uuid, + }, + { } +}; +#endif /* CONFIG_SYSCTL */ + +/* + * Android KABI fixups + * + * Add back two functions that were being used by out-of-tree drivers. + * + * Yes, horrible hack, the things we do for FIPS "compliance"... + */ +static DEFINE_SPINLOCK(random_ready_list_lock); +static LIST_HEAD(random_ready_list); + int add_random_ready_callback(struct random_ready_callback *rdy) { struct module *owner; @@ -1728,9 +1581,6 @@ out: } EXPORT_SYMBOL(add_random_ready_callback); -/* - * Delete a previously registered readiness callback function. - */ void del_random_ready_callback(struct random_ready_callback *rdy) { unsigned long flags; @@ -1747,612 +1597,18 @@ void del_random_ready_callback(struct random_ready_callback *rdy) } EXPORT_SYMBOL(del_random_ready_callback); -/* - * This function will use the architecture-specific hardware random - * number generator if it is available. The arch-specific hw RNG will - * almost certainly be faster than what we can do in software, but it - * is impossible to verify that it is implemented securely (as - * opposed, to, say, the AES encryption of a sequence number using a - * key known by the NSA). So it's useful if we need the speed, but - * only if we're willing to trust the hardware manufacturer not to - * have put in a back door. - * - * Return number of bytes filled in. 
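For reference, the chunked-fill pattern that the removed get_random_bytes_arch() below implements, sketched with a hypothetical arch hook (the stub value is obviously not random; a real build would use e.g. RDSEED on x86):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Hypothetical arch hook standing in for arch_get_random_long(). */
static bool arch_get_random_word(unsigned long *v)
{
	*v = 0x0123456789abcdefUL; /* stub value for the sketch */
	return true;
}

/* Fill word-sized chunks until the hardware declines, and report how
 * many bytes were actually filled. */
static int fill_from_arch(void *buf, int nbytes)
{
	char *p = buf;
	int left = nbytes;

	while (left) {
		unsigned long v;
		int chunk = left < (int)sizeof(v) ? left : (int)sizeof(v);

		if (!arch_get_random_word(&v))
			break;
		memcpy(p, &v, chunk);
		p += chunk;
		left -= chunk;
	}
	return nbytes - left;
}

int main(void)
{
	unsigned char buf[20];

	printf("filled %d of %zu bytes\n",
	       fill_from_arch(buf, sizeof(buf)), sizeof(buf));
	return 0;
}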
- */ -int __must_check get_random_bytes_arch(void *buf, int nbytes) -{ - int left = nbytes; - char *p = buf; - - trace_get_random_bytes_arch(left, _RET_IP_); - while (left) { - unsigned long v; - int chunk = min_t(int, left, sizeof(unsigned long)); - - if (!arch_get_random_long(&v)) - break; - - memcpy(p, &v, chunk); - p += chunk; - left -= chunk; - } - - return nbytes - left; -} -EXPORT_SYMBOL(get_random_bytes_arch); - -/* - * init_std_data - initialize pool with system data - * - * @r: pool to initialize - * - * This function clears the pool's entropy count and mixes some system - * data into the pool to prepare it for use. The pool is not cleared - * as that can only decrease the entropy in the pool. - */ -static void __init init_std_data(struct entropy_store *r) -{ - int i; - ktime_t now = ktime_get_real(); - unsigned long rv; - - mix_pool_bytes(r, &now, sizeof(now)); - for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) { - if (!arch_get_random_seed_long(&rv) && - !arch_get_random_long(&rv)) - rv = random_get_entropy(); - mix_pool_bytes(r, &rv, sizeof(rv)); - } - mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); -} - -/* - * Note that setup_arch() may call add_device_randomness() - * long before we get here. This allows seeding of the pools - * with some platform dependent data very early in the boot - * process. But it limits our options here. We must use - * statically allocated structures that already have all - * initializations complete at compile time. We should also - * take care not to overwrite the precious per platform data - * we were given. - */ -int __init rand_initialize(void) -{ - init_std_data(&input_pool); - if (crng_need_final_init) - crng_finalize_init(&primary_crng); - crng_initialize_primary(&primary_crng); - crng_global_init_time = jiffies; - if (ratelimit_disable) { - urandom_warning.interval = 0; - unseeded_warning.interval = 0; - } - return 0; -} - -#ifdef CONFIG_BLOCK -void rand_initialize_disk(struct gendisk *disk) -{ - struct timer_rand_state *state; - - /* - * If kzalloc returns null, we just won't use that entropy - * source. 
- */ - state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); - if (state) { - state->last_time = INITIAL_JIFFIES; - disk->random = state; - } -} -#endif - -static ssize_t -urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos) -{ - int ret; - - nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3)); - ret = extract_crng_user(buf, nbytes); - trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool)); - return ret; -} - -static ssize_t -urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) +static void process_oldschool_random_ready_list(void) { unsigned long flags; - static int maxwarn = 10; + struct random_ready_callback *rdy, *tmp; - if (!crng_ready() && maxwarn > 0) { - maxwarn--; - if (__ratelimit(&urandom_warning)) - pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", - current->comm, nbytes); - spin_lock_irqsave(&primary_crng.lock, flags); - crng_init_cnt = 0; - spin_unlock_irqrestore(&primary_crng.lock, flags); + spin_lock_irqsave(&random_ready_list_lock, flags); + list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) { + struct module *owner = rdy->owner; + + list_del_init(&rdy->list); + rdy->func(rdy); + module_put(owner); } - - return urandom_read_nowarn(file, buf, nbytes, ppos); + spin_unlock_irqrestore(&random_ready_list_lock, flags); } - -static ssize_t -random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) -{ - int ret; - - ret = wait_for_random_bytes(); - if (ret != 0) - return ret; - return urandom_read_nowarn(file, buf, nbytes, ppos); -} - -static __poll_t -random_poll(struct file *file, poll_table * wait) -{ - __poll_t mask; - - poll_wait(file, &crng_init_wait, wait); - poll_wait(file, &random_write_wait, wait); - mask = 0; - if (crng_ready()) - mask |= EPOLLIN | EPOLLRDNORM; - if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits) - mask |= EPOLLOUT | EPOLLWRNORM; - return mask; -} - -static int -write_pool(struct entropy_store *r, const char __user *buffer, size_t count) -{ - size_t bytes; - __u32 t, buf[16]; - const char __user *p = buffer; - - while (count > 0) { - int b, i = 0; - - bytes = min(count, sizeof(buf)); - if (copy_from_user(&buf, p, bytes)) - return -EFAULT; - - for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) { - if (!arch_get_random_int(&t)) - break; - buf[i] ^= t; - } - - count -= bytes; - p += bytes; - - mix_pool_bytes(r, buf, bytes); - cond_resched(); - } - - return 0; -} - -static ssize_t random_write(struct file *file, const char __user *buffer, - size_t count, loff_t *ppos) -{ - size_t ret; - - ret = write_pool(&input_pool, buffer, count); - if (ret) - return ret; - - return (ssize_t)count; -} - -static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) -{ - int size, ent_count; - int __user *p = (int __user *)arg; - int retval; - - switch (cmd) { - case RNDGETENTCNT: - /* inherently racy, no point locking */ - ent_count = ENTROPY_BITS(&input_pool); - if (put_user(ent_count, p)) - return -EFAULT; - return 0; - case RNDADDTOENTCNT: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (get_user(ent_count, p)) - return -EFAULT; - return credit_entropy_bits_safe(&input_pool, ent_count); - case RNDADDENTROPY: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (get_user(ent_count, p++)) - return -EFAULT; - if (ent_count < 0) - return -EINVAL; - if (get_user(size, p++)) - return -EFAULT; - retval = write_pool(&input_pool, (const char __user *)p, - size); - if (retval < 0) - return retval; - return 
credit_entropy_bits_safe(&input_pool, ent_count); - case RNDZAPENTCNT: - case RNDCLEARPOOL: - /* - * Clear the entropy pool counters. We no longer clear - * the entropy pool, as that's silly. - */ - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) { - wake_up_interruptible(&random_write_wait); - kill_fasync(&fasync, SIGIO, POLL_OUT); - } - return 0; - case RNDRESEEDCRNG: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (crng_init < 2) - return -ENODATA; - crng_reseed(&primary_crng, &input_pool); - WRITE_ONCE(crng_global_init_time, jiffies - 1); - return 0; - default: - return -EINVAL; - } -} - -static int random_fasync(int fd, struct file *filp, int on) -{ - return fasync_helper(fd, filp, on, &fasync); -} - -const struct file_operations random_fops = { - .read = random_read, - .write = random_write, - .poll = random_poll, - .unlocked_ioctl = random_ioctl, - .compat_ioctl = compat_ptr_ioctl, - .fasync = random_fasync, - .llseek = noop_llseek, -}; - -const struct file_operations urandom_fops = { - .read = urandom_read, - .write = random_write, - .unlocked_ioctl = random_ioctl, - .compat_ioctl = compat_ptr_ioctl, - .fasync = random_fasync, - .llseek = noop_llseek, -}; - -SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, - unsigned int, flags) -{ - int ret; - - if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE)) - return -EINVAL; - - /* - * Requesting insecure and blocking randomness at the same time makes - * no sense. - */ - if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM)) - return -EINVAL; - - if (count > INT_MAX) - count = INT_MAX; - - if (!(flags & GRND_INSECURE) && !crng_ready()) { - if (flags & GRND_NONBLOCK) - return -EAGAIN; - ret = wait_for_random_bytes(); - if (unlikely(ret)) - return ret; - } - return urandom_read_nowarn(NULL, buf, count, NULL); -} - -/******************************************************************** - * - * Sysctl interface - * - ********************************************************************/ - -#ifdef CONFIG_SYSCTL - -#include - -static int min_write_thresh; -static int max_write_thresh = INPUT_POOL_WORDS * 32; -static int random_min_urandom_seed = 60; -static char sysctl_bootid[16]; - -/* - * This function is used to return both the bootid UUID, and random - * UUID. The difference is in whether table->data is NULL; if it is, - * then a new UUID is generated and returned to the user. - * - * If the user accesses this via the proc interface, the UUID will be - * returned as an ASCII string in the standard UUID format; if via the - * sysctl system call, as 16 bytes of binary data. 
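The modern replacement for both removed read paths is getrandom(2); a userspace sketch of the flag semantics documented above (glibc exposes the wrapper in <sys/random.h>; GRND_INSECURE needs kernel 5.6+ headers):

#include <stdio.h>
#include <errno.h>
#include <sys/random.h>

int main(void)
{
	unsigned char key[32];

	/* flags=0 blocks until the RNG is initialized, like /dev/random. */
	ssize_t n = getrandom(key, sizeof(key), 0);
	if (n < 0) {
		perror("getrandom");
		return 1;
	}

	/* GRND_NONBLOCK fails with EAGAIN instead of waiting; on an
	 * already-seeded system this call simply succeeds. */
	if (getrandom(key, sizeof(key), GRND_NONBLOCK) < 0 && errno == EAGAIN)
		fprintf(stderr, "RNG not yet seeded\n");

	/* GRND_INSECURE | GRND_RANDOM is rejected, per the handler above. */
	if (getrandom(key, sizeof(key), GRND_INSECURE | GRND_RANDOM) < 0)
		fprintf(stderr, "EINVAL as expected (errno=%d)\n", errno);

	return 0;
}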
- */ -static int proc_do_uuid(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) -{ - struct ctl_table fake_table; - unsigned char buf[64], tmp_uuid[16], *uuid; - - uuid = table->data; - if (!uuid) { - uuid = tmp_uuid; - generate_random_uuid(uuid); - } else { - static DEFINE_SPINLOCK(bootid_spinlock); - - spin_lock(&bootid_spinlock); - if (!uuid[8]) - generate_random_uuid(uuid); - spin_unlock(&bootid_spinlock); - } - - sprintf(buf, "%pU", uuid); - - fake_table.data = buf; - fake_table.maxlen = sizeof(buf); - - return proc_dostring(&fake_table, write, buffer, lenp, ppos); -} - -/* - * Return entropy available scaled to integral bits - */ -static int proc_do_entropy(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) -{ - struct ctl_table fake_table; - int entropy_count; - - entropy_count = *(int *)table->data >> ENTROPY_SHIFT; - - fake_table.data = &entropy_count; - fake_table.maxlen = sizeof(entropy_count); - - return proc_dointvec(&fake_table, write, buffer, lenp, ppos); -} - -static int sysctl_poolsize = INPUT_POOL_WORDS * 32; -extern struct ctl_table random_table[]; -struct ctl_table random_table[] = { - { - .procname = "poolsize", - .data = &sysctl_poolsize, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = proc_dointvec, - }, - { - .procname = "entropy_avail", - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = proc_do_entropy, - .data = &input_pool.entropy_count, - }, - { - .procname = "write_wakeup_threshold", - .data = &random_write_wakeup_bits, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_write_thresh, - .extra2 = &max_write_thresh, - }, - { - .procname = "urandom_min_reseed_secs", - .data = &random_min_urandom_seed, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, - { - .procname = "boot_id", - .data = &sysctl_bootid, - .maxlen = 16, - .mode = 0444, - .proc_handler = proc_do_uuid, - }, - { - .procname = "uuid", - .maxlen = 16, - .mode = 0444, - .proc_handler = proc_do_uuid, - }, -#ifdef ADD_INTERRUPT_BENCH - { - .procname = "add_interrupt_avg_cycles", - .data = &avg_cycles, - .maxlen = sizeof(avg_cycles), - .mode = 0444, - .proc_handler = proc_doulongvec_minmax, - }, - { - .procname = "add_interrupt_avg_deviation", - .data = &avg_deviation, - .maxlen = sizeof(avg_deviation), - .mode = 0444, - .proc_handler = proc_doulongvec_minmax, - }, -#endif - { } -}; -#endif /* CONFIG_SYSCTL */ - -struct batched_entropy { - union { - u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)]; - u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)]; - }; - unsigned int position; - spinlock_t batch_lock; -}; - -/* - * Get a random word for internal kernel use only. The quality of the random - * number is good as /dev/urandom, but there is no backtrack protection, with - * the goal of being quite fast and not depleting entropy. In order to ensure - * that the randomness provided by this function is okay, the function - * wait_for_random_bytes() should be called and return 0 at least once at any - * point prior. 
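The batching scheme described above can be mimicked in userspace, with getrandom(2) standing in for the kernel's slow refill path (single-threaded sketch; the kernel keeps one batch per CPU rather than one global one):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/random.h>

#define BATCH_WORDS 8

static uint64_t batch[BATCH_WORDS];
static unsigned int position = BATCH_WORDS; /* start empty */

static uint64_t batched_random_u64(void)
{
	if (position == BATCH_WORDS) {
		/* Slow path: refill the whole batch in one syscall. */
		if (getrandom(batch, sizeof(batch), 0) != sizeof(batch))
			abort();
		position = 0;
	}
	return batch[position++]; /* fast path: no syscall */
}

int main(void)
{
	/* 20 words cost only 3 refills with a batch of 8. */
	for (int i = 0; i < 20; i++)
		printf("%016llx\n",
		       (unsigned long long)batched_random_u64());
	return 0;
}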
- */ -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { - .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock), -}; - -u64 get_random_u64(void) -{ - u64 ret; - unsigned long flags; - struct batched_entropy *batch; - static void *previous; - - warn_unseeded_randomness(&previous); - - batch = raw_cpu_ptr(&batched_entropy_u64); - spin_lock_irqsave(&batch->batch_lock, flags); - if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { - extract_crng((u8 *)batch->entropy_u64); - batch->position = 0; - } - ret = batch->entropy_u64[batch->position++]; - spin_unlock_irqrestore(&batch->batch_lock, flags); - return ret; -} -EXPORT_SYMBOL(get_random_u64); - -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { - .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock), -}; -u32 get_random_u32(void) -{ - u32 ret; - unsigned long flags; - struct batched_entropy *batch; - static void *previous; - - warn_unseeded_randomness(&previous); - - batch = raw_cpu_ptr(&batched_entropy_u32); - spin_lock_irqsave(&batch->batch_lock, flags); - if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { - extract_crng((u8 *)batch->entropy_u32); - batch->position = 0; - } - ret = batch->entropy_u32[batch->position++]; - spin_unlock_irqrestore(&batch->batch_lock, flags); - return ret; -} -EXPORT_SYMBOL(get_random_u32); - -/* It's important to invalidate all potential batched entropy that might - * be stored before the crng is initialized, which we can do lazily by - * simply resetting the counter to zero so that it's re-extracted on the - * next usage. */ -static void invalidate_batched_entropy(void) -{ - int cpu; - unsigned long flags; - - for_each_possible_cpu (cpu) { - struct batched_entropy *batched_entropy; - - batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu); - spin_lock_irqsave(&batched_entropy->batch_lock, flags); - batched_entropy->position = 0; - spin_unlock(&batched_entropy->batch_lock); - - batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu); - spin_lock(&batched_entropy->batch_lock); - batched_entropy->position = 0; - spin_unlock_irqrestore(&batched_entropy->batch_lock, flags); - } -} - -/** - * randomize_page - Generate a random, page aligned address - * @start: The smallest acceptable address the caller will take. - * @range: The size of the area, starting at @start, within which the - * random address must fall. - * - * If @start + @range would overflow, @range is capped. - * - * NOTE: Historical use of randomize_range, which this replaces, presumed that - * @start was already page aligned. We now align it regardless. - * - * Return: A page aligned address within [start, start + range). On error, - * @start is returned. - */ -unsigned long -randomize_page(unsigned long start, unsigned long range) -{ - if (!PAGE_ALIGNED(start)) { - range -= PAGE_ALIGN(start) - start; - start = PAGE_ALIGN(start); - } - - if (start > ULONG_MAX - range) - range = ULONG_MAX - start; - - range >>= PAGE_SHIFT; - - if (range == 0) - return start; - - return start + (get_random_long() % range << PAGE_SHIFT); -} - -/* Interface for in-kernel drivers of true hardware RNGs. - * Those devices may produce endless random bits and will be throttled - * when our pool is full. 
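The randomize_page() arithmetic above checks out with concrete numbers; a userspace re-creation assuming 4 KiB pages (rand() stands in for get_random_long() and is not suitable for real ASLR):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long randomize_page_demo(unsigned long start,
					 unsigned long range)
{
	if (start & (PAGE_SIZE - 1)) {     /* align start up */
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}
	if (start > ULONG_MAX - range)     /* cap an overflowing range */
		range = ULONG_MAX - start;
	range >>= PAGE_SHIFT;              /* count whole pages */
	if (range == 0)
		return start;
	return start + (((unsigned long)rand() % range) << PAGE_SHIFT);
}

int main(void)
{
	/* start 0x1234 aligns up to 0x2000; 15 full pages remain. */
	unsigned long a = randomize_page_demo(0x1234, 0x10000);

	printf("picked 0x%lx (page aligned: %s)\n",
	       a, (a & (PAGE_SIZE - 1)) ? "no" : "yes");
	return 0;
}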
- */ -void add_hwgenerator_randomness(const char *buffer, size_t count, - size_t entropy) -{ - struct entropy_store *poolp = &input_pool; - - if (unlikely(crng_init == 0)) { - size_t ret = crng_fast_load(buffer, count); - count -= ret; - buffer += ret; - if (!count || crng_init == 0) - return; - } - - /* Suspend writing if we're above the trickle threshold. - * We'll be woken up again once below random_write_wakeup_thresh, - * or when the calling thread is about to terminate. - */ - wait_event_interruptible(random_write_wait, - !system_wq || kthread_should_stop() || - ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits); - mix_pool_bytes(poolp, buffer, count); - credit_entropy_bits(poolp, entropy); -} -EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); - -/* Handle random seed passed by bootloader. - * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise - * it would be regarded as device data. - * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER. - */ -void add_bootloader_randomness(const void *buf, unsigned int size) -{ - if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER)) - add_hwgenerator_randomness(buf, size, size * 8); - else - add_device_randomness(buf, size); -} -EXPORT_SYMBOL_GPL(add_bootloader_randomness); diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 1621ce818705..d69905233aff 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -401,13 +401,14 @@ int tpm_pm_suspend(struct device *dev) !pm_suspend_via_firmware()) goto suspended; - if (!tpm_chip_start(chip)) { + rc = tpm_try_get_ops(chip); + if (!rc) { if (chip->flags & TPM_CHIP_FLAG_TPM2) tpm2_shutdown(chip, TPM2_SU_STATE); else rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); - tpm_chip_stop(chip); + tpm_put_ops(chip); } suspended: diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index c84d23951219..d0e11d7a3c08 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -400,7 +400,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, if (!rc) { out = (struct tpm2_get_cap_out *) &buf.data[TPM_HEADER_SIZE]; - *value = be32_to_cpu(out->value); + /* + * To prevent failing boot up of some systems, Infineon TPM2.0 + * returns SUCCESS on TPM2_Startup in field upgrade mode. Also + * the TPM2_Getcapability command returns a zero length list + * in field upgrade mode. 
+ */ + if (be32_to_cpu(out->property_cnt) > 0) + *value = be32_to_cpu(out->value); + else + rc = -ENODATA; } tpm_buf_destroy(&buf); return rc; diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 3ca7528322f5..a1ec722d62a7 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -683,6 +683,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, if (!wait_event_timeout(ibmvtpm->crq_queue.wq, ibmvtpm->rtce_buf != NULL, HZ)) { + rc = -ENODEV; dev_err(dev, "CRQ response timed out\n"); goto init_irq_cleanup; } diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 3dd4deb60adb..6d361420ffe8 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2239,7 +2239,7 @@ static struct virtio_driver virtio_rproc_serial = { .remove = virtcons_remove, }; -static int __init init(void) +static int __init virtio_console_init(void) { int err; @@ -2276,7 +2276,7 @@ free: return err; } -static void __exit fini(void) +static void __exit virtio_console_fini(void) { reclaim_dma_bufs(); @@ -2286,8 +2286,8 @@ static void __exit fini(void) class_destroy(pdrvdata.class); debugfs_remove_recursive(pdrvdata.debugfs_dir); } -module_init(init); -module_exit(fini); +module_init(virtio_console_init); +module_exit(virtio_console_fini); MODULE_DESCRIPTION("Virtio console driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c index 2c3d8e6ca63c..7cc20c0f8865 100644 --- a/drivers/clk/at91/at91rm9200.c +++ b/drivers/clk/at91/at91rm9200.c @@ -38,7 +38,7 @@ static const struct clk_pll_characteristics rm9200_pll_characteristics = { }; static const struct sck at91rm9200_systemck[] = { - { .n = "udpck", .p = "usbck", .id = 2 }, + { .n = "udpck", .p = "usbck", .id = 1 }, { .n = "uhpck", .p = "usbck", .id = 4 }, { .n = "pck0", .p = "prog0", .id = 8 }, { .n = "pck1", .p = "prog1", .id = 9 }, diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c index b656d25a9767..fe772baeb15f 100644 --- a/drivers/clk/at91/clk-generated.c +++ b/drivers/clk/at91/clk-generated.c @@ -106,6 +106,10 @@ static void clk_generated_best_diff(struct clk_rate_request *req, tmp_rate = parent_rate; else tmp_rate = parent_rate / div; + + if (tmp_rate < req->min_rate || tmp_rate > req->max_rate) + return; + tmp_diff = abs(req->rate - tmp_rate); if (*best_diff < 0 || *best_diff >= tmp_diff) { diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c index 4062092d67f9..a6642f3d33d4 100644 --- a/drivers/clk/baikal-t1/ccu-div.c +++ b/drivers/clk/baikal-t1/ccu-div.c @@ -34,6 +34,7 @@ #define CCU_DIV_CTL_CLKDIV_MASK(_width) \ GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD) #define CCU_DIV_CTL_LOCK_SHIFTED BIT(27) +#define CCU_DIV_CTL_GATE_REF_BUF BIT(28) #define CCU_DIV_CTL_LOCK_NORMAL BIT(31) #define CCU_DIV_RST_DELAY_US 1 @@ -170,6 +171,40 @@ static int ccu_div_gate_is_enabled(struct clk_hw *hw) return !!(val & CCU_DIV_CTL_EN); } +static int ccu_div_buf_enable(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + CCU_DIV_CTL_GATE_REF_BUF, 0); + spin_unlock_irqrestore(&div->lock, flags); + + return 0; +} + +static void ccu_div_buf_disable(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + 
CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF); + spin_unlock_irqrestore(&div->lock, flags); +} + +static int ccu_div_buf_is_enabled(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + u32 val = 0; + + regmap_read(div->sys_regs, div->reg_ctl, &val); + + return !(val & CCU_DIV_CTL_GATE_REF_BUF); +} + static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -323,6 +358,7 @@ static const struct ccu_div_dbgfs_bit ccu_div_bits[] = { CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN), CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST), CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV), + CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF), CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL) }; @@ -441,6 +477,9 @@ static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry) continue; } + if (!strcmp("div_buf", name)) + continue; + bits[didx] = ccu_div_bits[bidx]; bits[didx].div = div; @@ -477,6 +516,21 @@ static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry) &ccu_div_dbgfs_fixed_clkdiv_fops); } +static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry) +{ + struct ccu_div *div = to_ccu_div(hw); + struct ccu_div_dbgfs_bit *bit; + + bit = kmalloc(sizeof(*bit), GFP_KERNEL); + if (!bit) + return; + + *bit = ccu_div_bits[3]; + bit->div = div; + debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit, + &ccu_div_dbgfs_bit_fops); +} + static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry) { struct ccu_div *div = to_ccu_div(hw); @@ -489,6 +543,7 @@ static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry) #define ccu_div_var_debug_init NULL #define ccu_div_gate_debug_init NULL +#define ccu_div_buf_debug_init NULL #define ccu_div_fixed_debug_init NULL #endif /* !CONFIG_DEBUG_FS */ @@ -520,6 +575,13 @@ static const struct clk_ops ccu_div_gate_ops = { .debug_init = ccu_div_gate_debug_init }; +static const struct clk_ops ccu_div_buf_ops = { + .enable = ccu_div_buf_enable, + .disable = ccu_div_buf_disable, + .is_enabled = ccu_div_buf_is_enabled, + .debug_init = ccu_div_buf_debug_init +}; + static const struct clk_ops ccu_div_fixed_ops = { .recalc_rate = ccu_div_fixed_recalc_rate, .round_rate = ccu_div_fixed_round_rate, @@ -566,6 +628,8 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init) } else if (div_init->type == CCU_DIV_GATE) { hw_init.ops = &ccu_div_gate_ops; div->divider = div_init->divider; + } else if (div_init->type == CCU_DIV_BUF) { + hw_init.ops = &ccu_div_buf_ops; } else if (div_init->type == CCU_DIV_FIXED) { hw_init.ops = &ccu_div_fixed_ops; div->divider = div_init->divider; @@ -579,6 +643,7 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init) goto err_free_div; } parent_data.fw_name = div_init->parent_name; + parent_data.name = div_init->parent_name; hw_init.parent_data = &parent_data; hw_init.num_parents = 1; diff --git a/drivers/clk/baikal-t1/ccu-div.h b/drivers/clk/baikal-t1/ccu-div.h index 795665caefbd..4eb49ff4803c 100644 --- a/drivers/clk/baikal-t1/ccu-div.h +++ b/drivers/clk/baikal-t1/ccu-div.h @@ -13,6 +13,14 @@ #include #include +/* + * CCU Divider private clock IDs + * @CCU_SYS_SATA_CLK: CCU SATA internal clock + * @CCU_SYS_XGMAC_CLK: CCU XGMAC internal clock + */ +#define CCU_SYS_SATA_CLK -1 +#define CCU_SYS_XGMAC_CLK -2 + /* * CCU Divider private flags * @CCU_DIV_SKIP_ONE: Due to some reason divider can't be set to 1. 
@@ -31,11 +39,13 @@ * enum ccu_div_type - CCU Divider types * @CCU_DIV_VAR: Clocks gate with variable divider. * @CCU_DIV_GATE: Clocks gate with fixed divider. + * @CCU_DIV_BUF: Clock gate with no divider. * @CCU_DIV_FIXED: Ungateable clock with fixed divider. */ enum ccu_div_type { CCU_DIV_VAR, CCU_DIV_GATE, + CCU_DIV_BUF, CCU_DIV_FIXED }; diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c index f141fda12b09..90f4fda406ee 100644 --- a/drivers/clk/baikal-t1/clk-ccu-div.c +++ b/drivers/clk/baikal-t1/clk-ccu-div.c @@ -76,6 +76,16 @@ .divider = _divider \ } +#define CCU_DIV_BUF_INFO(_id, _name, _pname, _base, _flags) \ + { \ + .id = _id, \ + .name = _name, \ + .parent_name = _pname, \ + .base = _base, \ + .type = CCU_DIV_BUF, \ + .flags = _flags \ + } + #define CCU_DIV_FIXED_INFO(_id, _name, _pname, _divider) \ { \ .id = _id, \ @@ -188,11 +198,14 @@ static const struct ccu_div_rst_map axi_rst_map[] = { * for the SoC devices registers IO-operations. */ static const struct ccu_div_info sys_info[] = { - CCU_DIV_VAR_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk", + CCU_DIV_VAR_INFO(CCU_SYS_SATA_CLK, "sys_sata_clk", "sata_clk", CCU_SYS_SATA_REF_BASE, 4, CLK_SET_RATE_GATE, CCU_DIV_SKIP_ONE | CCU_DIV_LOCK_SHIFTED | CCU_DIV_RESET_DOMAIN), + CCU_DIV_BUF_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk", + "sys_sata_clk", CCU_SYS_SATA_REF_BASE, + CLK_SET_RATE_PARENT), CCU_DIV_VAR_INFO(CCU_SYS_APB_CLK, "sys_apb_clk", "pcie_clk", CCU_SYS_APB_BASE, 5, CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN), @@ -204,10 +217,12 @@ static const struct ccu_div_info sys_info[] = { "eth_clk", CCU_SYS_GMAC1_BASE, 5), CCU_DIV_FIXED_INFO(CCU_SYS_GMAC1_PTP_CLK, "sys_gmac1_ptp_clk", "eth_clk", 10), - CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk", - "eth_clk", CCU_SYS_XGMAC_BASE, 8), + CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_CLK, "sys_xgmac_clk", + "eth_clk", CCU_SYS_XGMAC_BASE, 1), + CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk", + "sys_xgmac_clk", 8), CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_PTP_CLK, "sys_xgmac_ptp_clk", - "eth_clk", 10), + "sys_xgmac_clk", 8), CCU_DIV_GATE_INFO(CCU_SYS_USB_CLK, "sys_usb_clk", "eth_clk", CCU_SYS_USB_BASE, 10), CCU_DIV_VAR_INFO(CCU_SYS_PVT_CLK, "sys_pvt_clk", @@ -396,6 +411,9 @@ static int ccu_div_clk_register(struct ccu_div_data *data) init.base = info->base; init.sys_regs = data->sys_regs; init.divider = info->divider; + } else if (init.type == CCU_DIV_BUF) { + init.base = info->base; + init.sys_regs = data->sys_regs; } else { init.divider = info->divider; } diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 178886823b90..b7f89873fcf5 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -968,9 +968,9 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw, return div; } -static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock, - unsigned long parent_rate, - u32 div) +static unsigned long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock, + unsigned long parent_rate, + u32 div) { const struct bcm2835_clock_data *data = clock->data; u64 temp; @@ -1786,7 +1786,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .load_mask = CM_PLLC_LOADPER, .hold_mask = CM_PLLC_HOLDPER, .fixed_divider = 1, - .flags = CLK_SET_RATE_PARENT), + .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT), /* * PLLD is the display PLL, used to drive DSI display panels. 
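bcm2835_clock_rate_from_divisor() above turns a fixed-point divisor back into a rate; the switch to an unsigned return type matters because a rate is never negative and large rates would overflow a signed long. A simplified sketch of the arithmetic, assuming the 12-bit fractional divisor format these clocks use (not the driver's exact code):

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 12 /* assumed 12.12 fixed-point divisor */

/* rate = parent / (div / 2^12), computed in 64 bits to avoid
 * overflow of the shifted parent rate. */
static unsigned long rate_from_divisor(unsigned long parent_rate,
				       uint32_t div)
{
	uint64_t temp = (uint64_t)parent_rate << FRAC_BITS;

	return (unsigned long)(temp / div);
}

int main(void)
{
	/* 500 MHz parent, divisor 2.5 -> 200 MHz */
	uint32_t div = (2 << FRAC_BITS) | (1 << (FRAC_BITS - 1));

	printf("%lu Hz\n", rate_from_divisor(500000000UL, div));
	return 0;
}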
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c index 274441e2ddb2..8f0619f362e3 100644 --- a/drivers/clk/bcm/clk-iproc-pll.c +++ b/drivers/clk/bcm/clk-iproc-pll.c @@ -736,6 +736,7 @@ void iproc_pll_clk_setup(struct device_node *node, const char *parent_name; struct iproc_clk *iclk_array; struct clk_hw_onecell_data *clk_data; + const char *clk_name; if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl)) return; @@ -783,7 +784,12 @@ void iproc_pll_clk_setup(struct device_node *node, iclk = &iclk_array[0]; iclk->pll = pll; - init.name = node->name; + ret = of_property_read_string_index(node, "clock-output-names", + 0, &clk_name); + if (WARN_ON(ret)) + goto err_pll_register; + + init.name = clk_name; init.ops = &iproc_pll_ops; init.flags = 0; parent_name = of_clk_get_parent_name(node, 0); @@ -803,13 +809,11 @@ void iproc_pll_clk_setup(struct device_node *node, goto err_pll_register; clk_data->hws[0] = &iclk->hw; + parent_name = clk_name; /* now initialize and register all leaf clocks */ for (i = 1; i < num_clks; i++) { - const char *clk_name; - memset(&init, 0, sizeof(init)); - parent_name = node->name; ret = of_property_read_string_index(node, "clock-output-names", i, &clk_name); diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c index f89b9cfc4309..969227e2df21 100644 --- a/drivers/clk/bcm/clk-raspberrypi.c +++ b/drivers/clk/bcm/clk-raspberrypi.c @@ -139,7 +139,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw, ret = raspberrypi_clock_property(rpi->firmware, data, RPI_FIRMWARE_GET_CLOCK_RATE, &val); if (ret) - return ret; + return 0; return val; } @@ -156,7 +156,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate, ret = raspberrypi_clock_property(rpi->firmware, data, RPI_FIRMWARE_SET_CLOCK_RATE, &_rate); if (ret) - dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d", + dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d\n", clk_hw_get_name(hw), ret); return ret; @@ -208,7 +208,7 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi, RPI_FIRMWARE_GET_MIN_CLOCK_RATE, &min_rate); if (ret) { - dev_err(rpi->dev, "Failed to get clock %d min freq: %d", + dev_err(rpi->dev, "Failed to get clock %d min freq: %d\n", id, ret); return ERR_PTR(ret); } @@ -251,8 +251,13 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi, struct rpi_firmware_get_clocks_response *clks; int ret; + /* + * The firmware doesn't guarantee that the last element of + * RPI_FIRMWARE_GET_CLOCKS is zeroed. So allocate an additional + * zero element as sentinel. 
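The clk-raspberrypi fix above does two things: it puts the count and element size in their natural calloc order, and over-allocates one zeroed element so iteration has a guaranteed sentinel. A userspace analogue (struct layout and count are hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct clk_resp { unsigned int id, rate; };

int main(void)
{
	size_t n = 12; /* hypothetical RPI_FIRMWARE_NUM_CLK_ID */

	/* calloc(count, size): the extra zeroed element acts as the
	 * terminator the firmware does not guarantee to provide. */
	struct clk_resp *clks = calloc(n + 1, sizeof(*clks));
	if (!clks)
		return 1;

	/* ... firmware fills up to n entries here ... */

	for (struct clk_resp *c = clks; c->id; c++) /* stops at sentinel */
		printf("clk %u\n", c->id);
	free(clks);
	return 0;
}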
+ */ clks = devm_kcalloc(rpi->dev, - sizeof(*clks), RPI_FIRMWARE_NUM_CLK_ID, + RPI_FIRMWARE_NUM_CLK_ID + 1, sizeof(*clks), GFP_KERNEL); if (!clks) return -ENOMEM; diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c index bccdfa00fd37..67a9edbba29c 100644 --- a/drivers/clk/berlin/bg2.c +++ b/drivers/clk/berlin/bg2.c @@ -500,12 +500,15 @@ static void __init berlin2_clock_setup(struct device_node *np) int n, ret; clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL); - if (!clk_data) + if (!clk_data) { + of_node_put(parent_np); return; + } clk_data->num = MAX_CLKS; hws = clk_data->hws; gbase = of_iomap(parent_np, 0); + of_node_put(parent_np); if (!gbase) return; diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c index e9518d35f262..dd2784bb75b6 100644 --- a/drivers/clk/berlin/bg2q.c +++ b/drivers/clk/berlin/bg2q.c @@ -286,19 +286,23 @@ static void __init berlin2q_clock_setup(struct device_node *np) int n, ret; clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL); - if (!clk_data) + if (!clk_data) { + of_node_put(parent_np); return; + } clk_data->num = MAX_CLKS; hws = clk_data->hws; gbase = of_iomap(parent_np, 0); if (!gbase) { + of_node_put(parent_np); pr_err("%pOF: Unable to map global base\n", np); return; } /* BG2Q CPU PLL is not part of global registers */ cpupll_base = of_iomap(parent_np, 1); + of_node_put(parent_np); if (!cpupll_base) { pr_err("%pOF: Unable to map cpupll base\n", np); iounmap(gbase); diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c index 24dab2312bc6..9c3305bcb27a 100644 --- a/drivers/clk/clk-ast2600.c +++ b/drivers/clk/clk-ast2600.c @@ -622,7 +622,7 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev) regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */ /* P-Bus (BCLK) clock divider */ - hw = clk_hw_register_divider_table(dev, "bclk", "hpll", 0, + hw = clk_hw_register_divider_table(dev, "bclk", "epll", 0, scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0, ast2600_div_table, &aspeed_g6_clk_lock); diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c index 78d5ea669fea..2fe36f579ac5 100644 --- a/drivers/clk/clk-oxnas.c +++ b/drivers/clk/clk-oxnas.c @@ -207,7 +207,7 @@ static const struct of_device_id oxnas_stdclk_dt_ids[] = { static int oxnas_stdclk_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; + struct device_node *np = pdev->dev.of_node, *parent_np; const struct oxnas_stdclk_data *data; const struct of_device_id *id; struct regmap *regmap; @@ -219,7 +219,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev) return -ENODEV; data = id->data; - regmap = syscon_node_to_regmap(of_get_parent(np)); + parent_np = of_get_parent(np); + regmap = syscon_node_to_regmap(parent_np); + of_node_put(parent_np); if (IS_ERR(regmap)) { dev_err(&pdev->dev, "failed to have parent regmap\n"); return PTR_ERR(regmap); diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 46101c6a20f2..585b9ac11881 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -1038,8 +1038,13 @@ static void __init _clockgen_init(struct device_node *np, bool legacy); */ static void __init legacy_init_clockgen(struct device_node *np) { - if (!clockgen.node) - _clockgen_init(of_get_parent(np), true); + if (!clockgen.node) { + struct device_node *parent_np; + + parent_np = of_get_parent(np); + _clockgen_init(parent_np, true); + of_node_put(parent_np); + } } /* Legacy node */ @@ -1134,6 +1139,7 @@ static struct clk * __init create_sysclk(const char *name) 
sysclk = of_get_child_by_name(clockgen.node, "sysclk"); if (sysclk) { clk = sysclk_from_fixed(sysclk, name); + of_node_put(sysclk); if (!IS_ERR(clk)) return clk; } diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c index 772b48ad0cd7..382a0619a048 100644 --- a/drivers/clk/clk-si5341.c +++ b/drivers/clk/clk-si5341.c @@ -789,6 +789,15 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw, u32 r_divider; u8 r[3]; + err = regmap_read(output->data->regmap, + SI5341_OUT_CONFIG(output), &val); + if (err < 0) + return err; + + /* If SI5341_OUT_CFG_RDIV_FORCE2 is set, r_divider is 2 */ + if (val & SI5341_OUT_CFG_RDIV_FORCE2) + return parent_rate / 2; + err = regmap_bulk_read(output->data->regmap, SI5341_OUT_R_REG(output), r, 3); if (err < 0) @@ -805,13 +814,6 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw, r_divider += 1; r_divider <<= 1; - err = regmap_read(output->data->regmap, - SI5341_OUT_CONFIG(output), &val); - if (err < 0) - return err; - - if (val & SI5341_OUT_CFG_RDIV_FORCE2) - r_divider = 2; return parent_rate / r_divider; } diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c index 4e741f94baf0..eb597ea7bb87 100644 --- a/drivers/clk/clk-versaclock5.c +++ b/drivers/clk/clk-versaclock5.c @@ -1116,7 +1116,7 @@ static const struct vc5_chip_info idt_5p49v6901_info = { .model = IDT_VC6_5P49V6901, .clk_fod_cnt = 4, .clk_out_cnt = 5, - .flags = VC5_HAS_PFD_FREQ_DBL, + .flags = VC5_HAS_PFD_FREQ_DBL | VC5_HAS_BYPASS_SYNC_BIT, }; static const struct vc5_chip_info idt_5p49v6965_info = { diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 5b5b4ddde72c..3d82193fce86 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -633,6 +633,24 @@ static void clk_core_get_boundaries(struct clk_core *core, *max_rate = min(*max_rate, clk_user->max_rate); } +static bool clk_core_check_boundaries(struct clk_core *core, + unsigned long min_rate, + unsigned long max_rate) +{ + struct clk *user; + + lockdep_assert_held(&prepare_lock); + + if (min_rate > core->max_rate || max_rate < core->min_rate) + return false; + + hlist_for_each_entry(user, &core->clks, clks_node) + if (min_rate > user->max_rate || max_rate < user->min_rate) + return false; + + return true; +} + void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, unsigned long max_rate) { @@ -830,10 +848,9 @@ static void clk_core_unprepare(struct clk_core *core) if (core->ops->unprepare) core->ops->unprepare(core->hw); - clk_pm_runtime_put(core); - trace_clk_unprepare_complete(core); clk_core_unprepare(core->parent); + clk_pm_runtime_put(core); } static void clk_core_unprepare_lock(struct clk_core *core) @@ -2411,6 +2428,11 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) clk->min_rate = min; clk->max_rate = max; + if (!clk_core_check_boundaries(clk->core, min, max)) { + ret = -EINVAL; + goto out; + } + rate = clk_core_get_rate_nolock(clk->core); if (rate < min || rate > max) { /* @@ -2439,6 +2461,7 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) } } +out: if (clk->exclusive_count) clk_core_rate_protect(clk->core); @@ -3808,8 +3831,9 @@ struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id) { struct device *dev = hw->core->dev; + const char *name = dev ? 
dev_name(dev) : NULL; - return clk_hw_create_clk(dev, hw, dev_name(dev), con_id); + return clk_hw_create_clk(dev, hw, name, con_id); } EXPORT_SYMBOL(clk_hw_get_clk); diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c index fc1bd23d4583..598f3cf4eba4 100644 --- a/drivers/clk/imx/clk-imx6sx.c +++ b/drivers/clk/imx/clk-imx6sx.c @@ -280,13 +280,13 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node) hws[IMX6SX_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); hws[IMX6SX_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); hws[IMX6SX_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels)); hws[IMX6SX_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels)); hws[IMX6SX_CLK_VID_SEL] = imx_clk_hw_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels)); hws[IMX6SX_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); hws[IMX6SX_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels)); hws[IMX6SX_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); - hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels)); hws[IMX6SX_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); hws[IMX6SX_CLK_AUDIO_SEL] = imx_clk_hw_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); hws[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_hw_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels)); diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index 0391f5bda5e4..36e8d619e334 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -691,7 +691,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0); hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0); hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0); - hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "osc_32k", ccm_base + 0x44d0, 0); + hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0); hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0); hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0); hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0); diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c index 9382dc3aa27e..1999c114f465 100644 --- a/drivers/clk/ingenic/tcu.c +++ b/drivers/clk/ingenic/tcu.c @@ -100,15 +100,11 @@ static bool ingenic_tcu_enable_regs(struct clk_hw *hw) bool enabled = false; /* - * If the SoC has no global TCU clock, we must ungate the 
channel's - * clock to be able to access its registers. - * If we have a TCU clock, it will be enabled automatically as it has - * been attached to the regmap. + * According to the programming manual, a timer channel's registers can + * only be accessed when the channel's stop bit is clear. */ - if (!tcu->clk) { - enabled = !!ingenic_tcu_is_enabled(hw); - regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit)); - } + enabled = !!ingenic_tcu_is_enabled(hw); + regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit)); return enabled; } @@ -119,8 +115,7 @@ static void ingenic_tcu_disable_regs(struct clk_hw *hw) const struct ingenic_tcu_clk_info *info = tcu_clk->info; struct ingenic_tcu *tcu = tcu_clk->tcu; - if (!tcu->clk) - regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit)); + regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit)); } static u8 ingenic_tcu_get_parent(struct clk_hw *hw) diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c index 37b4162c5882..3a33014eee7f 100644 --- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c +++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c @@ -18,9 +18,9 @@ static const struct mtk_gate_regs mfg_cg_regs = { .sta_ofs = 0x0, }; -#define GATE_MFG(_id, _name, _parent, _shift) \ - GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, \ - &mtk_clk_gate_ops_setclr) +#define GATE_MFG(_id, _name, _parent, _shift) \ + GATE_MTK_FLAGS(_id, _name, _parent, &mfg_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr, CLK_SET_RATE_PARENT) static const struct mtk_gate mfg_clks[] = { GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0) diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c index cb939c071b0c..89916acf0bc3 100644 --- a/drivers/clk/mediatek/reset.c +++ b/drivers/clk/mediatek/reset.c @@ -25,7 +25,7 @@ static int mtk_reset_assert_set_clr(struct reset_controller_dev *rcdev, struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev); unsigned int reg = data->regofs + ((id / 32) << 4); - return regmap_write(data->regmap, reg, 1); + return regmap_write(data->regmap, reg, BIT(id % 32)); } static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev, @@ -34,7 +34,7 @@ static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev, struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev); unsigned int reg = data->regofs + ((id / 32) << 4) + 0x4; - return regmap_write(data->regmap, reg, 1); + return regmap_write(data->regmap, reg, BIT(id % 32)); } static int mtk_reset_assert(struct reset_controller_dev *rcdev, diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c index 27cd2c1f3f61..434cd8f9de82 100644 --- a/drivers/clk/meson/meson-aoclk.c +++ b/drivers/clk/meson/meson-aoclk.c @@ -38,6 +38,7 @@ int meson_aoclkc_probe(struct platform_device *pdev) struct meson_aoclk_reset_controller *rstc; struct meson_aoclk_data *data; struct device *dev = &pdev->dev; + struct device_node *np; struct regmap *regmap; int ret, clkid; @@ -49,7 +50,9 @@ int meson_aoclkc_probe(struct platform_device *pdev) if (!rstc) return -ENOMEM; - regmap = syscon_node_to_regmap(of_get_parent(dev->of_node)); + np = of_get_parent(dev->of_node); + regmap = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(regmap)) { dev_err(dev, "failed to get regmap\n"); return PTR_ERR(regmap); diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c index 8d5a5dab955a..0e5e6b57eb20 100644 --- a/drivers/clk/meson/meson-eeclk.c +++ b/drivers/clk/meson/meson-eeclk.c @@ -18,6 
+18,7 @@ int meson_eeclkc_probe(struct platform_device *pdev) { const struct meson_eeclkc_data *data; struct device *dev = &pdev->dev; + struct device_node *np; struct regmap *map; int ret, i; @@ -26,7 +27,9 @@ int meson_eeclkc_probe(struct platform_device *pdev) return -EINVAL; /* Get the hhi system controller node */ - map = syscon_node_to_regmap(of_get_parent(dev->of_node)); + np = of_get_parent(dev->of_node); + map = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(map)) { dev_err(dev, "failed to get HHI regmap\n"); diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 862f0756b50f..1da9d212f8b7 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -3735,13 +3735,16 @@ static void __init meson8b_clkc_init_common(struct device_node *np, struct clk_hw_onecell_data *clk_hw_onecell_data) { struct meson8b_clk_reset *rstc; + struct device_node *parent_np; const char *notifier_clk_name; struct clk *notifier_clk; void __iomem *clk_base; struct regmap *map; int i, ret; - map = syscon_node_to_regmap(of_get_parent(np)); + parent_np = of_get_parent(np); + map = syscon_node_to_regmap(parent_np); + of_node_put(parent_np); if (IS_ERR(map)) { pr_info("failed to get HHI regmap - Trying obsolete regs\n"); diff --git a/drivers/clk/qcom/apss-ipq6018.c b/drivers/clk/qcom/apss-ipq6018.c index d78ff2f310bf..b5d93657e1ee 100644 --- a/drivers/clk/qcom/apss-ipq6018.c +++ b/drivers/clk/qcom/apss-ipq6018.c @@ -57,7 +57,7 @@ static struct clk_branch apcs_alias0_core_clk = { .parent_hws = (const struct clk_hw *[]){ &apcs_alias0_clk_src.clkr.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c index 1b2cefef7431..a8a2cfa83290 100644 --- a/drivers/clk/qcom/camcc-sdm845.c +++ b/drivers/clk/qcom/camcc-sdm845.c @@ -1521,6 +1521,8 @@ static struct clk_branch cam_cc_sys_tmr_clk = { }, }; +static struct gdsc titan_top_gdsc; + static struct gdsc bps_gdsc = { .gdscr = 0x6004, .pd = { @@ -1554,6 +1556,7 @@ static struct gdsc ife_0_gdsc = { .name = "ife_0_gdsc", }, .flags = POLL_CFG_GDSCR, + .parent = &titan_top_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, }; @@ -1563,6 +1566,7 @@ static struct gdsc ife_1_gdsc = { .name = "ife_1_gdsc", }, .flags = POLL_CFG_GDSCR, + .parent = &titan_top_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, }; diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 1a571c04a76c..cf265ab035ea 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -1379,7 +1379,7 @@ const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = { EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops); /** - * clk_lucid_pll_configure - configure the lucid pll + * clk_trion_pll_configure - configure the trion pll * * @pll: clk alpha pll * @regmap: register map diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c index 59f1af415b58..90046428693c 100644 --- a/drivers/clk/qcom/clk-krait.c +++ b/drivers/clk/qcom/clk-krait.c @@ -32,11 +32,16 @@ static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel) regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT); } krait_set_l2_indirect_reg(mux->offset, regval); - spin_unlock_irqrestore(&krait_clock_reg_lock, flags); /* Wait for switch to complete. */ mb(); udelay(1); + + /* + * Unlock now to make sure the mux register is not + * modified while switching to the new parent. 
+ */ + spin_unlock_irqrestore(&krait_clock_reg_lock, flags); } static int krait_mux_set_parent(struct clk_hw *hw, u8 index) diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index 541016db3c4b..d6d5defb82c9 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c @@ -662,6 +662,7 @@ static struct clk_branch gcc_sleep_clk_src = { }, .num_parents = 1, .ops = &clk_branch2_ops, + .flags = CLK_IS_CRITICAL, }, }, }; @@ -1788,8 +1789,10 @@ static struct clk_regmap_div nss_port4_tx_div_clk_src = { static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(25000000, P_UNIPHY1_RX, 12.5, 0, 0), + F(25000000, P_UNIPHY0_RX, 5, 0, 0), F(78125000, P_UNIPHY1_RX, 4, 0, 0), F(125000000, P_UNIPHY1_RX, 2.5, 0, 0), + F(125000000, P_UNIPHY0_RX, 1, 0, 0), F(156250000, P_UNIPHY1_RX, 2, 0, 0), F(312500000, P_UNIPHY1_RX, 1, 0, 0), { } @@ -1828,8 +1831,10 @@ static struct clk_regmap_div nss_port5_rx_div_clk_src = { static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(25000000, P_UNIPHY1_TX, 12.5, 0, 0), + F(25000000, P_UNIPHY0_TX, 5, 0, 0), F(78125000, P_UNIPHY1_TX, 4, 0, 0), F(125000000, P_UNIPHY1_TX, 2.5, 0, 0), + F(125000000, P_UNIPHY0_TX, 1, 0, 0), F(156250000, P_UNIPHY1_TX, 2, 0, 0), F(312500000, P_UNIPHY1_TX, 1, 0, 0), { } @@ -1867,8 +1872,10 @@ static struct clk_regmap_div nss_port5_tx_div_clk_src = { static const struct freq_tbl ftbl_nss_port6_rx_clk_src[] = { F(19200000, P_XO, 1, 0, 0), + F(25000000, P_UNIPHY2_RX, 5, 0, 0), F(25000000, P_UNIPHY2_RX, 12.5, 0, 0), F(78125000, P_UNIPHY2_RX, 4, 0, 0), + F(125000000, P_UNIPHY2_RX, 1, 0, 0), F(125000000, P_UNIPHY2_RX, 2.5, 0, 0), F(156250000, P_UNIPHY2_RX, 2, 0, 0), F(312500000, P_UNIPHY2_RX, 1, 0, 0), @@ -1907,8 +1914,10 @@ static struct clk_regmap_div nss_port6_rx_div_clk_src = { static const struct freq_tbl ftbl_nss_port6_tx_clk_src[] = { F(19200000, P_XO, 1, 0, 0), + F(25000000, P_UNIPHY2_TX, 5, 0, 0), F(25000000, P_UNIPHY2_TX, 12.5, 0, 0), F(78125000, P_UNIPHY2_TX, 4, 0, 0), + F(125000000, P_UNIPHY2_TX, 1, 0, 0), F(125000000, P_UNIPHY2_TX, 2.5, 0, 0), F(156250000, P_UNIPHY2_TX, 2, 0, 0), F(312500000, P_UNIPHY2_TX, 1, 0, 0), @@ -3346,6 +3355,7 @@ static struct clk_branch gcc_nssnoc_ubi1_ahb_clk = { static struct clk_branch gcc_ubi0_ahb_clk = { .halt_reg = 0x6820c, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0x6820c, .enable_mask = BIT(0), @@ -3363,6 +3373,7 @@ static struct clk_branch gcc_ubi0_ahb_clk = { static struct clk_branch gcc_ubi0_axi_clk = { .halt_reg = 0x68200, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0x68200, .enable_mask = BIT(0), @@ -3380,6 +3391,7 @@ static struct clk_branch gcc_ubi0_axi_clk = { static struct clk_branch gcc_ubi0_nc_axi_clk = { .halt_reg = 0x68204, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0x68204, .enable_mask = BIT(0), @@ -3397,6 +3409,7 @@ static struct clk_branch gcc_ubi0_nc_axi_clk = { static struct clk_branch gcc_ubi0_core_clk = { .halt_reg = 0x68210, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0x68210, .enable_mask = BIT(0), @@ -3414,6 +3427,7 @@ static struct clk_branch gcc_ubi0_core_clk = { static struct clk_branch gcc_ubi0_mpt_clk = { .halt_reg = 0x68208, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0x68208, .enable_mask = BIT(0), @@ -3431,6 +3445,7 @@ static struct clk_branch gcc_ubi0_mpt_clk = { static struct clk_branch gcc_ubi1_ahb_clk = { .halt_reg = 0x6822c, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 
0x6822c, .enable_mask = BIT(0), @@ -3448,6 +3463,7 @@ static struct clk_branch gcc_ubi1_ahb_clk = { static struct clk_branch gcc_ubi1_axi_clk = { .halt_reg = 0x68220, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0x68220, .enable_mask = BIT(0), @@ -3465,6 +3481,7 @@ static struct clk_branch gcc_ubi1_axi_clk = { static struct clk_branch gcc_ubi1_nc_axi_clk = { .halt_reg = 0x68224, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0x68224, .enable_mask = BIT(0), @@ -3482,6 +3499,7 @@ static struct clk_branch gcc_ubi1_nc_axi_clk = { static struct clk_branch gcc_ubi1_core_clk = { .halt_reg = 0x68230, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0x68230, .enable_mask = BIT(0), @@ -3499,6 +3517,7 @@ static struct clk_branch gcc_ubi1_core_clk = { static struct clk_branch gcc_ubi1_mpt_clk = { .halt_reg = 0x68228, + .halt_check = BRANCH_HALT_DELAY, .clkr = { .enable_reg = 0x68228, .enable_mask = BIT(0), @@ -4371,6 +4390,33 @@ static struct clk_branch gcc_pcie0_axi_s_bridge_clk = { }, }; +static const struct alpha_pll_config ubi32_pll_config = { + .l = 0x4e, + .config_ctl_val = 0x200d4aa8, + .config_ctl_hi_val = 0x3c2, + .main_output_mask = BIT(0), + .aux_output_mask = BIT(1), + .pre_div_val = 0x0, + .pre_div_mask = BIT(12), + .post_div_val = 0x0, + .post_div_mask = GENMASK(9, 8), +}; + +static const struct alpha_pll_config nss_crypto_pll_config = { + .l = 0x3e, + .alpha = 0x0, + .alpha_hi = 0x80, + .config_ctl_val = 0x4001055b, + .main_output_mask = BIT(0), + .pre_div_val = 0x0, + .pre_div_mask = GENMASK(14, 12), + .post_div_val = 0x1 << 8, + .post_div_mask = GENMASK(11, 8), + .vco_mask = GENMASK(21, 20), + .vco_val = 0x0, + .alpha_en_mask = BIT(24), +}; + static struct clk_hw *gcc_ipq8074_hws[] = { &gpll0_out_main_div2.hw, &gpll6_out_main_div2.hw, @@ -4772,7 +4818,20 @@ static const struct qcom_cc_desc gcc_ipq8074_desc = { static int gcc_ipq8074_probe(struct platform_device *pdev) { - return qcom_cc_probe(pdev, &gcc_ipq8074_desc); + struct regmap *regmap; + + regmap = qcom_cc_map(pdev, &gcc_ipq8074_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + /* SW Workaround for UBI32 Huayra PLL */ + regmap_update_bits(regmap, 0x2501c, BIT(26), BIT(26)); + + clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config); + clk_alpha_pll_configure(&nss_crypto_pll_main, regmap, + &nss_crypto_pll_config); + + return qcom_cc_really_probe(pdev, &gcc_ipq8074_desc, regmap); } static struct platform_driver gcc_ipq8074_driver = { diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index 892e91b92f2c..245150a5484a 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -286,8 +286,8 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = { .name = "uart_group_012", .type = K_BITSEL, .source = 1 + R9A06G032_DIV_UART, - /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */ - .dual.sel = ((0xec / 4) << 5) | 24, + /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */ + .dual.sel = ((0x34 / 4) << 5) | 30, .dual.group = 0, }, { @@ -295,8 +295,8 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = { .name = "uart_group_34567", .type = K_BITSEL, .source = 1 + R9A06G032_DIV_P2_PG, - /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */ - .dual.sel = ((0x34 / 4) << 5) | 30, + /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */ + .dual.sel = ((0xec / 4) << 5) | 24, .dual.group = 1, }, D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5), diff --git a/drivers/clk/sprd/common.c 
b/drivers/clk/sprd/common.c index d620bbbcdfc8..ce81e4087a8f 100644 --- a/drivers/clk/sprd/common.c +++ b/drivers/clk/sprd/common.c @@ -41,7 +41,7 @@ int sprd_clk_regmap_init(struct platform_device *pdev, { void __iomem *base; struct device *dev = &pdev->dev; - struct device_node *node = dev->of_node; + struct device_node *node = dev->of_node, *np; struct regmap *regmap; if (of_find_property(node, "sprd,syscon", NULL)) { @@ -50,9 +50,10 @@ int sprd_clk_regmap_init(struct platform_device *pdev, pr_err("%s: failed to get syscon regmap\n", __func__); return PTR_ERR(regmap); } - } else if (of_device_is_compatible(of_get_parent(dev->of_node), - "syscon")) { - regmap = device_node_to_regmap(of_get_parent(dev->of_node)); + } else if (of_device_is_compatible(np = of_get_parent(node), "syscon") || + (of_node_put(np), 0)) { + regmap = device_node_to_regmap(np); + of_node_put(np); if (IS_ERR(regmap)) { dev_err(dev, "failed to get regmap from its parent.\n"); return PTR_ERR(regmap); diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c index 542b31d6e96d..636bcf2439ef 100644 --- a/drivers/clk/sunxi/clk-sun9i-mmc.c +++ b/drivers/clk/sunxi/clk-sun9i-mmc.c @@ -109,6 +109,8 @@ static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev) spin_lock_init(&data->lock); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) + return -EINVAL; /* one clock/reset pair per word */ count = DIV_ROUND_UP((resource_size(r)), SUN9I_MMC_WIDTH); data->membase = devm_ioremap_resource(&pdev->dev, r); diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index bc9e47a4cb60..4e2b26e3e573 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c @@ -1317,6 +1317,7 @@ static void __init tegra114_clock_init(struct device_node *np) } pmc_base = of_iomap(node, 0); + of_node_put(node); if (!pmc_base) { pr_err("Can't map pmc registers\n"); WARN_ON(1); diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 3efc651b42e3..d60ee6e318a5 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -1128,6 +1128,7 @@ static void __init tegra20_clock_init(struct device_node *np) } pmc_base = of_iomap(node, 0); + of_node_put(node); if (!pmc_base) { pr_err("Can't map pmc registers\n"); BUG(); diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 68cbb98af567..1a0016d07f88 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -3697,6 +3697,7 @@ static void __init tegra210_clock_init(struct device_node *np) } pmc_base = of_iomap(node, 0); + of_node_put(node); if (!pmc_base) { pr_err("Can't map pmc registers\n"); WARN_ON(1); diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 8d4c08b034bd..e2e59d78c173 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -251,14 +251,16 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) if (rc) { pr_err("%s: failed to lookup atl clock %d\n", __func__, i); - return -EINVAL; + ret = -EINVAL; + goto pm_put; } clk = of_clk_get_from_provider(&clkspec); if (IS_ERR(clk)) { pr_err("%s: failed to get atl clock %d from provider\n", __func__, i); - return PTR_ERR(clk); + ret = PTR_ERR(clk); + goto pm_put; } cdesc = to_atl_desc(__clk_get_hw(clk)); @@ -291,8 +293,9 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) if (cdesc->enabled) atl_clk_enable(__clk_get_hw(clk)); } - pm_runtime_put_sync(cinfo->dev); +pm_put: + 
pm_runtime_put_sync(cinfo->dev); return ret; } diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 3da33c786d77..29eafab4353e 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -131,7 +131,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops) void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) { struct ti_dt_clk *c; - struct device_node *node, *parent; + struct device_node *node, *parent, *child; struct clk *clk; struct of_phandle_args clkspec; char buf[64]; @@ -171,10 +171,13 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) node = of_find_node_by_name(NULL, buf); if (num_args && compat_mode) { parent = node; - node = of_get_child_by_name(parent, "clock"); - if (!node) - node = of_get_child_by_name(parent, "clk"); - of_node_put(parent); + child = of_get_child_by_name(parent, "clock"); + if (!child) + child = of_get_child_by_name(parent, "clk"); + if (child) { + of_node_put(parent); + node = child; + } } clkspec.np = node; diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig index 792315d893db..481de5657d85 100644 --- a/drivers/clk/versatile/Kconfig +++ b/drivers/clk/versatile/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only menu "Clock driver for ARM Reference designs" + depends on HAS_IOMEM config ICST bool "Clock driver for ARM Reference designs ICST" diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c index db8d0d7161ce..9c82ae240c40 100644 --- a/drivers/clk/zynqmp/clkc.c +++ b/drivers/clk/zynqmp/clkc.c @@ -687,6 +687,13 @@ static void zynqmp_get_clock_info(void) FIELD_PREP(CLK_ATTR_NODE_INDEX, i); zynqmp_pm_clock_get_name(clock[i].clk_id, &name); + + /* + * Terminate with a NULL character in case the name provided by firmware * is longer and was truncated due to the size limit.
+ */ + name.name[sizeof(name.name) - 1] = '\0'; + if (!strcmp(name.name, RESERVED_CLK_NAME)) continue; strncpy(clock[i].clk_name, name.name, MAX_NAME_LEN); diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c index abe6afbf3407..2ae7f9129b07 100644 --- a/drivers/clk/zynqmp/pll.c +++ b/drivers/clk/zynqmp/pll.c @@ -99,26 +99,25 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { u32 fbdiv; - long rate_div, f; + u32 mult, div; - /* Enable the fractional mode if needed */ - rate_div = (rate * FRAC_DIV) / *prate; - f = rate_div % FRAC_DIV; - if (f) { - if (rate > PS_PLL_VCO_MAX) { - fbdiv = rate / PS_PLL_VCO_MAX; - rate = rate / (fbdiv + 1); - } - if (rate < PS_PLL_VCO_MIN) { - fbdiv = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate); - rate = rate * fbdiv; - } - return rate; + /* Let rate fall inside the range PS_PLL_VCO_MIN ~ PS_PLL_VCO_MAX */ + if (rate > PS_PLL_VCO_MAX) { + div = DIV_ROUND_UP(rate, PS_PLL_VCO_MAX); + rate = rate / div; + } + if (rate < PS_PLL_VCO_MIN) { + mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate); + rate = rate * mult; } fbdiv = DIV_ROUND_CLOSEST(rate, *prate); - fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX); - return *prate * fbdiv; + if (fbdiv < PLL_FBDIV_MIN || fbdiv > PLL_FBDIV_MAX) { + fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX); + rate = *prate * fbdiv; + } + + return rate; } /** diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index ba04cb381cd3..7c617d8dff3f 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -472,4 +472,3 @@ void __init hv_init_clocksource(void) hv_sched_clock_offset = hv_read_reference_counter(); hv_setup_sched_clock(read_hv_sched_clock_msr); } -EXPORT_SYMBOL_GPL(hv_init_clocksource); diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c index 9396745e1c17..ad904bbbac6f 100644 --- a/drivers/clocksource/timer-ixp4xx.c +++ b/drivers/clocksource/timer-ixp4xx.c @@ -258,7 +258,6 @@ void __init ixp4xx_timer_setup(resource_size_t timerbase, } ixp4xx_timer_register(base, timer_irq, timer_freq); } -EXPORT_SYMBOL_GPL(ixp4xx_timer_setup); #ifdef CONFIG_OF static __init int ixp4xx_of_timer_init(struct device_node *np) diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c index 56c0cc32d0ac..d514b44e67dd 100644 --- a/drivers/clocksource/timer-oxnas-rps.c +++ b/drivers/clocksource/timer-oxnas-rps.c @@ -236,7 +236,7 @@ static int __init oxnas_rps_timer_init(struct device_node *np) } rps->irq = irq_of_parse_and_map(np, 0); - if (rps->irq < 0) { + if (!rps->irq) { ret = -EINVAL; goto err_iomap; } diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c index 6e8ad4a4ea3c..bedd3570474b 100644 --- a/drivers/clocksource/timer-sp804.c +++ b/drivers/clocksource/timer-sp804.c @@ -274,6 +274,11 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time struct clk *clk1, *clk2; const char *name = of_get_property(np, "compatible", NULL); + if (initialized) { + pr_debug("%pOF: skipping further SP804 timer device\n", np); + return 0; + } + base = of_iomap(np, 0); if (!base) return -ENXIO; @@ -285,11 +290,6 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time writel(0, timer1_base + timer->ctrl); writel(0, timer2_base + timer->ctrl); - if (initialized || !of_device_is_available(np)) { - ret = -EINVAL; - goto err; - } - clk1 = of_clk_get(np, 0); if (IS_ERR(clk1)) clk1 = NULL; diff 
--git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c index 710acc0a3704..85fbbac06d31 100644 --- a/drivers/counter/microchip-tcb-capture.c +++ b/drivers/counter/microchip-tcb-capture.c @@ -29,7 +29,6 @@ struct mchp_tc_data { int qdec_mode; int num_channels; int channel[2]; - bool trig_inverted; }; enum mchp_tc_count_function { @@ -163,7 +162,7 @@ static int mchp_tc_count_signal_read(struct counter_device *counter, regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], SR), &sr); - if (priv->trig_inverted) + if (signal->id == 1) sigstatus = (sr & ATMEL_TC_MTIOB); else sigstatus = (sr & ATMEL_TC_MTIOA); @@ -181,6 +180,17 @@ static int mchp_tc_count_action_get(struct counter_device *counter, struct mchp_tc_data *const priv = counter->priv; u32 cmr; + if (priv->qdec_mode) { + *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; + return 0; + } + + /* Only TIOA signal is evaluated in non-QDEC mode */ + if (synapse->signal->id != 0) { + *action = COUNTER_SYNAPSE_ACTION_NONE; + return 0; + } + regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr); switch (cmr & ATMEL_TC_ETRGEDG) { @@ -209,8 +219,8 @@ static int mchp_tc_count_action_set(struct counter_device *counter, struct mchp_tc_data *const priv = counter->priv; u32 edge = ATMEL_TC_ETRGEDG_NONE; - /* QDEC mode is rising edge only */ - if (priv->qdec_mode) + /* QDEC mode is rising edge only; only TIOA handled in non-QDEC mode */ + if (priv->qdec_mode || synapse->signal->id != 0) return -EINVAL; switch (action) { diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index a310372dc53e..82f6592bbadb 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -44,6 +44,8 @@ struct mtk_cpu_dvfs_info { bool need_voltage_tracking; }; +static struct platform_device *cpufreq_pdev; + static LIST_HEAD(dvfs_info_list); static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu) @@ -546,7 +548,6 @@ static int __init mtk_cpufreq_driver_init(void) { struct device_node *np; const struct of_device_id *match; - struct platform_device *pdev; int err; np = of_find_node_by_path("/"); @@ -570,15 +571,23 @@ static int __init mtk_cpufreq_driver_init(void) * and the device registration codes are put here to handle defer * probing. 
*/ - pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0); - if (IS_ERR(pdev)) { + cpufreq_pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0); + if (IS_ERR(cpufreq_pdev)) { pr_err("failed to register mtk-cpufreq platform device\n"); - return PTR_ERR(pdev); + platform_driver_unregister(&mtk_cpufreq_platdrv); + return PTR_ERR(cpufreq_pdev); } return 0; } -device_initcall(mtk_cpufreq_driver_init); +module_init(mtk_cpufreq_driver_init) + +static void __exit mtk_cpufreq_driver_exit(void) +{ + platform_device_unregister(cpufreq_pdev); + platform_driver_unregister(&mtk_cpufreq_platdrv); +} +module_exit(mtk_cpufreq_driver_exit) MODULE_DESCRIPTION("MediaTek CPUFreq driver"); MODULE_AUTHOR("Pi-Cheng Chen "); diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index 73621bc11976..3704476bb83a 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c @@ -471,6 +471,10 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) if (slew_done_gpio_np) slew_done_gpio = read_gpio(slew_done_gpio_np); + of_node_put(volt_gpio_np); + of_node_put(freq_gpio_np); + of_node_put(slew_done_gpio_np); + /* If we use the frequency GPIOs, calculate the min/max speeds based * on the bus frequencies */ diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index 7fdd30e92e42..9b3d24721d7b 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -215,6 +215,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, int speed = 0, pvs = 0, pvs_ver = 0; u8 *speedbin; size_t len; + int ret = 0; speedbin = nvmem_cell_read(speedbin_nvmem, &len); @@ -232,7 +233,8 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, break; default: dev_err(cpu_dev, "Unable to read nvmem data. 
Defaulting to 0!\n"); - return -ENODEV; + ret = -ENODEV; + goto len_error; } snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d", @@ -240,8 +242,9 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, drv->versions = (1 << speed); +len_error: kfree(speedbin); - return 0; + return ret; } static const struct qcom_cpufreq_match_data match_data_kryo = { @@ -264,7 +267,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) struct nvmem_cell *speedbin_nvmem; struct device_node *np; struct device *cpu_dev; - char *pvs_name = "speedXX-pvsXX-vXX"; + char pvs_name_buffer[] = "speedXX-pvsXX-vXX"; + char *pvs_name = pvs_name_buffer; unsigned cpu; const struct of_device_id *match; int ret; diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index 6b6b20da2bcf..573b417e1483 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -275,6 +275,7 @@ static int qoriq_cpufreq_probe(struct platform_device *pdev) np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist); if (np) { + of_node_put(np); dev_info(&pdev->dev, "Disabling due to erratum A-008083"); return -ENODEV; } diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index 2deed8d8773f..75e1bf3a08f7 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -98,8 +98,10 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) return -ENOMEM; ret = sun50i_cpufreq_get_efuse(&speed); - if (ret) + if (ret) { + kfree(opp_tables); return ret; + } snprintf(name, MAX_NAME_LEN, "speed%d", speed); diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index efc063ffc683..95ec4a688aa7 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -68,10 +68,12 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev, if (ret) return -1; - trace_android_vh_cpuidle_psci_enter(dev, s2idle); /* Do runtime PM to manage a hierarchical CPU topology. */ rcu_irq_enter_irqson(); + + trace_android_vh_cpuidle_psci_enter(dev, s2idle); + if (s2idle) dev_pm_genpd_suspend(pd_dev); else @@ -89,10 +91,11 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev, dev_pm_genpd_resume(pd_dev); else pm_runtime_get_sync(pd_dev); - rcu_irq_exit_irqson(); trace_android_vh_cpuidle_psci_exit(dev, s2idle); + rcu_irq_exit_irqson(); + cpu_pm_exit(); /* Clear the domain state to start fresh when back from idle.
*/ diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index f783748462f9..d0954993e2e3 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -93,6 +93,69 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) return err; } +static int sun8i_ss_setup_ivs(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ss_dev *ss = op->ss; + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct scatterlist *sg = areq->src; + unsigned int todo, offset; + unsigned int len = areq->cryptlen; + unsigned int ivsize = crypto_skcipher_ivsize(tfm); + struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; + int i = 0; + u32 a; + int err; + + rctx->ivlen = ivsize; + if (rctx->op_dir & SS_DECRYPTION) { + offset = areq->cryptlen - ivsize; + scatterwalk_map_and_copy(sf->biv, areq->src, offset, + ivsize, 0); + } + + /* we need to copy all IVs from source in case DMA is bi-directional */ + while (sg && len) { + if (sg_dma_len(sg) == 0) { + sg = sg_next(sg); + continue; + } + if (i == 0) + memcpy(sf->iv[0], areq->iv, ivsize); + a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(ss->dev, a)) { + memzero_explicit(sf->iv[i], ivsize); + dev_err(ss->dev, "Cannot DMA MAP IV\n"); + err = -EFAULT; + goto dma_iv_error; + } + rctx->p_iv[i] = a; + /* we need to set up all other IVs only for decryption */ + if (rctx->op_dir & SS_ENCRYPTION) + return 0; + todo = min(len, sg_dma_len(sg)); + len -= todo; + i++; + if (i < MAX_SG) { + offset = sg->length - ivsize; + scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0); + } + rctx->niv = i; + sg = sg_next(sg); + } + + return 0; +dma_iv_error: + i--; + while (i >= 0) { + dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); + memzero_explicit(sf->iv[i], ivsize); + i--; + } + return err; +} + static int sun8i_ss_cipher(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); @@ -101,9 +164,9 @@ static int sun8i_ss_cipher(struct skcipher_request *areq) struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ss_alg_template *algt; + struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; struct scatterlist *sg; unsigned int todo, len, offset, ivsize; - void *backup_iv = NULL; int nr_sgs = 0; int nr_sgd = 0; int err = 0; @@ -134,30 +197,9 @@ static int sun8i_ss_cipher(struct skcipher_request *areq) ivsize = crypto_skcipher_ivsize(tfm); if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { - rctx->ivlen = ivsize; - rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA); - if (!rctx->biv) { - err = -ENOMEM; + err = sun8i_ss_setup_ivs(areq); + if (err) goto theend_key; - } - if (rctx->op_dir & SS_DECRYPTION) { - backup_iv = kzalloc(ivsize, GFP_KERNEL); - if (!backup_iv) { - err = -ENOMEM; - goto theend_key; - } - offset = areq->cryptlen - ivsize; - scatterwalk_map_and_copy(backup_iv, areq->src, offset, - ivsize, 0); - } - memcpy(rctx->biv, areq->iv, ivsize); - rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen, - DMA_TO_DEVICE); - if (dma_mapping_error(ss->dev, rctx->p_iv)) { - dev_err(ss->dev, "Cannot DMA MAP IV\n"); - err = -ENOMEM; - goto theend_iv; - } } if (areq->src == areq->dst) { nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), @@ 
-240,21 +282,19 @@ theend_sgs: } theend_iv: - if (rctx->p_iv) - dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen, - DMA_TO_DEVICE); - if (areq->iv && ivsize > 0) { - if (rctx->biv) { - offset = areq->cryptlen - ivsize; - if (rctx->op_dir & SS_DECRYPTION) { - memcpy(areq->iv, backup_iv, ivsize); - kfree_sensitive(backup_iv); - } else { - scatterwalk_map_and_copy(areq->iv, areq->dst, offset, - ivsize, 0); - } - kfree(rctx->biv); + for (i = 0; i < rctx->niv; i++) { + dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); + memzero_explicit(sf->iv[i], ivsize); + } + + offset = areq->cryptlen - ivsize; + if (rctx->op_dir & SS_DECRYPTION) { + memcpy(areq->iv, sf->biv, ivsize); + memzero_explicit(sf->biv, ivsize); + } else { + scatterwalk_map_and_copy(areq->iv, areq->dst, offset, + ivsize, 0); } } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 319fe3279a71..47b5828e35c3 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -66,6 +66,7 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx const char *name) { int flow = rctx->flow; + unsigned int ivlen = rctx->ivlen; u32 v = SS_START; int i; @@ -104,15 +105,14 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx mutex_lock(&ss->mlock); writel(rctx->p_key, ss->base + SS_KEY_ADR_REG); - if (i == 0) { - if (rctx->p_iv) - writel(rctx->p_iv, ss->base + SS_IV_ADR_REG); - } else { - if (rctx->biv) { - if (rctx->op_dir == SS_ENCRYPTION) - writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG); + if (ivlen) { + if (rctx->op_dir == SS_ENCRYPTION) { + if (i == 0) + writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG); else - writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG); + writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG); + } else { + writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG); } } @@ -464,7 +464,7 @@ static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i) */ static int allocate_flows(struct sun8i_ss_dev *ss) { - int i, err; + int i, j, err; ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow), GFP_KERNEL); @@ -474,6 +474,36 @@ static int allocate_flows(struct sun8i_ss_dev *ss) for (i = 0; i < MAXFLOW; i++) { init_completion(&ss->flows[i].complete); + ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ss->flows[i].biv) { + err = -ENOMEM; + goto error_engine; + } + + for (j = 0; j < MAX_SG; j++) { + ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ss->flows[i].iv[j]) { + err = -ENOMEM; + goto error_engine; + } + } + + /* the padding could be up to two blocks.
*/ + ss->flows[i].pad = devm_kmalloc(ss->dev, SHA256_BLOCK_SIZE * 2, + GFP_KERNEL | GFP_DMA); + if (!ss->flows[i].pad) { + err = -ENOMEM; + goto error_engine; + } + ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ss->flows[i].result) { + err = -ENOMEM; + goto error_engine; + } + ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true); if (!ss->flows[i].engine) { dev_err(ss->dev, "Cannot allocate engine\n"); diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index c9edecd43ef9..98040794acdc 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -341,18 +341,11 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) if (digestsize == SHA224_DIGEST_SIZE) digestsize = SHA256_DIGEST_SIZE; - /* the padding could be up to two block. */ - pad = kzalloc(algt->alg.hash.halg.base.cra_blocksize * 2, GFP_KERNEL | GFP_DMA); - if (!pad) - return -ENOMEM; + result = ss->flows[rctx->flow].result; + pad = ss->flows[rctx->flow].pad; + memset(pad, 0, algt->alg.hash.halg.base.cra_blocksize * 2); bf = (__le32 *)pad; - result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA); - if (!result) { - kfree(pad); - return -ENOMEM; - } - for (i = 0; i < MAX_SG; i++) { rctx->t_dst[i].addr = 0; rctx->t_dst[i].len = 0; @@ -379,13 +372,21 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) } len = areq->nbytes; - for_each_sg(areq->src, sg, nr_sgs, i) { + sg = areq->src; + i = 0; + while (len > 0 && sg) { + if (sg_dma_len(sg) == 0) { + sg = sg_next(sg); + continue; + } rctx->t_src[i].addr = sg_dma_address(sg); todo = min(len, sg_dma_len(sg)); rctx->t_src[i].len = todo / 4; len -= todo; rctx->t_dst[i].addr = addr_res; rctx->t_dst[i].len = digestsize / 4; + sg = sg_next(sg); + i++; } if (len > 0) { dev_err(ss->dev, "remaining len %d\n", len); @@ -439,8 +440,6 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) memcpy(areq->result, result, algt->alg.hash.halg.digestsize); theend: - kfree(pad); - kfree(result); local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); local_bh_enable(); diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h index 1a66457f4a20..a97a790ae451 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h @@ -120,11 +120,19 @@ struct sginfo { * @complete: completion for the current task on this flow * @status: set to 1 by interrupt if task is done * @stat_req: number of request done by this flow + * @iv: list of IVs to use for each step + * @biv: buffer which contains the backed-up IV + * @pad: padding buffer for hash operations + * @result: buffer for storing the result of hash operations */ struct sun8i_ss_flow { struct crypto_engine *engine; struct completion complete; int status; + u8 *iv[MAX_SG]; + u8 *biv; + void *pad; + void *result; #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG unsigned long stat_req; #endif @@ -163,28 +171,28 @@ struct sun8i_ss_dev { * @t_src: list of mapped SGs with their size * @t_dst: list of mapped SGs with their size * @p_key: DMA address of the key - * @p_iv: DMA address of the IV + * @p_iv: DMA address of the IVs + * @niv: Number of IVs DMA mapped * @method: current algorithm for this request * @op_mode: op_mode for this request * @op_dir: direction (encrypt vs decrypt) for this request * @flow: the flow to use for this request - * @ivlen: size of biv + *
@ivlen: size of IVs * @keylen: keylen for this request - * @biv: buffer which contain the IV * @fallback_req: request struct for invoking the fallback skcipher TFM */ struct sun8i_cipher_req_ctx { struct sginfo t_src[MAX_SG]; struct sginfo t_dst[MAX_SG]; u32 p_key; - u32 p_iv; + u32 p_iv[MAX_SG]; + int niv; u32 method; u32 op_mode; u32 op_dir; int flow; unsigned int ivlen; unsigned int keylen; - void *biv; struct skcipher_request fallback_req; // keep at the end }; diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index ca0361b2dbb0..f87aa2169e5f 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -609,6 +609,13 @@ static bool check_version(struct fsl_mc_version *mc_version, u32 major, } #endif +static bool needs_entropy_delay_adjustment(void) +{ + if (of_machine_is_compatible("fsl,imx6sx")) + return true; + return false; +} + /* Probe routine for CAAM top (controller) level */ static int caam_probe(struct platform_device *pdev) { @@ -855,6 +862,8 @@ static int caam_probe(struct platform_device *pdev) * Also, if a handle was instantiated, do not change * the TRNG parameters. */ + if (needs_entropy_delay_adjustment()) + ent_delay = 12000; if (!(ctrlpriv->rng4_sh_init || inst_handles)) { dev_info(dev, "Entropy delay = %u\n", @@ -871,6 +880,15 @@ static int caam_probe(struct platform_device *pdev) */ ret = instantiate_rng(dev, inst_handles, gen_sk); + /* + * Entropy delay is determined via TRNG characterization. + * TRNG characterization is run across different voltages + * and temperatures. + * If worst case value for ent_dly is identified, + * the loop can be skipped for that platform. + */ + if (needs_entropy_delay_adjustment()) + break; if (ret == -EAGAIN) /* * if here, the loop will rerun, diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c index 781949027451..d9362199423f 100644 --- a/drivers/crypto/cavium/cpt/cptpf_main.c +++ b/drivers/crypto/cavium/cpt/cptpf_main.c @@ -254,6 +254,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) const struct firmware *fw_entry; struct device *dev = &cpt->pdev->dev; struct ucode_header *ucode; + unsigned int code_length; struct microcode *mcode; int j, ret = 0; @@ -264,11 +265,12 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) ucode = (struct ucode_header *)fw_entry->data; mcode = &cpt->mcode[cpt->next_mc_idx]; memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ); - mcode->code_size = ntohl(ucode->code_length) * 2; - if (!mcode->code_size) { + code_length = ntohl(ucode->code_length); + if (code_length == 0 || code_length >= INT_MAX / 2) { ret = -EINVAL; goto fw_release; } + mcode->code_size = code_length * 2; mcode->is_ae = is_ae; mcode->core_mask = 0ULL; diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index b3eea329f840..b9299defb431 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -642,6 +642,10 @@ static void ccp_dma_release(struct ccp_device *ccp) for (i = 0; i < ccp->cmd_q_count; i++) { chan = ccp->ccp_dma_chan + i; dma_chan = &chan->dma_chan; + + if (dma_chan->client_count) + dma_release_channel(dma_chan); + tasklet_kill(&chan->cleanup_tasklet); list_del_rcu(&dma_chan->device_node); } @@ -767,8 +771,8 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp) if (!dmaengine) return; - dma_async_device_unregister(dma_dev); ccp_dma_release(ccp); + dma_async_device_unregister(dma_dev); 
kmem_cache_destroy(ccp->dma_desc_cache); kmem_cache_destroy(ccp->dma_cmd_cache); diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 57b57d4db500..ed39a22e1b2b 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -278,7 +278,7 @@ static int __sev_platform_shutdown_locked(int *error) struct sev_device *sev = psp_master->sev_data; int ret; - if (sev->state == SEV_STATE_UNINIT) + if (!sev || sev->state == SEV_STATE_UNINIT) return 0; ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index 11e0278c8631..6140e4927322 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -356,12 +356,14 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx, req_ctx->mlli_params.mlli_dma_addr); } - dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL); - dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); - if (src != dst) { - dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE); + dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE); dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst)); + dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); + } else { + dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL); + dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); } } @@ -377,6 +379,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, u32 dummy = 0; int rc = 0; u32 mapped_nents = 0; + int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); req_ctx->dma_buf_type = CC_DMA_BUF_DLLI; mlli_params->curr_pool = NULL; @@ -399,7 +402,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, } /* Map the src SGL */ - rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents, + rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (rc) goto cipher_exit; @@ -416,7 +419,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, } } else { /* Map the dst sg */ - rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL, + rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE, &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (rc) @@ -456,6 +459,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) struct aead_req_ctx *areq_ctx = aead_request_ctx(req); unsigned int hw_iv_size = areq_ctx->hw_iv_size; struct cc_drvdata *drvdata = dev_get_drvdata(dev); + int src_direction = (req->src != req->dst ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL); if (areq_ctx->mac_buf_dma_addr) { dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, @@ -514,13 +518,11 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, areq_ctx->assoclen, req->cryptlen); - dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, - DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction); if (req->src != req->dst) { dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", sg_virt(req->dst)); - dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, - DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE); } if (drvdata->coherent && areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && @@ -843,7 +845,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, else size_for_map -= authsize; - rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL, + rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE, &areq_ctx->dst.mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, &dst_mapped_nents); @@ -1056,7 +1058,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) size_to_map += authsize; } - rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL, + rc = cc_map_sg(dev, req->src, size_to_map, + (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL), &areq_ctx->src.mapped_nents, (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES), diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index a87f9904087a..90c13ebe7e83 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -210,7 +210,7 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req, if (unlikely(shift < 0)) return -EINVAL; - ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL); + ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC); if (unlikely(!ptr)) return -ENOMEM; diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index 8ca945ac297e..2066f8d40c5a 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -449,7 +449,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp, */ } - mutex_lock(&ctx->queue->queuelock); + spin_lock_bh(&ctx->queue->queuelock); /* Put the IV in place for chained cases */ switch (ctx->cipher_alg) { case SEC_C_AES_CBC_128: @@ -509,7 +509,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp, list_del(&backlog_req->backlog_head); } } - mutex_unlock(&ctx->queue->queuelock); + spin_unlock_bh(&ctx->queue->queuelock); mutex_lock(&sec_req->lock); list_del(&sec_req_el->head); @@ -798,7 +798,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, */ /* Grab a big lock for a long time to avoid concurrency issues */ - mutex_lock(&queue->queuelock); + spin_lock_bh(&queue->queuelock); /* * Can go on to queue if we have space in either: @@ -814,15 +814,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, ret = -EBUSY; if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { list_add_tail(&sec_req->backlog_head, &ctx->backlog); - mutex_unlock(&queue->queuelock); + spin_unlock_bh(&queue->queuelock); goto out; } - mutex_unlock(&queue->queuelock); + spin_unlock_bh(&queue->queuelock); goto err_free_elements; } ret = sec_send_request(sec_req, queue); - 
mutex_unlock(&queue->queuelock); + spin_unlock_bh(&queue->queuelock); if (ret) goto err_free_elements; @@ -881,7 +881,7 @@ static int sec_alg_skcipher_init(struct crypto_skcipher *tfm) if (IS_ERR(ctx->queue)) return PTR_ERR(ctx->queue); - mutex_init(&ctx->queue->queuelock); + spin_lock_init(&ctx->queue->queuelock); ctx->queue->havesoftqueue = false; return 0; diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h index 4d9063a8b10b..0bf4d7c3856c 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.h +++ b/drivers/crypto/hisilicon/sec/sec_drv.h @@ -347,7 +347,7 @@ struct sec_queue { DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN); DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *)); bool havesoftqueue; - struct mutex queuelock; + spinlock_t queuelock; void *shadow[SEC_QUEUE_LEN]; }; diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 037762b531e2..249735b7ceca 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -4,8 +4,6 @@ #ifndef __HISI_SEC_V2_H #define __HISI_SEC_V2_H -#include - #include "../qm.h" #include "sec_crypto.h" @@ -50,7 +48,7 @@ struct sec_req { int err_type; int req_id; - int flag; + u32 flag; /* Status of the SEC request */ bool fake_busy; @@ -105,7 +103,7 @@ struct sec_qp_ctx { struct idr req_idr; struct sec_alg_res res[QM_Q_DEPTH]; struct sec_ctx *ctx; - struct mutex req_lock; + spinlock_t req_lock; struct list_head backlog; struct hisi_acc_sgl_pool *c_in_pool; struct hisi_acc_sgl_pool *c_out_pool; @@ -140,6 +138,7 @@ struct sec_ctx { bool pbuf_supported; struct sec_cipher_ctx c_ctx; struct sec_auth_ctx a_ctx; + struct device *dev; }; enum sec_endian { diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 630dcb59ad56..2dbec638cca8 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -42,7 +42,6 @@ #define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH) #define SEC_SGL_SGE_NR 128 -#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev) #define SEC_CIPHER_AUTH 0xfe #define SEC_AUTH_CIPHER 0x1 #define SEC_MAX_MAC_LEN 64 @@ -89,13 +88,13 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) { int req_id; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC); - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); if (unlikely(req_id < 0)) { - dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n"); + dev_err(req->ctx->dev, "alloc req id fail!\n"); return req_id; } @@ -110,16 +109,16 @@ static void sec_free_req_id(struct sec_req *req) int req_id = req->req_id; if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) { - dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n"); + dev_err(req->ctx->dev, "free request id invalid!\n"); return; } qp_ctx->req_list[req_id] = NULL; req->qp_ctx = NULL; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); idr_remove(&qp_ctx->req_idr, req_id); - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); } static int sec_aead_verify(struct sec_req *req) @@ -136,7 +135,7 @@ static int sec_aead_verify(struct sec_req *req) aead_req->cryptlen + aead_req->assoclen - authsize); if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) { - dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n"); + dev_err(req->ctx->dev, "aead verify failure!\n"); return -EBADMSG; } @@ -175,7 
+174,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) if (unlikely(req->err_type || done != SEC_SQE_DONE || (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) || (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) { - dev_err(SEC_CTX_DEV(ctx), + dev_err_ratelimited(ctx->dev, "err_type[%d],done[%d],flag[%d]\n", req->err_type, done, flag); err = -EIO; @@ -202,7 +201,7 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) return -EBUSY; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); if (ctx->fake_req_limit <= @@ -210,10 +209,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) list_add_tail(&req->backlog_head, &qp_ctx->backlog); atomic64_inc(&ctx->sec->debug.dfx.send_cnt); atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); return -EBUSY; } - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); if (unlikely(ret == -EBUSY)) return -ENOBUFS; @@ -323,8 +322,8 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) static int sec_alg_resource_alloc(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); struct sec_alg_res *res = qp_ctx->res; + struct device *dev = ctx->dev; int ret; ret = sec_alloc_civ_resource(dev, res); @@ -357,7 +356,7 @@ alloc_fail: static void sec_alg_resource_free(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; sec_free_civ_resource(dev, qp_ctx->res); @@ -370,7 +369,7 @@ static void sec_alg_resource_free(struct sec_ctx *ctx, static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, int qp_ctx_id, int alg_type) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; struct sec_qp_ctx *qp_ctx; struct hisi_qp *qp; int ret = -ENOMEM; @@ -383,7 +382,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, qp_ctx->qp = qp; qp_ctx->ctx = ctx; - mutex_init(&qp_ctx->req_lock); + spin_lock_init(&qp_ctx->req_lock); idr_init(&qp_ctx->req_idr); INIT_LIST_HEAD(&qp_ctx->backlog); @@ -426,7 +425,7 @@ err_destroy_idr: static void sec_release_qp_ctx(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; hisi_qm_stop_qp(qp_ctx->qp); sec_alg_resource_free(ctx, qp_ctx); @@ -450,6 +449,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm); ctx->sec = sec; + ctx->dev = &sec->qm.pdev->dev; ctx->hlf_q_num = sec->ctx_q_num >> 1; ctx->pbuf_supported = ctx->sec->iommu_used; @@ -474,11 +474,9 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) err_sec_release_qp_ctx: for (i = i - 1; i >= 0; i--) sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); - kfree(ctx->qp_ctx); err_destroy_qps: sec_destroy_qps(ctx->qps, sec->ctx_q_num); - return ret; } @@ -497,7 +495,7 @@ static int sec_cipher_init(struct sec_ctx *ctx) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; - c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, &c_ctx->c_key_dma, GFP_KERNEL); if (!c_ctx->c_key) return -ENOMEM; @@ -510,7 +508,7 @@ static void sec_cipher_uninit(struct sec_ctx *ctx) struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); - dma_free_coherent(SEC_CTX_DEV(ctx), 
SEC_MAX_KEY_SIZE, + dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, c_ctx->c_key, c_ctx->c_key_dma); } @@ -518,7 +516,7 @@ static int sec_auth_init(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, &a_ctx->a_key_dma, GFP_KERNEL); if (!a_ctx->a_key) return -ENOMEM; @@ -530,8 +528,8 @@ static void sec_auth_uninit(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE); - dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE); + dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, a_ctx->a_key, a_ctx->a_key_dma); } @@ -631,12 +629,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct device *dev = ctx->dev; int ret; if (c_mode == SEC_CMODE_XTS) { ret = xts_verify_key(tfm, key, keylen); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n"); + dev_err(dev, "xts mode key err!\n"); return ret; } } @@ -657,7 +656,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, } if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n"); + dev_err(dev, "set sec key err!\n"); return ret; } @@ -689,7 +688,7 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; @@ -699,9 +698,8 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, copy_size = c_req->c_len; pbuf_length = sg_copy_to_buffer(src, sg_nents(src), - qp_ctx->res[req_id].pbuf, - copy_size); - + qp_ctx->res[req_id].pbuf, + copy_size); if (unlikely(pbuf_length != copy_size)) { dev_err(dev, "copy src data to pbuf error!\n"); return -EINVAL; @@ -725,7 +723,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; @@ -737,7 +735,6 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf, copy_size); - if (unlikely(pbuf_length != copy_size)) dev_err(dev, "copy pbuf data to dst error!\n"); @@ -750,7 +747,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, struct sec_aead_req *a_req = &req->aead_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; struct sec_alg_res *res = &qp_ctx->res[req->req_id]; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; int ret; if (req->use_pbuf) { @@ -805,7 +802,7 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req, struct scatterlist *src, struct scatterlist *dst) { struct sec_cipher_req *c_req = &req->c_req; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; if (req->use_pbuf) { sec_cipher_pbuf_unmap(ctx, req, dst); @@ -889,6 +886,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, { struct sec_ctx *ctx = 
crypto_aead_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct device *dev = ctx->dev; struct crypto_authenc_keys keys; int ret; @@ -902,13 +900,13 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, ret = sec_aead_aes_set_key(c_ctx, &keys); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n"); + dev_err(dev, "set sec cipher key err!\n"); goto bad_key; } ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n"); + dev_err(dev, "set sec auth key err!\n"); goto bad_key; } @@ -1061,7 +1059,7 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, cryptlen - iv_size); if (unlikely(sz != iv_size)) - dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n"); + dev_err(req->ctx->dev, "copy output iv error!\n"); } static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, @@ -1069,7 +1067,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, { struct sec_req *backlog_req = NULL; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); if (ctx->fake_req_limit >= atomic_read(&qp_ctx->qp->qp_status.used) && !list_empty(&qp_ctx->backlog)) { @@ -1077,7 +1075,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, typeof(*backlog_req), backlog_head); list_del(&backlog_req->backlog_head); } - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); return backlog_req; } @@ -1160,7 +1158,7 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req) ret = sec_skcipher_bd_fill(ctx, req); if (unlikely(ret)) { - dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n"); + dev_err(ctx->dev, "skcipher bd fill is error!\n"); return ret; } @@ -1194,7 +1192,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) a_req->assoclen); if (unlikely(sz != authsize)) { - dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n"); + dev_err(c->dev, "copy out mac err!\n"); err = -EINVAL; } } @@ -1259,7 +1257,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req) ret = ctx->req_op->bd_send(ctx, req); if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { - dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n"); + dev_err_ratelimited(ctx->dev, "send sec request failed!\n"); goto err_send_req; } @@ -1326,7 +1324,7 @@ static int sec_aead_init(struct crypto_aead *tfm) ctx->alg_type = SEC_AEAD; ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { - dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n"); + dev_err(ctx->dev, "get error aead iv size!\n"); return -EINVAL; } @@ -1376,7 +1374,7 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); if (IS_ERR(auth_ctx->hash_tfm)) { - dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n"); + dev_err(ctx->dev, "aead alloc shash error!\n"); sec_aead_exit(tfm); return PTR_ERR(auth_ctx->hash_tfm); } @@ -1410,7 +1408,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) { struct skcipher_request *sk_req = sreq->c_req.sk_req; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; if (unlikely(!sk_req->src || !sk_req->dst)) { @@ -1533,14 +1531,15 @@ static struct skcipher_alg 
sec_skciphers[] = { static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) { - u8 c_alg = ctx->c_ctx.c_alg; struct aead_request *req = sreq->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(req); size_t authsize = crypto_aead_authsize(tfm); + struct device *dev = ctx->dev; + u8 c_alg = ctx->c_ctx.c_alg; if (unlikely(!req->src || !req->dst || !req->cryptlen || req->assoclen > SEC_MAX_AAD_LEN)) { - dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n"); + dev_err(dev, "aead input param error!\n"); return -EINVAL; } @@ -1552,7 +1551,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) /* Support AES only */ if (unlikely(c_alg != SEC_CALG_AES)) { - dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n"); + dev_err(dev, "aead crypto alg error!\n"); return -EINVAL; } @@ -1562,7 +1561,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) sreq->c_req.c_len = req->cryptlen - authsize; if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { - dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n"); + dev_err(dev, "aead crypto length error!\n"); return -EINVAL; } diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h index b2786e17d8fe..20f11e5bbf1d 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.h +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -6,6 +6,7 @@ #define SEC_IV_SIZE 24 #define SEC_MAX_KEY_SIZE 64 +#define SEC_MAX_AKEY_SIZE 128 #define SEC_COMM_SCENE 0 enum sec_calg { @@ -64,7 +65,6 @@ enum sec_addr_type { }; struct sec_sqe_type2 { - /* * mac_len: 0~4 bits * a_key_len: 5~10 bits @@ -120,7 +120,6 @@ struct sec_sqe_type2 { /* c_pad_len_field: 0~1 bits */ __le16 c_pad_len_field; - __le64 long_a_data_len; __le64 a_ivin_addr; __le64 a_key_addr; diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 08b4660b014c..5db7cdea994a 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -107,12 +107,12 @@ static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp) if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX) return -EINVAL; - return param_set_int(val, kp); + return param_set_ushort(val, kp); } static const struct kernel_param_ops sgl_sge_nr_ops = { .set = sgl_sge_nr_set, - .get = param_get_int, + .get = param_get_ushort, }; static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 30aedfcfee7c..bcfb3afeb4e2 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1834,6 +1834,8 @@ static const struct of_device_id safexcel_of_match_table[] = { {}, }; +MODULE_DEVICE_TABLE(of, safexcel_of_match_table); + static struct platform_driver crypto_safexcel = { .probe = safexcel_probe, .remove = safexcel_remove, diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index b2d5f0aa4243..31515c63bcfe 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -382,7 +382,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, u32 x; x = ipad[i] ^ ipad[i + 4]; - cache[i] ^= swab(x); + cache[i] ^= swab32(x); } } cache_len = AES_BLOCK_SIZE; @@ -820,7 +820,7 @@ static int safexcel_ahash_final(struct ahash_request *areq) u32 *result = (void *)areq->result; /* K3 */ - result[i] = 
swab(ctx->base.ipad.word[i + 4]);
+			result[i] = swab32(ctx->base.ipad.word[i + 4]);
 		}
 		areq->result[0] ^= 0x80;			// 10- padding
 		crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
@@ -2105,7 +2105,7 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 	crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
 		"\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
 	for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
-		ctx->base.ipad.word[i] = swab(key_tmp[i]);
+		ctx->base.ipad.word[i] = swab32(key_tmp[i]);
 
 	crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
 	crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
@@ -2188,7 +2188,7 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 		return ret;
 
 	for (i = 0; i < len / sizeof(u32); i++)
-		ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]);
+		ctx->base.ipad.word[i + 8] = swab32(aes.key_enc[i]);
 
 	/* precompute the CMAC key material */
 	crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index b4a6ff9dd6d5..596a8c74e40a 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -614,7 +614,6 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
 	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
 	.min_keysize = DES3_EDE_KEY_SIZE,
 	.max_keysize = DES3_EDE_KEY_SIZE,
-	.ivsize = DES3_EDE_BLOCK_SIZE,
 	.base = {
 		.cra_name = "ecb(des3_ede)",
 		.cra_driver_name = "mv-ecb-des3-ede",
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index 40b482198ebc..a765eefb18c2 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -286,6 +286,7 @@ static int process_tar_file(struct device *dev,
 	struct tar_ucode_info_t *tar_info;
 	struct otx_cpt_ucode_hdr *ucode_hdr;
 	int ucode_type, ucode_size;
+	unsigned int code_length;
 
 	/*
 	 * If size is less than microcode header size then don't report
@@ -303,7 +304,13 @@ static int process_tar_file(struct device *dev,
 	if (get_ucode_type(ucode_hdr, &ucode_type))
 		return 0;
 
-	ucode_size = ntohl(ucode_hdr->code_length) * 2;
+	code_length = ntohl(ucode_hdr->code_length);
+	if (code_length >= INT_MAX / 2) {
+		dev_err(dev, "Invalid code_length %u\n", code_length);
+		return -EINVAL;
+	}
+
+	ucode_size = code_length * 2;
 	if (!ucode_size || (size < round_up(ucode_size, 16) +
 	    sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
 		dev_err(dev, "Ucode %s invalid size\n", filename);
@@ -886,6 +893,7 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
 {
 	struct otx_cpt_ucode_hdr *ucode_hdr;
 	const struct firmware *fw;
+	unsigned int code_length;
 	int ret;
 
 	set_ucode_filename(ucode, ucode_filename);
@@ -896,7 +904,13 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
 	ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
 	memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
 	ucode->ver_num = ucode_hdr->ver_num;
-	ucode->size = ntohl(ucode_hdr->code_length) * 2;
+	code_length = ntohl(ucode_hdr->code_length);
+	if (code_length >= INT_MAX / 2) {
+		dev_err(dev, "Ucode invalid code_length %u\n", code_length);
+		ret = -EINVAL;
+		goto release_fw;
+	}
+	ucode->size = code_length * 2;
 	if (!ucode->size || (fw->size < round_up(ucode->size, 16) +
 	    sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
 		dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
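The two otx_cptpf hunks above check the untrusted header field before the multiply, not after: once ntohl(code_length) * 2 has wrapped a signed int, no later comparison can detect it. A minimal userspace sketch of the same guard (ucode_size_from_hdr is a hypothetical helper for illustration, not a function in the driver)::

  #include <arpa/inet.h>  /* ntohl, htonl */
  #include <limits.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Reject a header whose doubled length would not fit in int. */
  static int ucode_size_from_hdr(uint32_t be_code_length)
  {
          uint32_t code_length = ntohl(be_code_length);

          /* Guard before the multiply, exactly as the hunks above do. */
          if (code_length >= INT_MAX / 2)
                  return -1;

          return (int)(code_length * 2);
  }

  int main(void)
  {
          /* 0x80000000: doubling it would wrap a 32-bit signed int. */
          printf("%d\n", ucode_size_from_hdr(htonl(0x80000000u)));
          printf("%d\n", ucode_size_from_hdr(htonl(0x1000u)));
          return 0;
  }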
a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 13c65deda8e9..8a4f10bb3fcd 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -827,7 +827,7 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
 		goto err_out;
 
 	vas_init_rx_win_attr(&rxattr, coproc->ct);
-	rxattr.rx_fifo = (void *)rx_fifo;
+	rxattr.rx_fifo = rx_fifo;
 	rxattr.rx_fifo_size = fifo_size;
 	rxattr.lnotify_lpid = lpid;
 	rxattr.lnotify_pid = pid;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index b939f0099c12..9f6966648ca2 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -35,19 +35,6 @@ static DEFINE_MUTEX(algs_lock);
 static unsigned int active_devs;
 
-struct qat_alg_buf {
-	u32 len;
-	u32 resrvd;
-	u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
-	u64 resrvd;
-	u32 num_bufs;
-	u32 num_mapped_bufs;
-	struct qat_alg_buf bufers[];
-} __packed __aligned(64);
-
 /* Common content descriptor */
 struct qat_alg_cd {
 	union {
@@ -638,14 +625,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 	dma_addr_t blpout = qat_req->buf.bloutp;
 	size_t sz = qat_req->buf.sz;
 	size_t sz_out = qat_req->buf.sz_out;
+	int bl_dma_dir;
 	int i;
 
+	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
 	for (i = 0; i < bl->num_bufs; i++)
 		dma_unmap_single(dev, bl->bufers[i].addr,
-				 bl->bufers[i].len, DMA_BIDIRECTIONAL);
+				 bl->bufers[i].len, bl_dma_dir);
 
 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
-	kfree(bl);
+
+	if (!qat_req->buf.sgl_src_valid)
+		kfree(bl);
+
 	if (blp != blpout) {
 		/* If out of place operation dma unmap only data */
 		int bufless = blout->num_bufs - blout->num_mapped_bufs;
@@ -653,10 +646,12 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 		for (i = bufless; i < blout->num_bufs; i++) {
 			dma_unmap_single(dev, blout->bufers[i].addr,
 					 blout->bufers[i].len,
-					 DMA_BIDIRECTIONAL);
+					 DMA_FROM_DEVICE);
 		}
 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
-		kfree(blout);
+
+		if (!qat_req->buf.sgl_dst_valid)
+			kfree(blout);
 	}
 }
 
@@ -670,26 +665,34 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	int n = sg_nents(sgl);
 	struct qat_alg_buf_list *bufl;
 	struct qat_alg_buf_list *buflout = NULL;
-	dma_addr_t blp;
-	dma_addr_t bloutp;
+	dma_addr_t blp = DMA_MAPPING_ERROR;
+	dma_addr_t bloutp = DMA_MAPPING_ERROR;
 	struct scatterlist *sg;
-	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+	size_t sz_out, sz = struct_size(bufl, bufers, n);
+	int node = dev_to_node(&GET_DEV(inst->accel_dev));
+	int bufl_dma_dir;
 
 	if (unlikely(!n))
 		return -EINVAL;
 
-	bufl = kzalloc_node(sz, GFP_ATOMIC,
-			    dev_to_node(&GET_DEV(inst->accel_dev)));
-	if (unlikely(!bufl))
-		return -ENOMEM;
+	qat_req->buf.sgl_src_valid = false;
+	qat_req->buf.sgl_dst_valid = false;
+
+	if (n > QAT_MAX_BUFF_DESC) {
+		bufl = kzalloc_node(sz, GFP_ATOMIC, node);
+		if (unlikely(!bufl))
+			return -ENOMEM;
+	} else {
+		bufl = &qat_req->buf.sgl_src.sgl_hdr;
+		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+		qat_req->buf.sgl_src_valid = true;
+	}
+
+	bufl_dma_dir = sgl != sglout ?
DMA_TO_DEVICE : DMA_BIDIRECTIONAL; for_each_sg(sgl, sg, n, i) bufl->bufers[i].addr = DMA_MAPPING_ERROR; - blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, blp))) - goto err_in; - for_each_sg(sgl, sg, n, i) { int y = sg_nctr; @@ -698,13 +701,16 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), sg->length, - DMA_BIDIRECTIONAL); + bufl_dma_dir); bufl->bufers[y].len = sg->length; if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) goto err_in; sg_nctr++; } bufl->num_bufs = sg_nctr; + blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, blp))) + goto err_in; qat_req->buf.bl = bufl; qat_req->buf.blp = blp; qat_req->buf.sz = sz; @@ -713,20 +719,23 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, struct qat_alg_buf *bufers; n = sg_nents(sglout); - sz_out = struct_size(buflout, bufers, n + 1); + sz_out = struct_size(buflout, bufers, n); sg_nctr = 0; - buflout = kzalloc_node(sz_out, GFP_ATOMIC, - dev_to_node(&GET_DEV(inst->accel_dev))); - if (unlikely(!buflout)) - goto err_in; + + if (n > QAT_MAX_BUFF_DESC) { + buflout = kzalloc_node(sz_out, GFP_ATOMIC, node); + if (unlikely(!buflout)) + goto err_in; + } else { + buflout = &qat_req->buf.sgl_dst.sgl_hdr; + memset(buflout, 0, sizeof(struct qat_alg_buf_list)); + qat_req->buf.sgl_dst_valid = true; + } bufers = buflout->bufers; for_each_sg(sglout, sg, n, i) bufers[i].addr = DMA_MAPPING_ERROR; - bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, bloutp))) - goto err_out; for_each_sg(sglout, sg, n, i) { int y = sg_nctr; @@ -735,7 +744,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, bufers[y].addr = dma_map_single(dev, sg_virt(sg), sg->length, - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev, bufers[y].addr))) goto err_out; bufers[y].len = sg->length; @@ -743,6 +752,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, } buflout->num_bufs = sg_nctr; buflout->num_mapped_bufs = sg_nctr; + bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, bloutp))) + goto err_out; qat_req->buf.blout = buflout; qat_req->buf.bloutp = bloutp; qat_req->buf.sz_out = sz_out; @@ -754,27 +766,32 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, return 0; err_out: + if (!dma_mapping_error(dev, bloutp)) + dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); + n = sg_nents(sglout); for (i = 0; i < n; i++) if (!dma_mapping_error(dev, buflout->bufers[i].addr)) dma_unmap_single(dev, buflout->bufers[i].addr, buflout->bufers[i].len, - DMA_BIDIRECTIONAL); - if (!dma_mapping_error(dev, bloutp)) - dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); - kfree(buflout); + DMA_FROM_DEVICE); + + if (!qat_req->buf.sgl_dst_valid) + kfree(buflout); err_in: + if (!dma_mapping_error(dev, blp)) + dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); + n = sg_nents(sgl); for (i = 0; i < n; i++) if (!dma_mapping_error(dev, bufl->bufers[i].addr)) dma_unmap_single(dev, bufl->bufers[i].addr, bufl->bufers[i].len, - DMA_BIDIRECTIONAL); + bufl_dma_dir); - if (!dma_mapping_error(dev, blp)) - dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); - kfree(bufl); + if (!qat_req->buf.sgl_src_valid) + kfree(bufl); dev_err(dev, "Failed to map buf for dma\n"); return -ENOMEM; diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h 
index 12682d1e9f5f..5f9328201ba4 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h @@ -20,6 +20,26 @@ struct qat_crypto_instance { atomic_t refctr; }; +#define QAT_MAX_BUFF_DESC 4 + +struct qat_alg_buf { + u32 len; + u32 resrvd; + u64 addr; +} __packed; + +struct qat_alg_buf_list { + u64 resrvd; + u32 num_bufs; + u32 num_mapped_bufs; + struct qat_alg_buf bufers[]; +} __packed; + +struct qat_alg_fixed_buf_list { + struct qat_alg_buf_list sgl_hdr; + struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC]; +} __packed __aligned(64); + struct qat_crypto_request_buffs { struct qat_alg_buf_list *bl; dma_addr_t blp; @@ -27,6 +47,10 @@ struct qat_crypto_request_buffs { dma_addr_t bloutp; size_t sz; size_t sz_out; + bool sgl_src_valid; + bool sgl_dst_valid; + struct qat_alg_fixed_buf_list sgl_src; + struct qat_alg_fixed_buf_list sgl_dst; }; struct qat_crypto_request; diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index 11f30fd48c14..031b5f701a0a 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -65,6 +65,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) } else { /* copy only remaining bytes */ memcpy(data, &val, max - currsize); + break; } } while (currsize < max); diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index d60679c79822..2043dd061121 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -25,10 +25,10 @@ #include #include #include -#include #include #include #include +#include #define SHA_BUFFER_LEN PAGE_SIZE #define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE @@ -195,7 +195,7 @@ struct sahara_dev { void __iomem *regs_base; struct clk *clk_ipg; struct clk *clk_ahb; - struct mutex queue_mutex; + spinlock_t queue_spinlock; struct task_struct *kthread; struct completion dma_completion; @@ -641,9 +641,9 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) rctx->mode = mode; - mutex_lock(&dev->queue_mutex); + spin_lock_bh(&dev->queue_spinlock); err = crypto_enqueue_request(&dev->queue, &req->base); - mutex_unlock(&dev->queue_mutex); + spin_unlock_bh(&dev->queue_spinlock); wake_up_process(dev->kthread); @@ -1042,10 +1042,10 @@ static int sahara_queue_manage(void *data) do { __set_current_state(TASK_INTERRUPTIBLE); - mutex_lock(&dev->queue_mutex); + spin_lock_bh(&dev->queue_spinlock); backlog = crypto_get_backlog(&dev->queue); async_req = crypto_dequeue_request(&dev->queue); - mutex_unlock(&dev->queue_mutex); + spin_unlock_bh(&dev->queue_spinlock); if (backlog) backlog->complete(backlog, -EINPROGRESS); @@ -1091,9 +1091,9 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last) rctx->first = 1; } - mutex_lock(&dev->queue_mutex); + spin_lock_bh(&dev->queue_spinlock); ret = crypto_enqueue_request(&dev->queue, &req->base); - mutex_unlock(&dev->queue_mutex); + spin_unlock_bh(&dev->queue_spinlock); wake_up_process(dev->kthread); @@ -1454,7 +1454,7 @@ static int sahara_probe(struct platform_device *pdev) crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH); - mutex_init(&dev->queue_mutex); + spin_lock_init(&dev->queue_spinlock); dev_ptr = dev; diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index be1bf39a317d..90a920e7f664 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -384,8 +384,10 @@ static int stm32_crc_remove(struct platform_device *pdev) struct stm32_crc *crc = platform_get_drvdata(pdev); int ret = 
pm_runtime_get_sync(crc->dev);
 
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_noidle(crc->dev);
 		return ret;
+	}
 
 	spin_lock(&crc_list.lock);
 	list_del(&crc->list);
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index cb6401c9e9a4..acf31cc1dbcc 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r)
 		.start = r->start,
 		.end = r->end,
 		.flags = IORESOURCE_MEM,
+		.desc = IORES_DESC_SOFT_RESERVED,
 	};
 	struct platform_device *pdev;
 	struct memregion_info info;
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 17ed980d9099..d6da9c3e3106 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -514,15 +514,19 @@ static int of_get_devfreq_events(struct device_node *np,
 
 	count = of_get_child_count(events_np);
 	desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL);
-	if (!desc)
+	if (!desc) {
+		of_node_put(events_np);
 		return -ENOMEM;
+	}
 	info->num_events = count;
 
 	of_id = of_match_device(exynos_ppmu_id_match, dev);
 	if (of_id)
 		info->ppmu_type = (enum exynos_ppmu_type)of_id->data;
-	else
+	else {
+		of_node_put(events_np);
 		return -EINVAL;
+	}
 
 	j = 0;
 	for_each_child_of_node(events_np, node) {
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 2e912166a993..7e52375d9818 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -485,6 +485,8 @@ static int rk3399_dmcfreq_remove(struct platform_device *pdev)
 {
 	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
 
+	devfreq_event_disable_edev(dmcfreq->edev);
+
 	/*
 	 * Before remove the opp table we need to unregister the opp notifier.
 	 */
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 0b2d943ac59b..b5f2a3e11e2d 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -107,7 +107,7 @@ menuconfig DMABUF_HEAPS
 
 menuconfig DMABUF_SYSFS_STATS
 	bool "DMA-BUF sysfs statistics"
-	select DMA_SHARED_BUFFER
+	depends on DMA_SHARED_BUFFER
 	help
 	  Choose this option to enable DMA-BUF sysfs statistics
 	  in location /sys/kernel/dmabuf/buffers.
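The stm32-crc32 hunk at the top of this group exists because pm_runtime_get_sync() raises the device usage count even when the resume step fails, so the error path must rebalance it with pm_runtime_put_noidle(). A toy userspace analog of that contract (the toy_* names are hypothetical, purely for illustration)::

  #include <stdio.h>

  /* Toy analog of pm_runtime_get_sync(): the usage count is bumped
   * before we know whether resume succeeded, so every caller must
   * drop it on error as well as on success. */
  struct toy_dev { int usage; int broken; };

  static int toy_get_sync(struct toy_dev *d)
  {
          d->usage++;                 /* counted even on failure */
          return d->broken ? -1 : 0;
  }

  static void toy_put_noidle(struct toy_dev *d)
  {
          d->usage--;
  }

  static int toy_remove(struct toy_dev *d)
  {
          int ret = toy_get_sync(d);

          if (ret < 0) {
                  toy_put_noidle(d);  /* rebalance, as the fix above does */
                  return ret;
          }
          /* ... teardown would go here ... */
          toy_put_noidle(d);
          return 0;
  }

  int main(void)
  {
          struct toy_dev d = { .usage = 0, .broken = 1 };

          toy_remove(&d);
          printf("usage after failed remove: %d\n", d.usage); /* stays 0 */
          return 0;
  }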
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c
index 2389a363bd3a..5c8efa55e3aa 100644
--- a/drivers/dma-buf/dma-buf-sysfs-stats.c
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.c
@@ -11,6 +11,9 @@
 #include
 #include
 #include
+#include
+
+#include
 
 #include "dma-buf-sysfs-stats.h"
 
@@ -87,13 +90,17 @@ static struct kobj_type dma_buf_ktype = {
 void dma_buf_stats_teardown(struct dma_buf *dmabuf)
 {
 	struct dma_buf_sysfs_entry *sysfs_entry;
+	bool skip_sysfs_release = false;
 
 	sysfs_entry = dmabuf->sysfs_entry;
 	if (!sysfs_entry)
 		return;
 
-	kobject_del(&sysfs_entry->kobj);
-	kobject_put(&sysfs_entry->kobj);
+	trace_android_rvh_dma_buf_stats_teardown(sysfs_entry, &skip_sysfs_release);
+	if (!skip_sysfs_release) {
+		kobject_del(&sysfs_entry->kobj);
+		kobject_put(&sysfs_entry->kobj);
+	}
 }
 
 /*
@@ -135,10 +142,58 @@ void dma_buf_uninit_sysfs_statistics(void)
 	kset_unregister(dma_buf_stats_kset);
 }
 
+struct dma_buf_create_sysfs_entry {
+	struct dma_buf *dmabuf;
+	struct work_struct work;
+};
+
+union dma_buf_create_sysfs_work_entry {
+	struct dma_buf_create_sysfs_entry create_entry;
+	struct dma_buf_sysfs_entry sysfs_entry;
+};
+
+static void sysfs_add_workfn(struct work_struct *work)
+{
+	struct dma_buf_create_sysfs_entry *create_entry =
+		container_of(work, struct dma_buf_create_sysfs_entry, work);
+	struct dma_buf *dmabuf = create_entry->dmabuf;
+
+	/*
+	 * A dmabuf is ref-counted via its file member. If this handler holds the only
+	 * reference to the dmabuf, there is no need for sysfs kobject creation. This is an
+	 * optimization and a race; when the reference count drops to 1 immediately after
+	 * this check it is not harmful as the sysfs entry will still get cleaned up in
+	 * dma_buf_stats_teardown, which won't get called until the final dmabuf reference
+	 * is released, and that can't happen until the end of this function.
+	 */
+	if (file_count(dmabuf->file) > 1) {
+		dmabuf->sysfs_entry->dmabuf = dmabuf;
+		/*
+		 * kobject_init_and_add expects kobject to be zero-filled, but we have populated it
+		 * to trigger this work function.
+		 */
+		memset(&dmabuf->sysfs_entry->kobj, 0, sizeof(dmabuf->sysfs_entry->kobj));
+		dmabuf->sysfs_entry->kobj.kset = dma_buf_per_buffer_stats_kset;
+		if (kobject_init_and_add(&dmabuf->sysfs_entry->kobj, &dma_buf_ktype, NULL,
+					 "%lu", file_inode(dmabuf->file)->i_ino)) {
+			kobject_put(&dmabuf->sysfs_entry->kobj);
+			dmabuf->sysfs_entry = NULL;
+		}
+	} else {
+		/*
+		 * Free the sysfs_entry and reset the pointer so dma_buf_stats_teardown doesn't
+		 * attempt to operate on it.
+		 */
+		kfree(dmabuf->sysfs_entry);
+		dmabuf->sysfs_entry = NULL;
+	}
+	dma_buf_put(dmabuf);
+}
+
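The workqueue rework above moves kobject creation off the export path and deliberately tolerates a benign race on file_count(): if the count drops to 1 just after the check, teardown still cannot run until the final reference is released, and the handler itself holds one. A minimal pthread sketch of the same take-a-reference-before-deferring rule (struct obj and friends are hypothetical)::

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* Toy object with the same lifetime rule as a dmabuf here: the
   * deferred task holds its own reference, so teardown cannot run
   * before the task finishes. */
  struct obj {
          atomic_int refs;
  };

  static void obj_put(struct obj *o)
  {
          if (atomic_fetch_sub(&o->refs, 1) == 1) {
                  printf("teardown\n");
                  free(o);
          }
  }

  static void *deferred_setup(void *arg)
  {
          struct obj *o = arg;

          /* refs > 1 means another holder still exists; only then is
           * the setup worth doing (mirrors the file_count() > 1 check). */
          if (atomic_load(&o->refs) > 1)
                  printf("doing expensive setup off the hot path\n");
          else
                  printf("last holder is us; skip setup\n");

          obj_put(o);     /* drop the reference taken before scheduling */
          return NULL;
  }

  int main(void)
  {
          struct obj *o = malloc(sizeof(*o));
          pthread_t t;

          atomic_init(&o->refs, 1);       /* caller's reference */
          atomic_fetch_add(&o->refs, 1);  /* reference for the task */
          pthread_create(&t, NULL, deferred_setup, o);
          pthread_join(t, NULL);
          obj_put(o);     /* caller drops; teardown runs after the task */
          return 0;
  }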
 int dma_buf_stats_setup(struct dma_buf *dmabuf)
 {
-	struct dma_buf_sysfs_entry *sysfs_entry;
-	int ret;
+	struct dma_buf_create_sysfs_entry *create_entry;
+	union dma_buf_create_sysfs_work_entry *work_entry;
 
 	if (!dmabuf || !dmabuf->file)
 		return -EINVAL;
@@ -148,25 +203,18 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf)
 		return -EINVAL;
 	}
 
-	sysfs_entry = kzalloc(sizeof(struct dma_buf_sysfs_entry), GFP_KERNEL);
-	if (!sysfs_entry)
+	work_entry = kmalloc(sizeof(union dma_buf_create_sysfs_work_entry), GFP_KERNEL);
+	if (!work_entry)
 		return -ENOMEM;
 
-	sysfs_entry->kobj.kset = dma_buf_per_buffer_stats_kset;
-	sysfs_entry->dmabuf = dmabuf;
+	dmabuf->sysfs_entry = &work_entry->sysfs_entry;
 
-	dmabuf->sysfs_entry = sysfs_entry;
+	create_entry = &work_entry->create_entry;
+	create_entry->dmabuf = dmabuf;
 
-	/* create the directory for buffer stats */
-	ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL,
-				   "%lu", file_inode(dmabuf->file)->i_ino);
-	if (ret)
-		goto err_sysfs_dmabuf;
+	INIT_WORK(&create_entry->work, sysfs_add_workfn);
+	get_dma_buf(dmabuf); /* This reference will be dropped in sysfs_add_workfn. */
+	schedule_work(&create_entry->work);
 
 	return 0;
-
-err_sysfs_dmabuf:
-	kobject_put(&sysfs_entry->kobj);
-	dmabuf->sysfs_entry = NULL;
-	return ret;
 }
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index e7f6f9dd3c67..771be95730da 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -575,6 +575,7 @@ EXPORT_SYMBOL_GPL(is_dma_buf_file);
 
 static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
 {
+	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
 	struct file *file;
 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
 
@@ -584,6 +585,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
 	inode->i_size = dmabuf->size;
 	inode_set_bytes(inode, dmabuf->size);
 
+	/*
+	 * The ->i_ino acquired from get_next_ino() is not unique thus
+	 * not suitable for using it as dentry name by dmabuf stats.
+	 * Override ->i_ino with the unique and dmabuffs specific
+	 * value.
+	 */
+	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
 	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf", flags,
 				 &dma_buf_fops);
 	if (IS_ERR(file))
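The per-buffer sysfs directories are named after ->i_ino, so duplicates from the shared get_next_ino() pool would collide, while a monotonically increasing 64-bit counter cannot realistically wrap. A userspace sketch of the same allocator (alloc_id is a hypothetical name)::

  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  /* A 64-bit private counter never practically wraps, so every ID it
   * hands out is unique for the life of the process/system. */
  static atomic_uint_fast64_t next_id;

  static uint64_t alloc_id(void)
  {
          /* +1 so the first ID handed out is 1, mirroring
           * atomic64_add_return(1, &dmabuf_inode) above. */
          return atomic_fetch_add(&next_id, 1) + 1;
  }

  int main(void)
  {
          printf("%llu %llu %llu\n",
                 (unsigned long long)alloc_id(),
                 (unsigned long long)alloc_id(),
                 (unsigned long long)alloc_id());
          return 0;
  }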
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 85aae0a01c49..004f9609c134 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -314,7 +314,7 @@ EXPORT_SYMBOL_GPL(dma_heap_get_name);
 
 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 {
-	struct dma_heap *heap, *err_ret;
+	struct dma_heap *heap, *h, *err_ret;
 	unsigned int minor;
 	int ret;
 
@@ -328,15 +328,6 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 		return ERR_PTR(-EINVAL);
 	}
 
-	/* check the name is unique */
-	heap = dma_heap_find(exp_info->name);
-	if (heap) {
-		pr_err("dma_heap: Already registered heap named %s\n",
-		       exp_info->name);
-		dma_heap_put(heap);
-		return ERR_PTR(-EINVAL);
-	}
-
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 	if (!heap)
 		return ERR_PTR(-ENOMEM);
@@ -380,13 +371,27 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 	/* Make sure it doesn't disappear on us */
 	heap->heap_dev = get_device(heap->heap_dev);
 
-	/* Add heap to the list */
 	mutex_lock(&heap_list_lock);
+	/* check the name is unique */
+	list_for_each_entry(h, &heap_list, list) {
+		if (!strcmp(h->name, exp_info->name)) {
+			mutex_unlock(&heap_list_lock);
+			pr_err("dma_heap: Already registered heap named %s\n",
+			       exp_info->name);
+			err_ret = ERR_PTR(-EINVAL);
+			put_device(heap->heap_dev);
+			goto err3;
+		}
+	}
+
+	/* Add heap to the list */
 	list_add(&heap->list, &heap_list);
 	mutex_unlock(&heap_list_lock);
 
 	return heap;
 
+err3:
+	device_destroy(dma_heap_class, heap->heap_devt);
 err2:
 	cdev_del(&heap->heap_cdev);
 err1:
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index fd564aa70ee9..511d678ecbfc 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -126,10 +126,11 @@ static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	struct cma_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
 
+	mutex_lock(&buffer->lock);
+
 	if (buffer->vmap_cnt)
 		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
@@ -146,10 +147,11 @@ static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 	struct cma_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
 
+	mutex_lock(&buffer->lock);
+
 	if (buffer->vmap_cnt)
 		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
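dma_heap_add() now performs the name-uniqueness check and the list insertion under a single heap_list_lock acquisition, closing the window in which two racing registrations of the same name could both pass the old dma_heap_find() check. A toy registry showing the same single-lock pattern (register_name and the fixed-size table are hypothetical)::

  #include <pthread.h>
  #include <stdio.h>
  #include <string.h>

  #define MAX_HEAPS 8

  /* The name check and the insert happen under one lock, so two
   * racing register_name("system") calls cannot both succeed. */
  static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
  static const char *names[MAX_HEAPS];
  static int nr_names;

  static int register_name(const char *name)
  {
          int i, ret = 0;

          pthread_mutex_lock(&reg_lock);
          for (i = 0; i < nr_names; i++) {
                  if (!strcmp(names[i], name)) {
                          ret = -1;       /* duplicate, rejected under the lock */
                          goto out;
                  }
          }
          if (nr_names == MAX_HEAPS)
                  ret = -1;
          else
                  names[nr_names++] = name;
  out:
          pthread_mutex_unlock(&reg_lock);
          return ret;
  }

  int main(void)
  {
          printf("%d %d\n", register_name("system"), register_name("system"));
          return 0;
  }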
diff --git a/drivers/dma-buf/heaps/page_pool.c b/drivers/dma-buf/heaps/page_pool.c
index b79e737bac95..de9d72809412 100644
--- a/drivers/dma-buf/heaps/page_pool.c
+++ b/drivers/dma-buf/heaps/page_pool.c
@@ -11,10 +11,16 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include "page_pool.h"
 
+struct dmabuf_page_pool_with_spinlock {
+	struct dmabuf_page_pool pool;
+	struct spinlock spinlock;
+};
+
 static LIST_HEAD(pool_list);
 static DEFINE_MUTEX(pool_list_lock);
 
@@ -35,34 +41,41 @@ static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool,
 static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
 {
 	int index;
+	struct dmabuf_page_pool_with_spinlock *container_pool =
+		container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);
 
 	if (PageHighMem(page))
 		index = POOL_HIGHPAGE;
 	else
 		index = POOL_LOWPAGE;
 
-	mutex_lock(&pool->mutex);
+	spin_lock(&container_pool->spinlock);
 	list_add_tail(&page->lru, &pool->items[index]);
 	pool->count[index]++;
+	spin_unlock(&container_pool->spinlock);
 	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
 			    1 << pool->order);
-	mutex_unlock(&pool->mutex);
 }
 
 static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
 {
 	struct page *page;
+	struct dmabuf_page_pool_with_spinlock *container_pool =
+		container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);
 
-	mutex_lock(&pool->mutex);
+	spin_lock(&container_pool->spinlock);
 	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
 	if (page) {
 		pool->count[index]--;
 		list_del(&page->lru);
+		spin_unlock(&container_pool->spinlock);
 		mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
 				    -(1 << pool->order));
+		goto out;
 	}
-	mutex_unlock(&pool->mutex);
+	spin_unlock(&container_pool->spinlock);
 
+out:
 	return page;
 }
 
@@ -113,19 +126,25 @@ static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
 
 struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
 {
-	struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	struct dmabuf_page_pool *pool;
+	struct dmabuf_page_pool_with_spinlock *container_pool =
+		kmalloc(sizeof(*container_pool), GFP_KERNEL);
 	int i;
 
-	if (!pool)
+	if (!container_pool)
 		return NULL;
 
+	spin_lock_init(&container_pool->spinlock);
+	pool = &container_pool->pool;
+
 	for (i = 0; i < POOL_TYPE_SIZE; i++) {
 		pool->count[i] = 0;
 		INIT_LIST_HEAD(&pool->items[i]);
 	}
 	pool->gfp_mask = gfp_mask | __GFP_COMP;
 	pool->order = order;
-	mutex_init(&pool->mutex);
+	mutex_init(&pool->mutex); /* No longer used! */
+	mutex_lock(&pool->mutex); /* Make sure anyone who attempts to acquire this hangs */
 
 	mutex_lock(&pool_list_lock);
 	list_add(&pool->list, &pool_list);
@@ -138,6 +157,7 @@ EXPORT_SYMBOL_GPL(dmabuf_page_pool_create);
 void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
 {
 	struct page *page;
+	struct dmabuf_page_pool_with_spinlock *container_pool;
 	int i;
 
 	/* Remove us from the pool list */
@@ -151,7 +171,8 @@ void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
 			dmabuf_page_pool_free_pages(pool, page);
 	}
 
-	kfree(pool);
+	container_pool = container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);
+	kfree(container_pool);
 }
 EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);
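Swapping the pool's mutex for a spinlock without touching struct dmabuf_page_pool keeps the exported structure layout (and, it appears, the frozen ABI this branch maintains) intact: the new lock lives in a wrapper struct and is reached from the embedded member via container_of(). A userspace sketch of that wrapper trick (struct pool and struct pool_ext are hypothetical)::

  #include <stddef.h>
  #include <stdio.h>

  #define container_of(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))

  /* Public struct whose layout must not change. */
  struct pool {
          int count;
  };

  /* Private extension: new state lives outside the frozen struct and
   * is reachable from any public pointer via container_of(). */
  struct pool_ext {
          struct pool pool;
          int new_lock_state;
  };

  static void touch(struct pool *p)
  {
          struct pool_ext *ext = container_of(p, struct pool_ext, pool);

          ext->new_lock_state++;  /* private state, public entry point */
          p->count++;
  }

  int main(void)
  {
          struct pool_ext ext = { .pool = { .count = 0 }, .new_lock_state = 0 };

          touch(&ext.pool);
          printf("%d %d\n", ext.pool.count, ext.new_lock_state);
          return 0;
  }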
diff --git a/drivers/dma-buf/heaps/page_pool.h b/drivers/dma-buf/heaps/page_pool.h
index 6b083b04f195..b578e18dc1ed 100644
--- a/drivers/dma-buf/heaps/page_pool.h
+++ b/drivers/dma-buf/heaps/page_pool.h
@@ -40,7 +40,7 @@ enum {
 struct dmabuf_page_pool {
 	int count[POOL_TYPE_SIZE];
 	struct list_head items[POOL_TYPE_SIZE];
-	struct mutex mutex;
+	struct mutex mutex; /* No longer used! */
 	gfp_t gfp_mask;
 	unsigned int order;
 	struct list_head list;
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index cfbf10128aae..e359c5c6c4df 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -26,8 +26,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct udmabuf *ubuf = vma->vm_private_data;
+	pgoff_t pgoff = vmf->pgoff;
 
-	vmf->page = ubuf->pages[vmf->pgoff];
+	if (pgoff >= ubuf->pagecount)
+		return VM_FAULT_SIGBUS;
+	vmf->page = ubuf->pages[pgoff];
 	get_page(vmf->page);
 	return 0;
 }
@@ -115,17 +118,20 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
 {
 	struct udmabuf *ubuf = buf->priv;
 	struct device *dev = ubuf->device->this_device;
+	int ret = 0;
 
 	if (!ubuf->sg) {
 		ubuf->sg = get_sg_table(dev, buf, direction);
-		if (IS_ERR(ubuf->sg))
-			return PTR_ERR(ubuf->sg);
+		if (IS_ERR(ubuf->sg)) {
+			ret = PTR_ERR(ubuf->sg);
+			ubuf->sg = NULL;
+		}
 	} else {
 		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
 				    direction);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int end_cpu_udmabuf(struct dma_buf *buf,
@@ -324,7 +330,23 @@ static struct miscdevice udmabuf_misc = {
 
 static int __init udmabuf_dev_init(void)
 {
-	return misc_register(&udmabuf_misc);
+	int ret;
+
+	ret = misc_register(&udmabuf_misc);
+	if (ret < 0) {
+		pr_err("Could not initialize udmabuf device\n");
+		return ret;
+	}
+
+	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
+					   DMA_BIT_MASK(64));
+	if (ret < 0) {
+		pr_err("Could not setup DMA mask for udmabuf device\n");
+		misc_deregister(&udmabuf_misc);
+		return ret;
+	}
+
+	return 0;
 }
 
 static void __exit udmabuf_dev_exit(void)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7eaee5b705b1..6a4f9697b574 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -237,6 +237,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 		       ATC_SPIP_BOUNDARY(first->boundary));
 	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
 		       ATC_DPIP_BOUNDARY(first->boundary));
+	/* Don't allow CPU to reorder channel enable.
*/ + wmb(); dma_writel(atdma, CHER, atchan->mask); vdbg_dump_regs(atchan); @@ -297,7 +299,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) struct at_desc *desc_first = atc_first_active(atchan); struct at_desc *desc; int ret; - u32 ctrla, dscr, trials; + u32 ctrla, dscr; + unsigned int i; /* * If the cookie doesn't match to the currently running transfer then @@ -367,7 +370,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) dscr = channel_readl(atchan, DSCR); rmb(); /* ensure DSCR is read before CTRLA */ ctrla = channel_readl(atchan, CTRLA); - for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) { + for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) { u32 new_dscr; rmb(); /* ensure DSCR is read after CTRLA */ @@ -393,7 +396,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) rmb(); /* ensure DSCR is read before CTRLA */ ctrla = channel_readl(atchan, CTRLA); } - if (unlikely(trials >= ATC_MAX_DSCR_TRIALS)) + if (unlikely(i == ATC_MAX_DSCR_TRIALS)) return -ETIMEDOUT; /* for the first descriptor we can be more accurate */ @@ -443,18 +446,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) if (!atc_chan_is_cyclic(atchan)) dma_cookie_complete(txd); - /* If the transfer was a memset, free our temporary buffer */ - if (desc->memset_buffer) { - dma_pool_free(atdma->memset_pool, desc->memset_vaddr, - desc->memset_paddr); - desc->memset_buffer = false; - } - - /* move children to free_list */ - list_splice_init(&desc->tx_list, &atchan->free_list); - /* move myself to free_list */ - list_move(&desc->desc_node, &atchan->free_list); - spin_unlock_irqrestore(&atchan->lock, flags); dma_descriptor_unmap(txd); @@ -464,42 +455,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) dmaengine_desc_get_callback_invoke(txd, NULL); dma_run_dependencies(txd); -} - -/** - * atc_complete_all - finish work for all transactions - * @atchan: channel to complete transactions for - * - * Eventually submit queued descriptors if any - * - * Assume channel is idle while calling this function - * Called with atchan->lock held and bh disabled - */ -static void atc_complete_all(struct at_dma_chan *atchan) -{ - struct at_desc *desc, *_desc; - LIST_HEAD(list); - unsigned long flags; - - dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); spin_lock_irqsave(&atchan->lock, flags); - - /* - * Submit queued descriptors ASAP, i.e. before we go through - * the completed ones. 
- */ - if (!list_empty(&atchan->queue)) - atc_dostart(atchan, atc_first_queued(atchan)); - /* empty active_list now it is completed */ - list_splice_init(&atchan->active_list, &list); - /* empty queue list by moving descriptors (if any) to active_list */ - list_splice_init(&atchan->queue, &atchan->active_list); - + /* move children to free_list */ + list_splice_init(&desc->tx_list, &atchan->free_list); + /* add myself to free_list */ + list_add(&desc->desc_node, &atchan->free_list); spin_unlock_irqrestore(&atchan->lock, flags); - list_for_each_entry_safe(desc, _desc, &list, desc_node) - atc_chain_complete(atchan, desc); + /* If the transfer was a memset, free our temporary buffer */ + if (desc->memset_buffer) { + dma_pool_free(atdma->memset_pool, desc->memset_vaddr, + desc->memset_paddr); + desc->memset_buffer = false; + } } /** @@ -508,26 +477,28 @@ static void atc_complete_all(struct at_dma_chan *atchan) */ static void atc_advance_work(struct at_dma_chan *atchan) { + struct at_desc *desc; unsigned long flags; - int ret; dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); spin_lock_irqsave(&atchan->lock, flags); - ret = atc_chan_is_enabled(atchan); + if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list)) + return spin_unlock_irqrestore(&atchan->lock, flags); + + desc = atc_first_active(atchan); + /* Remove the transfer node from the active list. */ + list_del_init(&desc->desc_node); spin_unlock_irqrestore(&atchan->lock, flags); - if (ret) - return; - - if (list_empty(&atchan->active_list) || - list_is_singular(&atchan->active_list)) - return atc_complete_all(atchan); - - atc_chain_complete(atchan, atc_first_active(atchan)); + atc_chain_complete(atchan, desc); /* advance work */ spin_lock_irqsave(&atchan->lock, flags); - atc_dostart(atchan, atc_first_active(atchan)); + if (!list_empty(&atchan->active_list)) { + desc = atc_first_queued(atchan); + list_move_tail(&desc->desc_node, &atchan->active_list); + atc_dostart(atchan, desc); + } spin_unlock_irqrestore(&atchan->lock, flags); } @@ -539,6 +510,7 @@ static void atc_advance_work(struct at_dma_chan *atchan) static void atc_handle_error(struct at_dma_chan *atchan) { struct at_desc *bad_desc; + struct at_desc *desc; struct at_desc *child; unsigned long flags; @@ -551,13 +523,12 @@ static void atc_handle_error(struct at_dma_chan *atchan) bad_desc = atc_first_active(atchan); list_del_init(&bad_desc->desc_node); - /* As we are stopped, take advantage to push queued descriptors - * in active_list */ - list_splice_init(&atchan->queue, atchan->active_list.prev); - /* Try to restart the controller */ - if (!list_empty(&atchan->active_list)) - atc_dostart(atchan, atc_first_active(atchan)); + if (!list_empty(&atchan->active_list)) { + desc = atc_first_queued(atchan); + list_move_tail(&desc->desc_node, &atchan->active_list); + atc_dostart(atchan, desc); + } /* * KERN_CRITICAL may seem harsh, but since this only happens @@ -672,19 +643,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&atchan->lock, flags); cookie = dma_cookie_assign(tx); - if (list_empty(&atchan->active_list)) { - dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", - desc->txd.cookie); - atc_dostart(atchan, desc); - list_add_tail(&desc->desc_node, &atchan->active_list); - } else { - dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", - desc->txd.cookie); - list_add_tail(&desc->desc_node, &atchan->queue); - } - + list_add_tail(&desc->desc_node, &atchan->queue); spin_unlock_irqrestore(&atchan->lock, flags); + 
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", + desc->txd.cookie); return cookie; } @@ -1418,11 +1381,8 @@ static int atc_terminate_all(struct dma_chan *chan) struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); int chan_id = atchan->chan_common.chan_id; - struct at_desc *desc, *_desc; unsigned long flags; - LIST_HEAD(list); - dev_vdbg(chan2dev(chan), "%s\n", __func__); /* @@ -1441,19 +1401,15 @@ static int atc_terminate_all(struct dma_chan *chan) cpu_relax(); /* active_list entries will end up before queued entries */ - list_splice_init(&atchan->queue, &list); - list_splice_init(&atchan->active_list, &list); - - spin_unlock_irqrestore(&atchan->lock, flags); - - /* Flush all pending and queued descriptors */ - list_for_each_entry_safe(desc, _desc, &list, desc_node) - atc_chain_complete(atchan, desc); + list_splice_tail_init(&atchan->queue, &atchan->free_list); + list_splice_tail_init(&atchan->active_list, &atchan->free_list); clear_bit(ATC_IS_PAUSED, &atchan->status); /* if channel dedicated to cyclic operations, free it */ clear_bit(ATC_IS_CYCLIC, &atchan->status); + spin_unlock_irqrestore(&atchan->lock, flags); + return 0; } @@ -1508,20 +1464,26 @@ atc_tx_status(struct dma_chan *chan, } /** - * atc_issue_pending - try to finish work + * atc_issue_pending - takes the first transaction descriptor in the pending + * queue and starts the transfer. * @chan: target DMA channel */ static void atc_issue_pending(struct dma_chan *chan) { - struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_desc *desc; + unsigned long flags; dev_vdbg(chan2dev(chan), "issue_pending\n"); - /* Not needed for cyclic transfers */ - if (atc_chan_is_cyclic(atchan)) - return; + spin_lock_irqsave(&atchan->lock, flags); + if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue)) + return spin_unlock_irqrestore(&atchan->lock, flags); - atc_advance_work(atchan); + desc = atc_first_queued(atchan); + list_move_tail(&desc->desc_node, &atchan->active_list); + atc_dostart(atchan, desc); + spin_unlock_irqrestore(&atchan->lock, flags); } /** @@ -1939,7 +1901,11 @@ static int __init at_dma_probe(struct platform_device *pdev) dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? 
"slave " : "", plat_dat->nr_channels); - dma_async_device_register(&atdma->dma_common); + err = dma_async_device_register(&atdma->dma_common); + if (err) { + dev_err(&pdev->dev, "Unable to register: %d.\n", err); + goto err_dma_async_device_register; + } /* * Do not return an error if the dmac node is not present in order to @@ -1959,6 +1925,7 @@ static int __init at_dma_probe(struct platform_device *pdev) err_of_dma_controller_register: dma_async_device_unregister(&atdma->dma_common); +err_dma_async_device_register: dma_pool_destroy(atdma->memset_pool); err_memset_pool_create: dma_pool_destroy(atdma->dma_desc_pool); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 80fc2fe8c77e..8dc82c7b1dcf 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -164,13 +164,13 @@ /* LLI == Linked List Item; aka DMA buffer descriptor */ struct at_lli { /* values that are not changed by hardware */ - dma_addr_t saddr; - dma_addr_t daddr; + u32 saddr; + u32 daddr; /* value that may get written back: */ - u32 ctrla; + u32 ctrla; /* more values that are not changed by hardware */ - u32 ctrlb; - dma_addr_t dscr; /* chain to next lli */ + u32 ctrlb; + u32 dscr; /* chain to next lli */ }; /** diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 90afba0b36fe..b5d691ae45dc 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1390,7 +1390,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); - struct at_xdmac_desc *desc, *_desc; + struct at_xdmac_desc *desc, *_desc, *iter; struct list_head *descs_list; enum dma_status ret; int residue, retry; @@ -1505,11 +1505,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, * microblock. 
*/ descs_list = &desc->descs_list; - list_for_each_entry_safe(desc, _desc, descs_list, desc_node) { - dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); - residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; - if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) + list_for_each_entry_safe(iter, _desc, descs_list, desc_node) { + dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg); + residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth; + if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) { + desc = iter; break; + } } residue += cur_ubc << dwidth; @@ -1836,6 +1838,11 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) for (i = 0; i < init_nr_desc_per_channel; i++) { desc = at_xdmac_alloc_desc(chan, GFP_KERNEL); if (!desc) { + if (i == 0) { + dev_warn(chan2dev(chan), + "can't allocate any descriptors\n"); + return -EIO; + } dev_warn(chan2dev(chan), "only %d descriptors have been allocated\n", i); break; diff --git a/drivers/dma/bestcomm/ata.c b/drivers/dma/bestcomm/ata.c index 2fd87f83cf90..e169f18da551 100644 --- a/drivers/dma/bestcomm/ata.c +++ b/drivers/dma/bestcomm/ata.c @@ -133,7 +133,7 @@ void bcom_ata_reset_bd(struct bcom_task *tsk) struct bcom_ata_var *var; /* Reset all BD */ - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); tsk->index = 0; tsk->outdex = 0; diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c index d91cbbe7a48f..8c42e5ca00a9 100644 --- a/drivers/dma/bestcomm/bestcomm.c +++ b/drivers/dma/bestcomm/bestcomm.c @@ -95,7 +95,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size) tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa); if (!tsk->bd) goto error; - memset(tsk->bd, 0x00, bd_count * bd_size); + memset_io(tsk->bd, 0x00, bd_count * bd_size); tsk->num_bd = bd_count; tsk->bd_size = bd_size; @@ -186,16 +186,16 @@ bcom_load_image(int task, u32 *task_image) inc = bcom_task_inc(task); /* Clear & copy */ - memset(var, 0x00, BCOM_VAR_SIZE); - memset(inc, 0x00, BCOM_INC_SIZE); + memset_io(var, 0x00, BCOM_VAR_SIZE); + memset_io(inc, 0x00, BCOM_INC_SIZE); desc_src = (u32 *)(hdr + 1); var_src = desc_src + hdr->desc_size; inc_src = var_src + hdr->var_size; - memcpy(desc, desc_src, hdr->desc_size * sizeof(u32)); - memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); - memcpy(inc, inc_src, hdr->inc_size * sizeof(u32)); + memcpy_toio(desc, desc_src, hdr->desc_size * sizeof(u32)); + memcpy_toio(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); + memcpy_toio(inc, inc_src, hdr->inc_size * sizeof(u32)); return 0; } @@ -302,13 +302,13 @@ static int bcom_engine_init(void) return -ENOMEM; } - memset(bcom_eng->tdt, 0x00, tdt_size); - memset(bcom_eng->ctx, 0x00, ctx_size); - memset(bcom_eng->var, 0x00, var_size); - memset(bcom_eng->fdt, 0x00, fdt_size); + memset_io(bcom_eng->tdt, 0x00, tdt_size); + memset_io(bcom_eng->ctx, 0x00, ctx_size); + memset_io(bcom_eng->var, 0x00, var_size); + memset_io(bcom_eng->fdt, 0x00, fdt_size); /* Copy the FDT for the EU#3 */ - memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops)); + memcpy_toio(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops)); /* Initialize Task base structure */ for (task=0; taskindex = 0; tsk->outdex = 0; - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA); @@ -241,7 +241,7 @@ bcom_fec_tx_reset(struct bcom_task *tsk) tsk->index = 0; tsk->outdex = 0; - memset(tsk->bd, 0x00, 
tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA); diff --git a/drivers/dma/bestcomm/gen_bd.c b/drivers/dma/bestcomm/gen_bd.c index 906ddba6a6f5..8a24a5cbc263 100644 --- a/drivers/dma/bestcomm/gen_bd.c +++ b/drivers/dma/bestcomm/gen_bd.c @@ -142,7 +142,7 @@ bcom_gen_bd_rx_reset(struct bcom_task *tsk) tsk->index = 0; tsk->outdex = 0; - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA); @@ -226,7 +226,7 @@ bcom_gen_bd_tx_reset(struct bcom_task *tsk) tsk->index = 0; tsk->outdex = 0; - memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); /* Configure some stuff */ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA); diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index 58c8cc8fe0e1..d7ed50f8b929 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -400,7 +400,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) chunk->ll_region.sz += burst->sz; desc->alloc_sz += burst->sz; - if (chan->dir == EDMA_DIR_WRITE) { + if (dir == DMA_DEV_TO_MEM) { burst->sar = src_addr; if (xfer->cyclic) { burst->dar = xfer->xfer.cyclic.paddr; diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c index 3e83769615d1..8f1651367310 100644 --- a/drivers/dma/hisi_dma.c +++ b/drivers/dma/hisi_dma.c @@ -185,7 +185,8 @@ static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index) hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0); } -static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan) +static void hisi_dma_reset_or_disable_hw_chan(struct hisi_dma_chan *chan, + bool disable) { struct hisi_dma_dev *hdma_dev = chan->hdma_dev; u32 index = chan->qp_num, tmp; @@ -206,8 +207,11 @@ static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan) hisi_dma_do_reset(hdma_dev, index); hisi_dma_reset_qp_point(hdma_dev, index); hisi_dma_pause_dma(hdma_dev, index, false); - hisi_dma_enable_dma(hdma_dev, index, true); - hisi_dma_unmask_irq(hdma_dev, index); + + if (!disable) { + hisi_dma_enable_dma(hdma_dev, index, true); + hisi_dma_unmask_irq(hdma_dev, index); + } ret = readl_relaxed_poll_timeout(hdma_dev->base + HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp, @@ -223,7 +227,7 @@ static void hisi_dma_free_chan_resources(struct dma_chan *c) struct hisi_dma_chan *chan = to_hisi_dma_chan(c); struct hisi_dma_dev *hdma_dev = chan->hdma_dev; - hisi_dma_reset_hw_chan(chan); + hisi_dma_reset_or_disable_hw_chan(chan, false); vchan_free_chan_resources(&chan->vc); memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth); @@ -272,7 +276,6 @@ static void hisi_dma_start_transfer(struct hisi_dma_chan *chan) vd = vchan_next_desc(&chan->vc); if (!vd) { - dev_err(&hdma_dev->pdev->dev, "no issued task!\n"); chan->desc = NULL; return; } @@ -304,7 +307,7 @@ static void hisi_dma_issue_pending(struct dma_chan *c) spin_lock_irqsave(&chan->vc.lock, flags); - if (vchan_issue_pending(&chan->vc)) + if (vchan_issue_pending(&chan->vc) && !chan->desc) hisi_dma_start_transfer(chan); spin_unlock_irqrestore(&chan->vc.lock, flags); @@ -399,7 +402,7 @@ static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) { - 
hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]); + hisi_dma_reset_or_disable_hw_chan(&hdma_dev->chan[qp_index], true); } static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev) @@ -438,18 +441,15 @@ static irqreturn_t hisi_dma_irq(int irq, void *data) desc = chan->desc; cqe = chan->cq + chan->cq_head; if (desc) { + chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth; + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, + chan->qp_num, chan->cq_head); if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) { - chan->cq_head = (chan->cq_head + 1) % - hdma_dev->chan_depth; - hisi_dma_chan_write(hdma_dev->base, - HISI_DMA_CQ_HEAD_PTR, chan->qp_num, - chan->cq_head); vchan_cookie_complete(&desc->vd); + hisi_dma_start_transfer(chan); } else { dev_err(&hdma_dev->pdev->dev, "task error!\n"); } - - chan->desc = NULL; } spin_unlock_irqrestore(&chan->vc.lock, flags); diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 4da88578ed64..ae65eb90afab 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -266,10 +266,16 @@ int idxd_cdev_register(void) rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK, ictx[i].name); if (rc) - return rc; + goto err_free_chrdev_region; } return 0; + +err_free_chrdev_region: + for (i--; i >= 0; i--) + unregister_chrdev_region(ictx[i].devt, MINORMASK); + + return rc; } void idxd_cdev_remove(void) diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index aa7435555de9..09ad37bbd98b 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -82,6 +82,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq, hw->int_handle = wq->vec_ptr; } +static struct dma_async_tx_descriptor * +idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags) +{ + struct idxd_wq *wq = to_idxd_wq(c); + u32 desc_flags; + struct idxd_desc *desc; + + if (wq->state != IDXD_WQ_ENABLED) + return NULL; + + op_flag_setup(flags, &desc_flags); + desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(desc)) + return NULL; + + idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP, + 0, 0, 0, desc->compl_dma, desc_flags); + desc->txd.flags = flags; + return &desc->txd; +} + static struct dma_async_tx_descriptor * idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) @@ -188,10 +209,12 @@ int idxd_register_dma_device(struct idxd_device *idxd) INIT_LIST_HEAD(&dma->channels); dma->dev = dev; + dma_cap_set(DMA_INTERRUPT, dma->cap_mask); dma_cap_set(DMA_PRIVATE, dma->cap_mask); dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask); dma->device_release = idxd_dma_release; + dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt; if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { dma_cap_set(DMA_MEMCPY, dma->cap_mask); dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy; diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 7b41cdff1a2c..51af0dfc3c63 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -1098,6 +1098,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr u64 xfer_size; int rc; + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + if (wq->state != IDXD_WQ_DISABLED) return -EPERM; @@ -1132,6 +1135,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu u64 batch_size; int rc; + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + if (wq->state != IDXD_WQ_DISABLED) return -EPERM; diff --git a/drivers/dma/imx-sdma.c 
b/drivers/dma/imx-sdma.c index 306f93e4b26a..2283dcd8bf91 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1789,7 +1789,7 @@ static int sdma_event_remap(struct sdma_engine *sdma) u32 reg, val, shift, num_map, i; int ret = 0; - if (IS_ERR(np) || IS_ERR(gpr_np)) + if (IS_ERR(np) || !gpr_np) goto out; event_remap = of_find_property(np, propname, NULL); @@ -1837,7 +1837,7 @@ static int sdma_event_remap(struct sdma_engine *sdma) } out: - if (!IS_ERR(gpr_np)) + if (gpr_np) of_node_put(gpr_np); return ret; @@ -2212,7 +2212,7 @@ MODULE_DESCRIPTION("i.MX SDMA driver"); #if IS_ENABLED(CONFIG_SOC_IMX6Q) MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin"); #endif -#if IS_ENABLED(CONFIG_SOC_IMX7D) +#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M) MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin"); #endif MODULE_LICENSE("GPL"); diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 37ff4ec7db76..e2070df6cad2 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -656,7 +656,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) if (active - i == 0) { dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n", __func__); - mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); } /* microsecond delay by sysfs variable per pending descriptor */ @@ -682,7 +682,7 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan) if (chanerr & (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) { - mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); ioat_eh(ioat_chan); } } @@ -879,7 +879,7 @@ static void check_active(struct ioatdma_chan *ioat_chan) } if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state)) - mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); } static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan) diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index 375e7e647df6..a1517ef1f4a0 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) unsigned int status; int ret; - ret = pm_runtime_get_sync(mtkd->ddev.dev); + ret = pm_runtime_resume_and_get(mtkd->ddev.dev); if (ret < 0) { pm_runtime_put_noidle(chan->device->dev); return ret; @@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) ret = readx_poll_timeout(readl, c->base + VFF_EN, status, !status, 10, 100); if (ret) - return ret; + goto err_pm; ret = request_irq(c->irq, mtk_uart_apdma_irq_handler, IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan); if (ret < 0) { dev_err(chan->device->dev, "Can't request dma IRQ\n"); - return -EINVAL; + ret = -EINVAL; + goto err_pm; } if (mtkd->support_33bits) mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B); +err_pm: + pm_runtime_put_noidle(mtkd->ddev.dev); return ret; } diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index 9b0d463f89bb..4800c596433a 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -899,6 +899,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev) tasklet_kill(&xor_dev->irq_tasklet); clk_disable_unprepare(xor_dev->clk); + clk_disable_unprepare(xor_dev->reg_clk); return 0; } diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 65f816b40c32..dc147cc2436e 100644 
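/*
 * Aside: the imx-sdma fix above exists because of_parse_phandle() reports
 * failure with NULL while other OF helpers return ERR_PTR() values, and
 * IS_ERR(NULL) is false. A userspace re-creation of the convention makes
 * the trap visible (lookup_node()/parse_config() are invented helpers):
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error)   { return (void *)(uintptr_t)error; }
static inline long PTR_ERR(const void *p) { return (long)(uintptr_t)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *lookup_node(int ok)  { return ok ? (void *)0x1000 : NULL; }
static void *parse_config(int ok) { return ok ? (void *)0x2000 : ERR_PTR(-ENOENT); }

int main(void)
{
	void *np = lookup_node(0);
	void *cfg = parse_config(0);

	/* IS_ERR() never fires for a NULL-on-failure API: test !np instead */
	printf("IS_ERR(np)=%d, np==NULL: %d\n", IS_ERR(np), np == NULL);
	if (IS_ERR(cfg))
		printf("cfg error: %ld\n", PTR_ERR(cfg));
	return 0;
}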
--- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -167,29 +167,11 @@ static struct mxs_dma_type mxs_dma_types[] = { } }; -static const struct platform_device_id mxs_dma_ids[] = { - { - .name = "imx23-dma-apbh", - .driver_data = (kernel_ulong_t) &mxs_dma_types[0], - }, { - .name = "imx23-dma-apbx", - .driver_data = (kernel_ulong_t) &mxs_dma_types[1], - }, { - .name = "imx28-dma-apbh", - .driver_data = (kernel_ulong_t) &mxs_dma_types[2], - }, { - .name = "imx28-dma-apbx", - .driver_data = (kernel_ulong_t) &mxs_dma_types[3], - }, { - /* end of list */ - } -}; - static const struct of_device_id mxs_dma_dt_ids[] = { - { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], }, - { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], }, - { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], }, - { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], }, + { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_types[0], }, + { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_types[1], }, + { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_types[2], }, + { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_types[3], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids); @@ -688,7 +670,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, return mxs_chan->status; } -static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) +static int mxs_dma_init(struct mxs_dma_engine *mxs_dma) { int ret; @@ -759,11 +741,9 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, ofdma->of_node); } -static int __init mxs_dma_probe(struct platform_device *pdev) +static int mxs_dma_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - const struct platform_device_id *id_entry; - const struct of_device_id *of_id; const struct mxs_dma_type *dma_type; struct mxs_dma_engine *mxs_dma; struct resource *iores; @@ -779,13 +759,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev) return ret; } - of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev); - if (of_id) - id_entry = of_id->data; - else - id_entry = platform_get_device_id(pdev); - - dma_type = (struct mxs_dma_type *)id_entry->driver_data; + dma_type = (struct mxs_dma_type *)of_device_get_match_data(&pdev->dev); mxs_dma->type = dma_type->type; mxs_dma->dev_id = dma_type->id; @@ -865,11 +839,7 @@ static struct platform_driver mxs_dma_driver = { .name = "mxs-dma", .of_match_table = mxs_dma_dt_ids, }, - .id_table = mxs_dma_ids, + .probe = mxs_dma_probe, }; -static int __init mxs_dma_module_init(void) -{ - return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe); -} -subsys_initcall(mxs_dma_module_init); +builtin_platform_driver(mxs_dma_driver); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 2e9440955b66..91592809d496 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2800,7 +2800,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) /* If the DMAC pool is empty, alloc new */ if (!desc) { - DEFINE_SPINLOCK(lock); + static DEFINE_SPINLOCK(lock); LIST_HEAD(pool); if (!add_desc(&pool, &lock, GFP_ATOMIC, 1)) diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index b4ef4f19f7de..68d9d60c051d 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -1249,14 +1249,14 @@ static int pxad_init_phys(struct platform_device *op, return -ENOMEM; for (i = 0; i < nb_phy_chans; i++) - if (platform_get_irq(op, i) > 0) + if (platform_get_irq_optional(op, i) > 0) nr_irq++; for (i = 
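/*
 * Aside: the one-word pl330 change above (DEFINE_SPINLOCK -> static
 * DEFINE_SPINLOCK) matters because a block-scope lock without 'static' is
 * a fresh object on every call, so callers never actually exclude each
 * other. The same mistake in portable pthreads form (build with
 * cc -pthread; all names are illustrative):
 */
#include <pthread.h>
#include <stdio.h>

static int shared_counter;

static void add_desc(void)
{
	/* 'static' gives every caller the same lock instance; dropping it
	 * would hand each call its own stack-local mutex and reintroduce
	 * the race the patch fixes. */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&lock);
	shared_counter++;
	pthread_mutex_unlock(&lock);
}

static void *worker(void *unused)
{
	(void)unused;
	for (int i = 0; i < 100000; i++)
		add_desc();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("counter=%d\n", shared_counter);	/* reliably 200000 */
	return 0;
}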
0; i < nb_phy_chans; i++) { phy = &pdev->phys[i]; phy->base = pdev->base; phy->idx = i; - irq = platform_get_irq(op, i); + irq = platform_get_irq_optional(op, i); if ((nr_irq > 1) && (irq > 0)) ret = devm_request_irq(&op->dev, irq, pxad_chan_handler, diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index 528deb5d9f31..5c615a8b514b 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -52,16 +52,6 @@ static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd) static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan) { struct sf_pdma_desc *desc; - unsigned long flags; - - spin_lock_irqsave(&chan->lock, flags); - - if (chan->desc && !chan->desc->in_use) { - spin_unlock_irqrestore(&chan->lock, flags); - return chan->desc; - } - - spin_unlock_irqrestore(&chan->lock, flags); desc = kzalloc(sizeof(*desc), GFP_NOWAIT); if (!desc) @@ -94,6 +84,7 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src, { struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); struct sf_pdma_desc *desc; + unsigned long iflags; if (chan && (!len || !dest || !src)) { dev_err(chan->pdma->dma_dev.dev, @@ -109,10 +100,9 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src, desc->dirn = DMA_MEM_TO_MEM; desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); - spin_lock_irqsave(&chan->vchan.lock, flags); - chan->desc = desc; + spin_lock_irqsave(&chan->vchan.lock, iflags); sf_pdma_fill_desc(desc, dest, src, len); - spin_unlock_irqrestore(&chan->vchan.lock, flags); + spin_unlock_irqrestore(&chan->vchan.lock, iflags); return desc->async_tx; } @@ -169,11 +159,17 @@ static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan, unsigned long flags; u64 residue = 0; struct sf_pdma_desc *desc; - struct dma_async_tx_descriptor *tx; + struct dma_async_tx_descriptor *tx = NULL; spin_lock_irqsave(&chan->vchan.lock, flags); - tx = &chan->desc->vdesc.tx; + list_for_each_entry(vd, &chan->vchan.desc_submitted, node) + if (vd->tx.cookie == cookie) + tx = &vd->tx; + + if (!tx) + goto out; + if (cookie == tx->chan->completed_cookie) goto out; @@ -240,6 +236,19 @@ static void sf_pdma_enable_request(struct sf_pdma_chan *chan) writel(v, regs->ctrl); } +static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan) +{ + struct virt_dma_chan *vchan = &chan->vchan; + struct virt_dma_desc *vdesc; + + if (list_empty(&vchan->desc_issued)) + return NULL; + + vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node); + + return container_of(vdesc, struct sf_pdma_desc, vdesc); +} + static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan) { struct sf_pdma_desc *desc = chan->desc; @@ -267,8 +276,11 @@ static void sf_pdma_issue_pending(struct dma_chan *dchan) spin_lock_irqsave(&chan->vchan.lock, flags); - if (vchan_issue_pending(&chan->vchan) && chan->desc) + if (!chan->desc && vchan_issue_pending(&chan->vchan)) { + /* vchan_issue_pending has made a check that desc is not NULL */ + chan->desc = sf_pdma_get_first_pending_desc(chan); sf_pdma_xfer_desc(chan); + } spin_unlock_irqrestore(&chan->vchan.lock, flags); } @@ -297,6 +309,11 @@ static void sf_pdma_donebh_tasklet(struct tasklet_struct *t) spin_lock_irqsave(&chan->vchan.lock, flags); list_del(&chan->desc->vdesc.node); vchan_cookie_complete(&chan->desc->vdesc); + + chan->desc = sf_pdma_get_first_pending_desc(chan); + if (chan->desc) + sf_pdma_xfer_desc(chan); + + spin_unlock_irqrestore(&chan->vchan.lock, flags); } diff
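/*
 * Aside: sf_pdma_get_first_pending_desc() above is the standard intrusive
 * list move: peek the head node, then recover the enclosing object with
 * container_of(). A self-contained model (simplified two-pointer list with
 * invented names, not the kernel's list.h):
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h)       { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct desc {
	int id;
	struct list_head node;	/* linkage embedded in the descriptor */
};

static struct desc *first_pending(struct list_head *issued)
{
	if (list_empty(issued))
		return NULL;	/* mirrors the list_empty() early return above */
	return container_of(issued->next, struct desc, node);
}

int main(void)
{
	struct list_head issued;
	struct desc d1 = { .id = 1 }, d2 = { .id = 2 };

	list_init(&issued);
	list_add_tail(&d1.node, &issued);
	list_add_tail(&d2.node, &issued);
	printf("first pending: %d\n", first_pending(&issued)->id);
	return 0;
}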
--git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 19ac95c0098f..7f72b3f4cd1a 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -115,10 +115,8 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) ret = pm_runtime_get(schan->dev); spin_unlock_irq(&schan->chan_lock); - if (ret < 0) { + if (ret < 0) dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); - pm_runtime_put(schan->dev); - } pm_runtime_barrier(schan->dev); diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 4357d2395e6b..60115d8d4083 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -1236,11 +1236,8 @@ static int sprd_dma_remove(struct platform_device *pdev) { struct sprd_dma_dev *sdev = platform_get_drvdata(pdev); struct sprd_dma_chn *c, *cn; - int ret; - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) - return ret; + pm_runtime_get_sync(&pdev->dev); /* explicitly free the irq */ if (sdev->irq > 0) diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index fe36738f2dd7..9d54746c422c 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -40,7 +40,6 @@ STM32_MDMA_SHIFT(mask)) #define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */ -#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */ /* MDMA Channel x interrupt/status register */ #define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */ @@ -196,7 +195,7 @@ #define STM32_MDMA_MAX_BUF_LEN 128 #define STM32_MDMA_MAX_BLOCK_LEN 65536 -#define STM32_MDMA_MAX_CHANNELS 63 +#define STM32_MDMA_MAX_CHANNELS 32 #define STM32_MDMA_MAX_REQUESTS 256 #define STM32_MDMA_MAX_BURST 128 #define STM32_MDMA_VERY_HIGH_PRIORITY 0x11 @@ -1345,90 +1344,84 @@ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan) static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) { struct stm32_mdma_device *dmadev = devid; - struct stm32_mdma_chan *chan = devid; - u32 reg, id, ien, status, flag; + struct stm32_mdma_chan *chan; + u32 reg, id, ccr, ien, status; /* Find out which channel generates the interrupt */ status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0); - if (status) { - id = __ffs(status); - } else { - status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1); - if (!status) { - dev_dbg(mdma2dev(dmadev), "spurious it\n"); - return IRQ_NONE; - } - id = __ffs(status); - /* - * As GISR0 provides status for channel id from 0 to 31, - * so GISR1 provides status for channel id from 32 to 62 - */ - id += 32; + if (!status) { + dev_dbg(mdma2dev(dmadev), "spurious it\n"); + return IRQ_NONE; } + id = __ffs(status); chan = &dmadev->chan[id]; if (!chan) { - dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n"); - goto exit; + dev_warn(mdma2dev(dmadev), "MDMA channel not initialized\n"); + return IRQ_NONE; } /* Handle interrupt for the channel */ spin_lock(&chan->vchan.lock); - status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id)); - ien = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); - ien &= STM32_MDMA_CCR_IRQ_MASK; - ien >>= 1; + status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id)); + /* Mask Channel ReQuest Active bit which can be set in case of MEM2MEM */ + status &= ~STM32_MDMA_CISR_CRQA; + ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id)); + ien = (ccr & STM32_MDMA_CCR_IRQ_MASK) >> 1; if (!(status & ien)) { spin_unlock(&chan->vchan.lock); - dev_dbg(chan2dev(chan), - "spurious it (status=0x%04x, ien=0x%04x)\n", - status, ien); + dev_warn(chan2dev(chan), + "spurious it (status=0x%04x, ien=0x%04x)\n", + status, ien); return 
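/*
 * Aside: the stm32-mdma handler rework around this point replaces a
 * switch on the lowest set bit with an if-chain that services and clears
 * every asserted flag, then complains about whatever remains. The shape
 * of that "consume the whole status word" style, with made-up flag names:
 */
#include <stdio.h>

#define ST_TEIF		(1u << 0)	/* transfer error */
#define ST_CTCIF	(1u << 1)	/* channel transfer complete */
#define ST_BTIF		(1u << 2)	/* block transfer done */

static void handle_status(unsigned int status)
{
	if (status & ST_TEIF) {
		printf("transfer error\n");
		status &= ~ST_TEIF;
	}
	if (status & ST_CTCIF) {
		printf("transfer complete\n");
		status &= ~ST_CTCIF;
	}
	if (status & ST_BTIF) {
		printf("block done\n");
		status &= ~ST_BTIF;
	}
	if (status)	/* anything still set was not understood */
		printf("unhandled status=0x%08x\n", status);
}

int main(void)
{
	handle_status(ST_CTCIF | ST_BTIF);	/* both serviced in one pass */
	handle_status(1u << 31);		/* surfaced instead of lost */
	return 0;
}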
IRQ_NONE; } - flag = __ffs(status & ien); - reg = STM32_MDMA_CIFCR(chan->id); + reg = STM32_MDMA_CIFCR(id); - switch (1 << flag) { - case STM32_MDMA_CISR_TEIF: - id = chan->id; - status = readl_relaxed(dmadev->base + STM32_MDMA_CESR(id)); - dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n", status); + if (status & STM32_MDMA_CISR_TEIF) { + dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n", + readl_relaxed(dmadev->base + STM32_MDMA_CESR(id))); stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF); - break; + status &= ~STM32_MDMA_CISR_TEIF; + } - case STM32_MDMA_CISR_CTCIF: + if (status & STM32_MDMA_CISR_CTCIF) { stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF); + status &= ~STM32_MDMA_CISR_CTCIF; stm32_mdma_xfer_end(chan); - break; + } - case STM32_MDMA_CISR_BRTIF: + if (status & STM32_MDMA_CISR_BRTIF) { stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF); - break; + status &= ~STM32_MDMA_CISR_BRTIF; + } - case STM32_MDMA_CISR_BTIF: + if (status & STM32_MDMA_CISR_BTIF) { stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF); + status &= ~STM32_MDMA_CISR_BTIF; chan->curr_hwdesc++; if (chan->desc && chan->desc->cyclic) { if (chan->curr_hwdesc == chan->desc->count) chan->curr_hwdesc = 0; vchan_cyclic_callback(&chan->desc->vdesc); } - break; + } - case STM32_MDMA_CISR_TCIF: + if (status & STM32_MDMA_CISR_TCIF) { stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF); - break; + status &= ~STM32_MDMA_CISR_TCIF; + } - default: - dev_err(chan2dev(chan), "it %d unhandled (status=0x%04x)\n", - 1 << flag, status); + if (status) { + stm32_mdma_set_bits(dmadev, reg, status); + dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status); + if (!(ccr & STM32_MDMA_CCR_EN)) + dev_err(chan2dev(chan), "chan disabled by HW\n"); } spin_unlock(&chan->vchan.lock); -exit: return IRQ_HANDLED; } diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c index 4ba8fa5d9c36..86ced7f2d771 100644 --- a/drivers/dma/ti/dma-crossbar.c +++ b/drivers/dma/ti/dma-crossbar.c @@ -245,6 +245,7 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, if (dma_spec->args[0] >= xbar->xbar_requests) { dev_err(&pdev->dev, "Invalid XBAR request number: %d\n", dma_spec->args[0]); + put_device(&pdev->dev); return ERR_PTR(-EINVAL); } @@ -252,12 +253,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); if (!dma_spec->np) { dev_err(&pdev->dev, "Can't get DMA master\n"); + put_device(&pdev->dev); return ERR_PTR(-EINVAL); } map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) { of_node_put(dma_spec->np); + put_device(&pdev->dev); return ERR_PTR(-ENOMEM); } @@ -268,6 +271,8 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, mutex_unlock(&xbar->mutex); dev_err(&pdev->dev, "Run out of free DMA requests\n"); kfree(map); + of_node_put(dma_spec->np); + put_device(&pdev->dev); return ERR_PTR(-ENOMEM); } set_bit(map->xbar_out, xbar->dma_inuse); diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c index 8563a392f30b..dadab2feca08 100644 --- a/drivers/dma/ti/k3-udma-private.c +++ b/drivers/dma/ti/k3-udma-private.c @@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property) } pdev = of_find_device_by_node(udma_node); + if (np != udma_node) + of_node_put(udma_node); + if (!pdev) { pr_debug("UDMA device not found\n"); return ERR_PTR(-EPROBE_DEFER); } - if (np != udma_node) - of_node_put(udma_node); - 
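/*
 * Aside: the dma-crossbar hunks above add put_device()/of_node_put() to
 * every early return, since each successful get must be dropped exactly
 * once on every exit path. A generic goto-unwind sketch (toy refcount,
 * invented names):
 */
#include <stdio.h>
#include <stdlib.h>

struct ref { int count; };

static void get(struct ref *r) { r->count++; }
static void put(struct ref *r) { r->count--; }

static int route_allocate(struct ref *dev, int fail_at)
{
	void *map = NULL;
	int err = 0;

	get(dev);			/* reference taken up front */

	if (fail_at == 1) {
		err = -1;
		goto err_put;		/* must still drop the reference */
	}

	map = malloc(16);
	if (!map || fail_at == 2) {
		err = -2;
		goto err_free;
	}

	free(map);			/* success path cleans up too */
	put(dev);
	return 0;

err_free:
	free(map);
err_put:
	put(dev);
	return err;
}

int main(void)
{
	struct ref dev = { 0 };

	route_allocate(&dev, 1);
	route_allocate(&dev, 2);
	printf("leaked refs: %d\n", dev.count);	/* 0: balanced on all paths */
	return 0;
}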
ud = platform_get_drvdata(pdev); if (!ud) { pr_debug("UDMA has not been probed\n"); diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index cab4719e4cf9..e76adc31ab66 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -3020,9 +3020,10 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Request and map I/O memory */ xdev->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(xdev->regs)) - return PTR_ERR(xdev->regs); - + if (IS_ERR(xdev->regs)) { + err = PTR_ERR(xdev->regs); + goto disable_clks; + } /* Retrieve the DMA engine properties from the device tree */ xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2; @@ -3050,7 +3051,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) if (err < 0) { dev_err(xdev->dev, "missing xlnx,num-fstores property\n"); - return err; + goto disable_clks; } err = of_property_read_u32(node, "xlnx,flush-fsync", @@ -3070,7 +3071,11 @@ static int xilinx_dma_probe(struct platform_device *pdev) xdev->ext_addr = false; /* Set the dma mask bits */ - dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width)); + err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width)); + if (err < 0) { + dev_err(xdev->dev, "DMA mask error %d\n", err); + goto disable_clks; + } /* Initialize the DMA engine */ xdev->common.dev = &pdev->dev; @@ -3115,7 +3120,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) for_each_child_of_node(node, child) { err = xilinx_dma_child_probe(xdev, child); if (err < 0) - goto disable_clks; + goto error; } if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -3150,12 +3155,12 @@ static int xilinx_dma_probe(struct platform_device *pdev) return 0; -disable_clks: - xdma_disable_allclks(xdev); error: for (i = 0; i < xdev->dma_config->max_channels; i++) if (xdev->chan[i]) xilinx_dma_chan_remove(xdev->chan[i]); +disable_clks: + xdma_disable_allclks(xdev); return err; } diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 5fecf5aa6e85..7e6be076e9d3 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -232,7 +232,7 @@ struct zynqmp_dma_chan { bool is_dmacoherent; struct tasklet_struct tasklet; bool idle; - u32 desc_size; + size_t desc_size; bool err; u32 bus_width; u32 src_burst_len; @@ -490,7 +490,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) } chan->desc_pool_v = dma_alloc_coherent(chan->dev, - (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), + (2 * ZYNQMP_DMA_DESC_SIZE(chan) * + ZYNQMP_DMA_NUM_DESCS), &chan->desc_pool_p, GFP_KERNEL); if (!chan->desc_pool_v) return -ENOMEM; diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c index b8a7d9594afd..1fa5ca57e9ec 100644 --- a/drivers/edac/dmc520_edac.c +++ b/drivers/edac/dmc520_edac.c @@ -489,7 +489,7 @@ static int dmc520_edac_probe(struct platform_device *pdev) dev = &pdev->dev; for (idx = 0; idx < NUMBER_OF_IRQS; idx++) { - irq = platform_get_irq_byname(pdev, dmc520_irq_configs[idx].name); + irq = platform_get_irq_byname_optional(pdev, dmc520_irq_configs[idx].name); irqs[idx] = irq; masks[idx] = dmc520_irq_configs[idx].mask; if (irq >= 0) { diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c index a918ca93e4f7..df5897c90bec 100644 --- a/drivers/edac/ghes_edac.c +++ b/drivers/edac/ghes_edac.c @@ -101,9 +101,14 @@ static void dimm_setup_label(struct dimm_info *dimm, u16 handle) dmi_memdev_name(handle, &bank, 
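/*
 * Aside: the dfl.c change above swaps an open-coded hdr.count * sizeof(s32)
 * for array_size(), which saturates instead of wrapping when a
 * user-supplied count is hostile. A portable approximation using the
 * GCC/Clang overflow builtins (this array_size() is a local re-creation,
 * not the kernel macro):
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static size_t array_size(size_t n, size_t size)
{
	size_t bytes;

	/* On overflow return SIZE_MAX, which any allocator will refuse */
	if (__builtin_mul_overflow(n, size, &bytes))
		return SIZE_MAX;
	return bytes;
}

int main(void)
{
	size_t count = SIZE_MAX / 2 + 3;	/* hostile element count */

	printf("naive:      %zu\n", count * sizeof(int32_t));	/* wraps tiny */
	printf("array_size: %zu\n", array_size(count, sizeof(int32_t)));
	return 0;
}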
&device); - /* both strings must be non-zero */ - if (bank && *bank && device && *device) - snprintf(dimm->label, sizeof(dimm->label), "%s %s", bank, device); + /* + * Set to a NULL string when both bank and device are zero. In this case, + * the label assigned by default will be preserved. + */ + snprintf(dimm->label, sizeof(dimm->label), "%s%s%s", + (bank && *bank) ? bank : "", + (bank && *bank && device && *device) ? " " : "", + (device && *device) ? device : ""); } static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry) diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index 92906b56b1a2..fea44dc0484b 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -163,6 +163,11 @@ #define ECC_STAT_CECNT_SHIFT 8 #define ECC_STAT_BITNUM_MASK 0x7F +/* ECC error count register definitions */ +#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000 +#define ECC_ERRCNT_UECNT_SHIFT 16 +#define ECC_ERRCNT_CECNT_MASK 0xFFFF + /* DDR QOS Interrupt register definitions */ #define DDR_QOS_IRQ_STAT_OFST 0x20200 #define DDR_QOSUE_MASK 0x4 @@ -418,15 +423,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv) base = priv->baseaddr; p = &priv->stat; + regval = readl(base + ECC_ERRCNT_OFST); + p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK; + p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT; + if (!p->ce_cnt) + goto ue_err; + regval = readl(base + ECC_STAT_OFST); if (!regval) return 1; - p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT; - p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT; - if (!p->ce_cnt) - goto ue_err; - p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK); regval = readl(base + ECC_CEADDR0_OFST); diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c index 5b9a3cf8df26..2a7874108df8 100644 --- a/drivers/extcon/extcon-ptn5150.c +++ b/drivers/extcon/extcon-ptn5150.c @@ -194,6 +194,13 @@ static int ptn5150_init_dev_type(struct ptn5150_info *info) return 0; } +static void ptn5150_work_sync_and_put(void *data) +{ + struct ptn5150_info *info = data; + + cancel_work_sync(&info->irq_work); +} + static int ptn5150_i2c_probe(struct i2c_client *i2c) { struct device *dev = &i2c->dev; @@ -284,6 +291,10 @@ static int ptn5150_i2c_probe(struct i2c_client *i2c) if (ret) return -EINVAL; + ret = devm_add_action_or_reset(dev, ptn5150_work_sync_and_put, info); + if (ret) + return ret; + /* * Update current extcon state if for example OTG connection was there * before the probe diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index dfdb151364de..a11aaafa6c97 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -1245,19 +1245,14 @@ int extcon_dev_register(struct extcon_dev *edev) edev->dev.type = &edev->extcon_dev_type; } - ret = device_register(&edev->dev); - if (ret) { - put_device(&edev->dev); - goto err_dev; - } - spin_lock_init(&edev->lock); - edev->nh = devm_kcalloc(&edev->dev, edev->max_supported, - sizeof(*edev->nh), GFP_KERNEL); - if (!edev->nh) { - ret = -ENOMEM; - device_unregister(&edev->dev); - goto err_dev; + if (edev->max_supported) { + edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh), + GFP_KERNEL); + if (!edev->nh) { + ret = -ENOMEM; + goto err_alloc_nh; + } } for (index = 0; index < edev->max_supported; index++) @@ -1268,6 +1263,12 @@ int extcon_dev_register(struct extcon_dev *edev) dev_set_drvdata(&edev->dev, edev); edev->state = 0; + ret = device_register(&edev->dev); + if (ret) { + 
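/*
 * Aside: the dimm_setup_label() rewrite above collapses four cases
 * ("bank device", bank only, device only, empty) into one snprintf() with
 * a conditional separator. The trick in isolation (make_label() is an
 * invented wrapper):
 */
#include <stdio.h>

static void make_label(char *buf, size_t len, const char *bank,
		       const char *device)
{
	/* Empty result when both parts are missing, so the caller's
	 * default label survives */
	snprintf(buf, len, "%s%s%s",
		 (bank && *bank) ? bank : "",
		 (bank && *bank && device && *device) ? " " : "",
		 (device && *device) ? device : "");
}

int main(void)
{
	char label[32];

	make_label(label, sizeof(label), "BANK 0", "DIMM 1");
	printf("[%s]\n", label);	/* [BANK 0 DIMM 1] */
	make_label(label, sizeof(label), NULL, "DIMM 1");
	printf("[%s]\n", label);	/* [DIMM 1] */
	make_label(label, sizeof(label), NULL, NULL);
	printf("[%s]\n", label);	/* [] */
	return 0;
}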
put_device(&edev->dev); + goto err_dev; + } + mutex_lock(&extcon_dev_list_lock); list_add(&edev->entry, &extcon_dev_list); mutex_unlock(&extcon_dev_list_lock); @@ -1275,6 +1276,9 @@ int extcon_dev_register(struct extcon_dev *edev) return 0; err_dev: + if (edev->max_supported) + kfree(edev->nh); +err_alloc_nh: if (edev->max_supported) kfree(edev->extcon_dev_type.groups); err_alloc_groups: @@ -1335,6 +1339,7 @@ void extcon_dev_unregister(struct extcon_dev *edev) if (edev->max_supported) { kfree(edev->extcon_dev_type.groups); kfree(edev->cables); + kfree(edev->nh); } put_device(&edev->dev); diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 54be88167c60..f3b3953cac83 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -668,6 +668,7 @@ EXPORT_SYMBOL_GPL(fw_card_release); void fw_core_remove_card(struct fw_card *card) { struct fw_card_driver dummy_driver = dummy_driver_template; + unsigned long flags; card->driver->update_phy_reg(card, 4, PHY_LINK_ACTIVE | PHY_CONTENDER, 0); @@ -682,7 +683,9 @@ void fw_core_remove_card(struct fw_card *card) dummy_driver.stop_iso = card->driver->stop_iso; card->driver = &dummy_driver; + spin_lock_irqsave(&card->lock, flags); fw_destroy_nodes(card); + spin_unlock_irqrestore(&card->lock, flags); /* Wait for all users, especially device workqueue jobs, to finish. */ fw_card_put(card); diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index fb6c651214f3..b0cc3f1e9bb0 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -1480,6 +1480,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet, { struct outbound_phy_packet_event *e = container_of(packet, struct outbound_phy_packet_event, p); + struct client *e_client; switch (status) { /* expected: */ @@ -1496,9 +1497,10 @@ static void outbound_phy_packet_callback(struct fw_packet *packet, } e->phy_packet.data[0] = packet->timestamp; + e_client = e->client; queue_event(e->client, &e->event, &e->phy_packet, sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0); - client_put(e->client); + client_put(e_client); } static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index ec68ed27b0a5..4cdbfef79f2e 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -374,16 +374,13 @@ static void report_found_node(struct fw_card *card, card->bm_retries = 0; } +/* Must be called with card->lock held */ void fw_destroy_nodes(struct fw_card *card) { - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); card->color++; if (card->local_node != NULL) for_each_fw_node(card, card->local_node, report_lost_node); card->local_node = NULL; - spin_unlock_irqrestore(&card->lock, flags); } static void move_tree(struct fw_node *node0, struct fw_node *node1, int port) @@ -509,6 +506,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, struct fw_node *local_node; unsigned long flags; + spin_lock_irqsave(&card->lock, flags); + /* * If the selfID buffer is not the immediate successor of the * previously processed one, we cannot reliably compare the @@ -520,8 +519,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, card->bm_retries = 0; } - spin_lock_irqsave(&card->lock, flags); - card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated; card->node_id = node_id; /* diff --git 
a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index ac487c96bb71..6c20815cc8d1 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -73,24 +73,25 @@ static int try_cancel_split_timeout(struct fw_transaction *t) static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode) { - struct fw_transaction *t; + struct fw_transaction *t = NULL, *iter; unsigned long flags; spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(t, &card->transaction_list, link) { - if (t == transaction) { - if (!try_cancel_split_timeout(t)) { + list_for_each_entry(iter, &card->transaction_list, link) { + if (iter == transaction) { + if (!try_cancel_split_timeout(iter)) { spin_unlock_irqrestore(&card->lock, flags); goto timed_out; } - list_del_init(&t->link); - card->tlabel_mask &= ~(1ULL << t->tlabel); + list_del_init(&iter->link); + card->tlabel_mask &= ~(1ULL << iter->tlabel); + t = iter; break; } } spin_unlock_irqrestore(&card->lock, flags); - if (&t->link != &card->transaction_list) { + if (t) { t->callback(card, rcode, NULL, 0, t->callback_data); return 0; } @@ -935,7 +936,7 @@ EXPORT_SYMBOL(fw_core_handle_request); void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) { - struct fw_transaction *t; + struct fw_transaction *t = NULL, *iter; unsigned long flags; u32 *data; size_t data_length; @@ -947,20 +948,21 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) rcode = HEADER_GET_RCODE(p->header[1]); spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(t, &card->transaction_list, link) { - if (t->node_id == source && t->tlabel == tlabel) { - if (!try_cancel_split_timeout(t)) { + list_for_each_entry(iter, &card->transaction_list, link) { + if (iter->node_id == source && iter->tlabel == tlabel) { + if (!try_cancel_split_timeout(iter)) { spin_unlock_irqrestore(&card->lock, flags); goto timed_out; } - list_del_init(&t->link); - card->tlabel_mask &= ~(1ULL << t->tlabel); + list_del_init(&iter->link); + card->tlabel_mask &= ~(1ULL << iter->tlabel); + t = iter; break; } } spin_unlock_irqrestore(&card->lock, flags); - if (&t->link == &card->transaction_list) { + if (!t) { timed_out: fw_notice(card, "unsolicited response (source %x, tlabel %x)\n", source, tlabel); diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 4d5054211550..2ceed9287435 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -408,7 +408,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, void *payload, size_t length, void *callback_data) { struct sbp2_logical_unit *lu = callback_data; - struct sbp2_orb *orb; + struct sbp2_orb *orb = NULL, *iter; struct sbp2_status status; unsigned long flags; @@ -433,17 +433,18 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, /* Lookup the orb corresponding to this status write. 
*/ spin_lock_irqsave(&lu->tgt->lock, flags); - list_for_each_entry(orb, &lu->orb_list, link) { + list_for_each_entry(iter, &lu->orb_list, link) { if (STATUS_GET_ORB_HIGH(status) == 0 && - STATUS_GET_ORB_LOW(status) == orb->request_bus) { - orb->rcode = RCODE_COMPLETE; - list_del(&orb->link); + STATUS_GET_ORB_LOW(status) == iter->request_bus) { + iter->rcode = RCODE_COMPLETE; + list_del(&iter->link); + orb = iter; break; } } spin_unlock_irqrestore(&lu->tgt->lock, flags); - if (&orb->link != &lu->orb_list) { + if (orb) { orb->callback(orb, &status); kref_put(&orb->kref, free_orb); /* orb callback reference */ } else { diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c index b70d741f8465..439b2099abeb 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -191,7 +191,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, break; loop_num_ret = le32_to_cpu(*num_ret); - if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) { + if (loop_num_ret > MAX_PROTOCOLS_IMP - tot_num_ret) { dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP"); break; } diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index bd364d3a56d2..7fa1a8a624a2 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -204,7 +204,8 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id, if (rate_discrete && rate) { clk->list.num_rates = tot_rate_cnt; - sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL); + sort(clk->list.rates, tot_rate_cnt, sizeof(*rate), + rate_cmp_func, NULL); } clk->rate_discrete = rate_discrete; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 06350ca94891..babadc7b4c64 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -1126,8 +1126,12 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) { int ret = scmi_chan_setup(info, dev, prot_id, true); - if (!ret) /* Rx is optional, hence no error check */ - scmi_chan_setup(info, dev, prot_id, false); + if (!ret) { + /* Rx is optional, report only memory errors */ + ret = scmi_chan_setup(info, dev, prot_id, false); + if (ret && ret != -ENOMEM) + ret = 0; + } return ret; } @@ -1608,6 +1612,7 @@ MODULE_DEVICE_TABLE(of, scmi_of_match); static struct platform_driver scmi_driver = { .driver = { .name = "arm-scmi", + .suppress_bind_attrs = true, .of_match_table = scmi_of_match, .dev_groups = versions_groups, }, diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c index bcc5cf1c99d6..10dccc14f4ce 100644 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -112,9 +112,28 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) scmi_pd_data->domains = domains; scmi_pd_data->num_domains = num_domains; + dev_set_drvdata(dev, scmi_pd_data); + return of_genpd_add_provider_onecell(np, scmi_pd_data); } +static void scmi_pm_domain_remove(struct scmi_device *sdev) +{ + int i; + struct genpd_onecell_data *scmi_pd_data; + struct device *dev = &sdev->dev; + struct device_node *np = dev->of_node; + + of_genpd_del_provider(np); + + scmi_pd_data = dev_get_drvdata(dev); + for (i = 0; i < scmi_pd_data->num_domains; i++) { + if (!scmi_pd_data->domains[i]) + continue; + pm_genpd_remove(scmi_pd_data->domains[i]); + } +} + static const struct scmi_device_id scmi_id_table[] = { { SCMI_PROTOCOL_POWER, "genpd" }, { }, 
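/*
 * Aside: close_transaction(), fw_core_handle_response() and
 * sbp2_status_write() above all retire the same anti-pattern: after the
 * search loop the iterator no longer points at a valid element, so tests
 * like &t->link != &list (or any use of t) are bogus on a miss. The fix
 * publishes the hit through a separate NULL-initialized pointer. Minimal
 * model with a plain singly linked list (invented names):
 */
#include <stdio.h>
#include <stddef.h>

struct item {
	int key;
	struct item *next;
};

static struct item *find(struct item *head, int key)
{
	struct item *found = NULL, *iter;

	for (iter = head; iter; iter = iter->next) {
		if (iter->key == key) {
			found = iter;	/* publish the match explicitly */
			break;
		}
	}
	/* here 'iter' is exhausted on a miss; only 'found' is meaningful */
	return found;
}

int main(void)
{
	struct item c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	printf("hit:  %p\n", (void *)find(&a, 2));
	printf("miss: %p\n", (void *)find(&a, 9));	/* (nil), not garbage */
	return 0;
}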
@@ -124,6 +143,7 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table); static struct scmi_driver scmi_power_domain_driver = { .name = "scmi-power-domain", .probe = scmi_pm_domain_probe, + .remove = scmi_pm_domain_remove, .id_table = scmi_id_table, }; module_scmi_driver(scmi_power_domain_driver); diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 4ceba5ef7895..36391cb5130e 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -815,7 +815,7 @@ static int scpi_init_versions(struct scpi_drvinfo *info) info->firmware_version = le32_to_cpu(caps.platform_version); } /* Ignore error if not implemented */ - if (scpi_info->is_legacy && ret == -EOPNOTSUPP) + if (info->is_legacy && ret == -EOPNOTSUPP) return 0; return ret; @@ -905,13 +905,14 @@ static int scpi_probe(struct platform_device *pdev) struct resource res; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; + struct scpi_drvinfo *scpi_drvinfo; - scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL); - if (!scpi_info) + scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL); + if (!scpi_drvinfo) return -ENOMEM; if (of_match_device(legacy_scpi_of_match, &pdev->dev)) - scpi_info->is_legacy = true; + scpi_drvinfo->is_legacy = true; count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); if (count < 0) { @@ -919,19 +920,19 @@ static int scpi_probe(struct platform_device *pdev) return -ENODEV; } - scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan), - GFP_KERNEL); - if (!scpi_info->channels) + scpi_drvinfo->channels = + devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL); + if (!scpi_drvinfo->channels) return -ENOMEM; - ret = devm_add_action(dev, scpi_free_channels, scpi_info); + ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo); if (ret) return ret; - for (; scpi_info->num_chans < count; scpi_info->num_chans++) { + for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) { resource_size_t size; - int idx = scpi_info->num_chans; - struct scpi_chan *pchan = scpi_info->channels + idx; + int idx = scpi_drvinfo->num_chans; + struct scpi_chan *pchan = scpi_drvinfo->channels + idx; struct mbox_client *cl = &pchan->cl; struct device_node *shmem = of_parse_phandle(np, "shmem", idx); @@ -975,45 +976,53 @@ static int scpi_probe(struct platform_device *pdev) return ret; } - scpi_info->commands = scpi_std_commands; + scpi_drvinfo->commands = scpi_std_commands; - platform_set_drvdata(pdev, scpi_info); + platform_set_drvdata(pdev, scpi_drvinfo); - if (scpi_info->is_legacy) { + if (scpi_drvinfo->is_legacy) { /* Replace with legacy variants */ scpi_ops.clk_set_val = legacy_scpi_clk_set_val; - scpi_info->commands = scpi_legacy_commands; + scpi_drvinfo->commands = scpi_legacy_commands; /* Fill priority bitmap */ for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++) set_bit(legacy_hpriority_cmds[idx], - scpi_info->cmd_priority); + scpi_drvinfo->cmd_priority); } - ret = scpi_init_versions(scpi_info); + scpi_info = scpi_drvinfo; + + ret = scpi_init_versions(scpi_drvinfo); if (ret) { dev_err(dev, "incorrect or no SCP firmware found\n"); + scpi_info = NULL; return ret; } - if (scpi_info->is_legacy && !scpi_info->protocol_version && - !scpi_info->firmware_version) + if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version && + !scpi_drvinfo->firmware_version) dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n"); else dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n", 
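/*
 * Aside: scmi_base_implementation_list_get() above rewrites the bound
 * check "tot + loop > MAX" as "loop > MAX - tot". With unsigned
 * arithmetic the first form can wrap past zero and pass; the second
 * compares against the remaining headroom and cannot overflow, since tot
 * is already known to be <= MAX. Demonstration:
 */
#include <stdio.h>

int main(void)
{
	unsigned int max = 16, tot = 8;
	unsigned int loop = 0xfffffff8u;	/* hostile huge count */

	/* 8 + 0xfffffff8 wraps to 0, so the broken check lets it through */
	printf("broken check rejects: %d\n", tot + loop > max);	/* 0 */
	printf("fixed check rejects:  %d\n", loop > max - tot);	/* 1 */
	return 0;
}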
FIELD_GET(PROTO_REV_MAJOR_MASK, - scpi_info->protocol_version), + scpi_drvinfo->protocol_version), FIELD_GET(PROTO_REV_MINOR_MASK, - scpi_info->protocol_version), + scpi_drvinfo->protocol_version), FIELD_GET(FW_REV_MAJOR_MASK, - scpi_info->firmware_version), + scpi_drvinfo->firmware_version), FIELD_GET(FW_REV_MINOR_MASK, - scpi_info->firmware_version), + scpi_drvinfo->firmware_version), FIELD_GET(FW_REV_PATCH_MASK, - scpi_info->firmware_version)); - scpi_info->scpi_ops = &scpi_ops; + scpi_drvinfo->firmware_version)); - return devm_of_platform_populate(dev); + scpi_drvinfo->scpi_ops = &scpi_ops; + + ret = devm_of_platform_populate(dev); + if (ret) + scpi_info = NULL; + + return ret; } static const struct of_device_id scpi_of_match[] = { diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c index 8b8127fa8955..4a93fb490cb4 100644 --- a/drivers/firmware/dmi-sysfs.c +++ b/drivers/firmware/dmi-sysfs.c @@ -603,7 +603,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh, "%d-%d", dh->type, entry->instance); if (*ret) { - kfree(entry); + kobject_put(&entry->kobj); return; } diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index 4dde8edd53b6..3e8d4b51a814 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -242,29 +242,6 @@ failed: return ret; } -/** - * efi_capsule_flush - called by file close or file flush - * @file: file pointer - * @id: not used - * - * If a capsule is being partially uploaded then calling this function - * will be treated as upload termination and will free those completed - * buffer pages and -ECANCELED will be returned. - **/ -static int efi_capsule_flush(struct file *file, fl_owner_t id) -{ - int ret = 0; - struct capsule_info *cap_info = file->private_data; - - if (cap_info->index > 0) { - pr_err("capsule upload not complete\n"); - efi_free_all_buff_pages(cap_info); - ret = -ECANCELED; - } - - return ret; -} - /** * efi_capsule_release - called by file close * @inode: not used @@ -277,6 +254,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file) { struct capsule_info *cap_info = file->private_data; + if (cap_info->index > 0 && + (cap_info->header.headersize == 0 || + cap_info->count < cap_info->total_size)) { + pr_err("capsule upload not complete\n"); + efi_free_all_buff_pages(cap_info); + } + kfree(cap_info->pages); kfree(cap_info->phys); kfree(file->private_data); @@ -324,7 +308,6 @@ static const struct file_operations efi_capsule_fops = { .owner = THIS_MODULE, .open = efi_capsule_open, .write = efi_capsule_write, - .flush = efi_capsule_flush, .release = efi_capsule_release, .llseek = no_llseek, }; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index e3df82d5d37a..70be9c87fb67 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -590,7 +590,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, seed = early_memremap(efi_rng_seed, sizeof(*seed)); if (seed != NULL) { - size = READ_ONCE(seed->size); + size = min(seed->size, EFI_RANDOM_SEED_SIZE); early_memunmap(seed, sizeof(*seed)); } else { pr_err("Could not map UEFI random seed!\n"); diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index d0537573501e..2c67f71f2375 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -37,6 +37,13 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \ $(call 
cc-option,-fno-addrsig) \ -D__DISABLE_EXPORTS +# +# struct randomization only makes sense for Linux internal types, which the EFI +# stub code never touches, so let's turn off struct randomization for the stub +# altogether +# +KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS)) + # remove SCS flags from all objects in this directory KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS)) # disable LTO diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 415a971e7694..7f4bafcd9d33 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -24,7 +24,7 @@ efi_status_t check_platform_features(void) return EFI_SUCCESS; tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf; - if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) { + if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) { if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) efi_err("This 64 KB granular kernel is not supported by your CPU\n"); else diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 368cd60000ee..d48b0de05b62 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -281,14 +281,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle, goto fail; } - /* - * Now that we have done our final memory allocation (and free) - * we can get the memory map key needed for exit_boot_services(). - */ - status = efi_get_memory_map(&map); - if (status != EFI_SUCCESS) - goto fail_free_new_fdt; - status = update_fdt((void *)fdt_addr, fdt_size, (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr, initrd_addr, initrd_size); diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 24aa37535372..33ab56769595 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -75,7 +75,12 @@ efi_status_t efi_random_get_seed(void) if (status != EFI_SUCCESS) return status; - status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, + /* + * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the + * allocation will survive a kexec reboot (although we refresh the seed + * beforehand) + */ + status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, sizeof(*seed) + EFI_RANDOM_SEED_SIZE, (void **)&seed); if (status != EFI_SUCCESS) diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index 5efc524b14be..a2be3a71bcf8 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c @@ -19,7 +19,7 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode"; /* SHIM variables */ static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; -static const efi_char16_t shim_MokSBState_name[] = L"MokSBState"; +static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT"; /* * Determine whether we're in secure boot mode. @@ -53,8 +53,8 @@ enum efi_secureboot_mode efi_get_secureboot(void) /* * See if a user has put the shim into insecure mode. If so, and if the - * variable doesn't have the runtime attribute set, we might as well - * honor that. + * variable doesn't have the non-volatile attribute set, we might as + * well honor that. */ size = sizeof(moksbstate); status = get_efi_var(shim_MokSBState_name, &shim_guid, @@ -63,7 +63,7 @@ enum efi_secureboot_mode efi_get_secureboot(void) /* If it fails, we don't care why. 
Default to secure */ if (status != EFI_SUCCESS) goto secure_boot_enabled; - if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1) + if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1) return efi_secureboot_mode_disabled; secure_boot_enabled: diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 3672539cb96e..5d0f1b1966fc 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -414,6 +414,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, hdr->ramdisk_image = 0; hdr->ramdisk_size = 0; + /* + * Disregard any setup data that was provided by the bootloader: + * setup_data could be pointing anywhere, and we have no way of + * authenticating or validating the payload. + */ + hdr->setup_data = 0; + efi_stub_entry(handle, sys_table_arg, boot_params); /* not reached */ diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c index 8f665678e9e3..e8d69bd548f3 100644 --- a/drivers/firmware/efi/tpm.c +++ b/drivers/firmware/efi/tpm.c @@ -97,7 +97,7 @@ int __init efi_tpm_eventlog_init(void) goto out_calc; } - memblock_reserve((unsigned long)final_tbl, + memblock_reserve(efi.tpm_final_log, tbl_size + sizeof(*final_tbl)); efi_tpm_final_log_size = tbl_size; diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c index 0205987a4fd4..568074148f62 100644 --- a/drivers/firmware/google/coreboot_table.c +++ b/drivers/firmware/google/coreboot_table.c @@ -152,12 +152,8 @@ static int coreboot_table_probe(struct platform_device *pdev) if (!ptr) return -ENOMEM; - ret = bus_register(&coreboot_bus_type); - if (!ret) { - ret = coreboot_table_populate(dev, ptr); - if (ret) - bus_unregister(&coreboot_bus_type); - } + ret = coreboot_table_populate(dev, ptr); + memunmap(ptr); return ret; @@ -172,7 +168,6 @@ static int __cb_dev_unregister(struct device *dev, void *dummy) static int coreboot_table_remove(struct platform_device *pdev) { bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister); - bus_unregister(&coreboot_bus_type); return 0; } @@ -202,6 +197,32 @@ static struct platform_driver coreboot_table_driver = { .of_match_table = of_match_ptr(coreboot_of_match), }, }; -module_platform_driver(coreboot_table_driver); + +static int __init coreboot_table_driver_init(void) +{ + int ret; + + ret = bus_register(&coreboot_bus_type); + if (ret) + return ret; + + ret = platform_driver_register(&coreboot_table_driver); + if (ret) { + bus_unregister(&coreboot_bus_type); + return ret; + } + + return 0; +} + +static void __exit coreboot_table_driver_exit(void) +{ + platform_driver_unregister(&coreboot_table_driver); + bus_unregister(&coreboot_bus_type); +} + +module_init(coreboot_table_driver_init); +module_exit(coreboot_table_driver_exit); + MODULE_AUTHOR("Google, Inc."); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index 7d9367b22010..c1cd5ca875ca 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -680,6 +680,15 @@ static struct notifier_block gsmi_die_notifier = { static int gsmi_panic_callback(struct notifier_block *nb, unsigned long reason, void *arg) { + + /* + * Panic callbacks are executed with all other CPUs stopped, + * so we must not attempt to spin waiting for gsmi_dev.lock + * to be released. 
+ */ + if (spin_is_locked(&gsmi_dev.lock)) + return NOTIFY_DONE; + gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC); return NOTIFY_DONE; } diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 53c7e3f8cfde..7dd0ac1a0cfc 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -941,17 +941,17 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory); void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr) { struct stratix10_svc_data_mem *pmem; - size_t size = 0; list_for_each_entry(pmem, &svc_data_mem, node) if (pmem->vaddr == kaddr) { - size = pmem->size; - break; + gen_pool_free(chan->ctrl->genpool, + (unsigned long)kaddr, pmem->size); + pmem->vaddr = NULL; + list_del(&pmem->node); + return; } - gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size); - pmem->vaddr = NULL; - list_del(&pmem->node); + list_del(&svc_data_mem); } EXPORT_SYMBOL_GPL(stratix10_svc_free_memory); diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c index 440d99c63638..286fe1257961 100644 --- a/drivers/firmware/tegra/bpmp-debugfs.c +++ b/drivers/firmware/tegra/bpmp-debugfs.c @@ -74,28 +74,36 @@ static void seqbuf_seek(struct seqbuf *seqbuf, ssize_t offset) static const char *get_filename(struct tegra_bpmp *bpmp, const struct file *file, char *buf, int size) { - char root_path_buf[512]; - const char *root_path; - const char *filename; + const char *root_path, *filename = NULL; + char *root_path_buf; size_t root_len; + root_path_buf = kzalloc(512, GFP_KERNEL); + if (!root_path_buf) + goto out; + root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf, sizeof(root_path_buf)); if (IS_ERR(root_path)) - return NULL; + goto out; root_len = strlen(root_path); filename = dentry_path(file->f_path.dentry, buf, size); - if (IS_ERR(filename)) - return NULL; + if (IS_ERR(filename)) { + filename = NULL; + goto out; + } - if (strlen(filename) < root_len || - strncmp(filename, root_path, root_len)) - return NULL; + if (strlen(filename) < root_len || strncmp(filename, root_path, root_len)) { + filename = NULL; + goto out; + } filename += root_len; +out: + kfree(root_path_buf); return filename; } @@ -429,7 +437,7 @@ static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp, mode |= attrs & DEBUGFS_S_IWUSR ? 
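/*
 * Aside: gsmi_panic_callback() above bails out if gsmi_dev.lock is held,
 * because on panic the other CPUs, possibly including the lock owner, are
 * already stopped, so spinning would hang the machine. The kernel patch
 * tests spin_is_locked(); the closest portable analogue is a trylock,
 * sketched here with pthreads:
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

static void panic_handler(void)
{
	/* Never block here: the owner may never run again. Skip the work
	 * rather than deadlock the panic path. */
	if (pthread_mutex_trylock(&log_lock) != 0) {
		fprintf(stderr, "log busy, skipping shutdown record\n");
		return;
	}
	fprintf(stderr, "shutdown reason recorded\n");
	pthread_mutex_unlock(&log_lock);
}

int main(void)
{
	panic_handler();		/* lock free: records */
	pthread_mutex_lock(&log_lock);
	panic_handler();		/* lock held: skips, does not hang */
	pthread_mutex_unlock(&log_lock);
	return 0;
}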
0200 : 0; dentry = debugfs_create_file(name, mode, parent, bpmp, &bpmp_debug_fops); - if (!dentry) { + if (IS_ERR(dentry)) { err = -ENOMEM; goto out; } @@ -680,7 +688,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf, if (t & DEBUGFS_S_ISDIR) { dentry = debugfs_create_dir(name, parent); - if (!dentry) + if (IS_ERR(dentry)) return -ENOMEM; err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1); if (err < 0) @@ -693,7 +701,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf, dentry = debugfs_create_file(name, mode, parent, bpmp, &debugfs_fops); - if (!dentry) + if (IS_ERR(dentry)) return -ENOMEM; } } @@ -743,11 +751,11 @@ int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp) return 0; root = debugfs_create_dir("bpmp", NULL); - if (!root) + if (IS_ERR(root)) return -ENOMEM; bpmp->debugfs_mirror = debugfs_create_dir("debug", root); - if (!bpmp->debugfs_mirror) { + if (IS_ERR(bpmp->debugfs_mirror)) { err = -ENOMEM; goto out; } diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c index 2cf25fd5e897..75b4b3ec933a 100644 --- a/drivers/fpga/altera-pr-ip-core.c +++ b/drivers/fpga/altera-pr-ip-core.c @@ -108,7 +108,7 @@ static int alt_pr_fpga_write(struct fpga_manager *mgr, const char *buf, u32 *buffer_32 = (u32 *)buf; size_t i = 0; - if (count <= 0) + if (!count) return -EINVAL; /* Write out the complete 32-bit chunks */ diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index b450870b75ed..eb8a6e329af9 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -1857,7 +1857,7 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev, return -EINVAL; fds = memdup_user((void __user *)(arg + sizeof(hdr)), - hdr.count * sizeof(s32)); + array_size(hdr.count, sizeof(s32))); if (IS_ERR(fds)) return PTR_ERR(fds); diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c index 59ddc9fd5bca..92e6eebd1851 100644 --- a/drivers/fsi/fsi-core.c +++ b/drivers/fsi/fsi-core.c @@ -1309,6 +1309,9 @@ int fsi_master_register(struct fsi_master *master) mutex_init(&master->scan_lock); master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL); + if (master->idx < 0) + return master->idx; + dev_set_name(&master->dev, "fsi%d", master->idx); master->dev.class = &fsi_master_class; diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c index fdcebe59510d..68d95051dd0e 100644 --- a/drivers/gpio/gpio-amd8111.c +++ b/drivers/gpio/gpio-amd8111.c @@ -231,7 +231,10 @@ found: ioport_unmap(gp.pm); goto out; } + return 0; + out: + pci_dev_put(pdev); return err; } @@ -239,6 +242,7 @@ static void __exit amd_gpio_exit(void) { gpiochip_remove(&gp.chip); ioport_unmap(gp.pm); + pci_dev_put(gp.pdev); } module_init(amd_gpio_init); diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 4275c18a097a..ea2e2618b794 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -646,10 +646,9 @@ static int dwapb_get_clks(struct dwapb_gpio *gpio) gpio->clks[1].id = "db"; err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS, gpio->clks); - if (err) { - dev_err(gpio->dev, "Cannot get APB/Debounce clocks\n"); - return err; - } + if (err) + return dev_err_probe(gpio->dev, err, + "Cannot get APB/Debounce clocks\n"); err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks); if (err) { diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 67ed4f238d43..876027fdefc9 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -375,6 +375,13 @@ static void 
gpio_mockup_debugfs_setup(struct device *dev, } } +static void gpio_mockup_debugfs_cleanup(void *data) +{ + struct gpio_mockup_chip *chip = data; + + debugfs_remove_recursive(chip->dbg_dir); +} + static void gpio_mockup_dispose_mappings(void *data) { struct gpio_mockup_chip *chip = data; @@ -457,7 +464,7 @@ static int gpio_mockup_probe(struct platform_device *pdev) gpio_mockup_debugfs_setup(dev, chip); - return 0; + return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip); } static struct platform_driver gpio_mockup_driver = { @@ -597,9 +604,9 @@ static int __init gpio_mockup_init(void) static void __exit gpio_mockup_exit(void) { + gpio_mockup_unregister_pdevs(); debugfs_remove_recursive(gpio_mockup_dbg_dir); platform_driver_unregister(&gpio_mockup_driver); - gpio_mockup_unregister_pdevs(); } module_init(gpio_mockup_init); diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index d60d5520707d..60c2533a39a5 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c @@ -169,6 +169,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) switch (flow_type) { case IRQ_TYPE_EDGE_FALLING: + case IRQ_TYPE_LEVEL_LOW: raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR, gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR) diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index ed7c5fc47f52..2ab34a8e6273 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -700,6 +700,9 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, unsigned long flags; unsigned int on, off; + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle; do_div(val, NSEC_PER_SEC); if (val > UINT_MAX) diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index a78167b2c9ca..3ad1a9e432c8 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -350,6 +350,9 @@ static const struct regmap_config pca953x_i2c_regmap = { .reg_bits = 8, .val_bits = 8, + .use_single_read = true, + .use_single_write = true, + .readable_reg = pca953x_readable_register, .writeable_reg = pca953x_writeable_register, .volatile_reg = pca953x_volatile_register, @@ -761,11 +764,11 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin bitmap_xor(cur_stat, new_stat, old_stat, gc->ngpio); bitmap_and(trigger, cur_stat, chip->irq_mask, gc->ngpio); + bitmap_copy(chip->irq_stat, new_stat, gc->ngpio); + if (bitmap_empty(trigger, gc->ngpio)) return false; - bitmap_copy(chip->irq_stat, new_stat, gc->ngpio); - bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio); bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio); bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio); @@ -893,15 +896,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert) { DECLARE_BITMAP(val, MAX_LINE); + u8 regaddr; int ret; - ret = regcache_sync_region(chip->regmap, chip->regs->output, - chip->regs->output + NBANK(chip)); + regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0); + ret = regcache_sync_region(chip->regmap, regaddr, + regaddr + NBANK(chip) - 1); if (ret) goto out; - ret = regcache_sync_region(chip->regmap, chip->regs->direction, - chip->regs->direction + NBANK(chip)); + regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0); + ret = regcache_sync_region(chip->regmap, regaddr, + regaddr + 
NBANK(chip) - 1); if (ret) goto out; @@ -1107,20 +1113,21 @@ static int pca953x_regcache_sync(struct device *dev) { struct pca953x_chip *chip = dev_get_drvdata(dev); int ret; + u8 regaddr; /* * The ordering between direction and output is important, * sync these registers first and only then sync the rest. */ - ret = regcache_sync_region(chip->regmap, chip->regs->direction, - chip->regs->direction + NBANK(chip)); + regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0); + ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1); if (ret) { dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret); return ret; } - ret = regcache_sync_region(chip->regmap, chip->regs->output, - chip->regs->output + NBANK(chip)); + regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0); + ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1); if (ret) { dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret); return ret; @@ -1128,16 +1135,18 @@ static int pca953x_regcache_sync(struct device *dev) #ifdef CONFIG_GPIO_PCA953X_IRQ if (chip->driver_data & PCA_PCAL) { - ret = regcache_sync_region(chip->regmap, PCAL953X_IN_LATCH, - PCAL953X_IN_LATCH + NBANK(chip)); + regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0); + ret = regcache_sync_region(chip->regmap, regaddr, + regaddr + NBANK(chip) - 1); if (ret) { dev_err(dev, "Failed to sync INT latch registers: %d\n", ret); return ret; } - ret = regcache_sync_region(chip->regmap, PCAL953X_INT_MASK, - PCAL953X_INT_MASK + NBANK(chip)); + regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0); + ret = regcache_sync_region(chip->regmap, regaddr, + regaddr + NBANK(chip) - 1); if (ret) { dev_err(dev, "Failed to sync INT mask registers: %d\n", ret); @@ -1153,7 +1162,9 @@ static int pca953x_suspend(struct device *dev) { struct pca953x_chip *chip = dev_get_drvdata(dev); + mutex_lock(&chip->i2c_lock); regcache_cache_only(chip->regmap, true); + mutex_unlock(&chip->i2c_lock); if (atomic_read(&chip->wakeup_path)) device_set_wakeup_path(dev); @@ -1176,13 +1187,17 @@ static int pca953x_resume(struct device *dev) } } + mutex_lock(&chip->i2c_lock); regcache_cache_only(chip->regmap, false); regcache_mark_dirty(chip->regmap); ret = pca953x_regcache_sync(dev); - if (ret) + if (ret) { + mutex_unlock(&chip->i2c_lock); return ret; + } ret = regcache_sync(chip->regmap); + mutex_unlock(&chip->i2c_lock); if (ret) { dev_err(dev, "Failed to restore register map: %d\n", ret); return ret; diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 58776f2d69ff..1ae612c796ee 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -125,9 +125,13 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, { struct vf610_gpio_port *port = gpiochip_get_data(chip); unsigned long mask = BIT(gpio); + u32 val; - if (port->sdata && port->sdata->have_paddr) - vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR); + if (port->sdata && port->sdata->have_paddr) { + val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR); + val |= mask; + vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR); + } vf610_gpio_set(chip, gpio, value); diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c index 98cd715ccc33..8d09b619c166 100644 --- a/drivers/gpio/gpio-vr41xx.c +++ b/drivers/gpio/gpio-vr41xx.c @@ -217,8 +217,6 @@ static int giu_get_irq(unsigned int irq) printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n", maskl, pendl, maskh, pendh); - atomic_inc(&irq_err_count); - return 
-EINVAL; } diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c index 7f8f5b02e31d..4b61d975cc0e 100644 --- a/drivers/gpio/gpio-winbond.c +++ b/drivers/gpio/gpio-winbond.c @@ -385,12 +385,13 @@ static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset) unsigned long *base = gpiochip_get_data(gc); const struct winbond_gpio_info *info; bool val; + int ret; winbond_gpio_get_info(&offset, &info); - val = winbond_sio_enter(*base); - if (val) - return val; + ret = winbond_sio_enter(*base); + if (ret) + return ret; winbond_sio_select_logical(*base, info->dev); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 55e4f402ec8b..44ee319da1b3 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -276,8 +276,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, pin = agpio->pin_table[0]; if (pin <= 255) { - char ev_name[5]; - sprintf(ev_name, "_%c%02hhX", + char ev_name[8]; + sprintf(ev_name, "_%c%02X", agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', pin); if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 2613881a66e6..381cfa26a4a1 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -1769,7 +1769,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) ret = -ENODEV; goto out_free_le; } - le->irq = irq; if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? @@ -1783,7 +1782,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) init_waitqueue_head(&le->wait); /* Request a thread to read the events */ - ret = request_threaded_irq(le->irq, + ret = request_threaded_irq(irq, lineevent_irq_handler, lineevent_irq_thread, irqflags, @@ -1792,6 +1791,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) if (ret) goto out_free_le; + le->irq = irq; + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); if (fd < 0) { ret = fd; diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index be9002d622d5..6fc8fe8b6b85 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -863,7 +863,8 @@ int of_mm_gpiochip_add_data(struct device_node *np, if (mm_gc->save_regs) mm_gc->save_regs(mm_gc); - mm_gc->gc.of_node = np; + of_node_put(mm_gc->gc.of_node); + mm_gc->gc.of_node = of_node_get(np); ret = gpiochip_add_data(gc, data); if (ret) @@ -871,6 +872,7 @@ int of_mm_gpiochip_add_data(struct device_node *np, return 0; err2: + of_node_put(np); iounmap(mm_gc->regs); err1: kfree(gc->label); @@ -912,7 +914,7 @@ static void of_gpiochip_init_valid_mask(struct gpio_chip *chip) i, &start); of_property_read_u32_index(np, "gpio-reserved-ranges", i + 1, &count); - if (start >= chip->ngpio || start + count >= chip->ngpio) + if (start >= chip->ngpio || start + count > chip->ngpio) continue; bitmap_clear(chip->valid_mask, start, count); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 0a8ad0d2742a..2c50f8a3c69b 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1412,6 +1412,16 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned offset) { struct irq_domain *domain = gc->irq.domain; +#ifdef CONFIG_GPIOLIB_IRQCHIP + /* + * Avoid race condition with other code, which tries to lookup + * an IRQ before the irqchip has been properly registered, + * i.e. while gpiochip is still being brought up. 
+ */ + if (!gc->irq.initialized) + return -EPROBE_DEFER; +#endif + if (!gpiochip_irqchip_irq_valid(gc, offset)) return -ENXIO; @@ -1603,6 +1613,15 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc, gpiochip_set_irq_hooks(gc); + /* + * Using barrier() here to prevent compiler from reordering + * gc->irq.initialized before initialization of above + * GPIO chip irq members. + */ + barrier(); + + gc->irq.initialized = true; + acpi_gpiochip_request_interrupts(gc); return 0; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 31f47259d2b7..df2a2b089a64 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -30,6 +30,7 @@ menuconfig DRM config DRM_MIPI_DBI tristate depends on DRM + select DRM_KMS_HELPER config DRM_EDID bool "EDID function for DRM" diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h index 5b393622f592..a0f0a17e224f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h +++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h @@ -119,6 +119,7 @@ #define CONNECTOR_OBJECT_ID_eDP 0x14 #define CONNECTOR_OBJECT_ID_MXM 0x15 #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 +#define CONNECTOR_OBJECT_ID_USBC 0x17 /* deleted */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d949d6c52f24..ff5555353eb4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -283,7 +283,7 @@ enum amdgpu_kiq_irq { AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0, AMDGPU_CP_KIQ_IRQ_LAST }; - +#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */ #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ #define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index fb6230c62daa..d3a974d10552 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -689,7 +689,8 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid) const uint32_t flush_type = 0; bool all_hub = false; - if (adev->family == AMDGPU_FAMILY_AI) + if (adev->family == AMDGPU_FAMILY_AI || + adev->family == AMDGPU_FAMILY_RV) all_hub = true; return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 26f8a2138377..1b4c7ced8b92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1024,11 +1024,15 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, struct dma_fence **ef) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct drm_file *drm_priv = filp->private_data; - struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv; - struct amdgpu_vm *avm = &drv_priv->vm; + struct amdgpu_fpriv *drv_priv; + struct amdgpu_vm *avm; int ret; + ret = amdgpu_file_to_fpriv(filp, &drv_priv); + if (ret) + return ret; + avm = &drv_priv->vm; + /* Already a compute VM? 
*/ if (avm->process_info) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index df1f9b88a53f..aabfe5705bb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -315,8 +315,10 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector) if (!amdgpu_connector->edid) { /* some laptops provide a hardcoded edid in rom for LCDs */ if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || - (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) + (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) { amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev); + drm_connector_update_edid_property(connector, amdgpu_connector->edid); + } } } @@ -1671,10 +1673,12 @@ amdgpu_connector_add(struct amdgpu_device *adev, adev->mode_info.dither_property, AMDGPU_FMT_DITHER_DISABLE); - if (amdgpu_audio != 0) + if (amdgpu_audio != 0) { drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.audio_property, AMDGPU_AUDIO_AUTO); + amdgpu_connector->audio = AMDGPU_AUDIO_AUTO; + } subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; @@ -1796,6 +1800,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.audio_property, AMDGPU_AUDIO_AUTO); + amdgpu_connector->audio = AMDGPU_AUDIO_AUTO; } drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.dither_property, @@ -1849,6 +1854,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.audio_property, AMDGPU_AUDIO_AUTO); + amdgpu_connector->audio = AMDGPU_AUDIO_AUTO; } drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.dither_property, @@ -1899,6 +1905,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.audio_property, AMDGPU_AUDIO_AUTO); + amdgpu_connector->audio = AMDGPU_AUDIO_AUTO; } drm_object_attach_property(&amdgpu_connector->base.base, adev->mode_info.dither_property, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 12598a4b5c78..ffd8f5601e28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -116,7 +116,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs int ret; if (cs->in.num_chunks == 0) - return 0; + return -EINVAL; chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); if (!chunk_array) @@ -1484,6 +1484,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, return 0; default: + dma_fence_put(fence); return -EINVAL; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f262c4e7a48a..bde0496d2f15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2047,6 +2047,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); return r; } + + /* get pf2vf msg info at its earliest time */ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_init_data_exchange(adev); + } } @@ -2176,6 +2181,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) /* need to do gmc hw init early so we can allocate gpu mem */ if (adev->ip_blocks[i].version->type ==
AMD_IP_BLOCK_TYPE_GMC) { + /* Try to reserve bad pages early */ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_exchange_data(adev); + r = amdgpu_device_vram_scratch_init(adev); if (r) { DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index ed13a2f76884..30659c1776e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -632,7 +632,7 @@ MODULE_PARM_DESC(sched_policy, * Maximum number of processes that HWS can schedule concurrently. The maximum is the * number of VMIDs assigned to the HWS, which is also the default. */ -int hws_max_conc_proc = 8; +int hws_max_conc_proc = -1; module_param(hws_max_conc_proc, int, 0444); MODULE_PARM_DESC(hws_max_conc_proc, "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index e8c76bd8c501..6aa9fd9cb83b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -341,11 +341,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, if (r) goto release_object; - if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) { - r = amdgpu_mn_register(bo, args->addr); - if (r) - goto release_object; - } + r = amdgpu_mn_register(bo, args->addr); + if (r) + goto release_object; if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 9f9f55a2b257..f84582b70d0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -263,7 +263,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, * adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; - while (queue_bit-- >= 0) { + while (--queue_bit >= 0) { if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index ad9863b84f1f..6937f8134008 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -905,6 +905,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, if (WARN_ON_ONCE(min_offset > max_offset)) return -EINVAL; + /* Check domain to be pinned to against preferred domains */ + if (bo->preferred_domains & domain) + domain = bo->preferred_domains & domain; + /* A shared bo cannot be migrated to VRAM */ if (bo->prime_shared_count) { if (domain & AMDGPU_GEM_DOMAIN_GTT) @@ -1338,7 +1342,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) return; - dma_resv_lock(bo->base.resv, NULL); + if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv))) + return; r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence); if (!WARN_ON(r)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 2f47f81a74a5..8a2abcfd5a88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1921,7 +1921,7 @@ static int psp_load_smu_fw(struct psp_context *psp) static bool fw_load_skip_check(struct psp_context *psp, struct amdgpu_firmware_info *ucode) { - if (!ucode->fw) + if (!ucode->fw || !ucode->ucode_size) return true; if (ucode->ucode_id == 
AMDGPU_UCODE_ID_SMC && @@ -2146,6 +2146,9 @@ static int psp_hw_fini(void *handle) psp_rap_terminate(psp); psp_dtm_terminate(psp); psp_hdcp_terminate(psp); + + if (adev->gmc.xgmi.num_physical_nodes > 1) + psp_xgmi_terminate(psp); } psp_asd_unload(psp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index eb22a190c242..3638f0e12a2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1979,15 +1979,12 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, return 0; } -static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev) +static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) { - if (adev->asic_type != CHIP_VEGA10 && - adev->asic_type != CHIP_VEGA20 && - adev->asic_type != CHIP_ARCTURUS && - adev->asic_type != CHIP_SIENNA_CICHLID) - return 1; - else - return 0; + return adev->asic_type == CHIP_VEGA10 || + adev->asic_type == CHIP_VEGA20 || + adev->asic_type == CHIP_ARCTURUS || + adev->asic_type == CHIP_SIENNA_CICHLID; } /* @@ -2006,7 +2003,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev, *supported = 0; if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw || - amdgpu_ras_check_asic_type(adev)) + !amdgpu_ras_asic_supported(adev)) return; if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index b313ce4c3e97..30005ed8156f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -625,8 +625,7 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev) void amdgpu_ucode_free_bo(struct amdgpu_device *adev) { - if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) - amdgpu_bo_free_kernel(&adev->firmware.fw_buf, + amdgpu_bo_free_kernel(&adev->firmware.fw_buf, &adev->firmware.fw_buf_mc, &adev->firmware.fw_buf_ptr); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index e7678ba8fdcf..d6f295103595 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -580,16 +580,34 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) { - uint64_t bp_block_offset = 0; - uint32_t bp_block_size = 0; - struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; - adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; if (adev->mman.fw_vram_usage_va != NULL) { - adev->virt.vf2pf_update_interval_ms = 2000; + /* go through this logic in ip_init and reset to init workqueue*/ + amdgpu_virt_exchange_data(adev); + + INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); + schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); + } else if (adev->bios != NULL) { + /* go through this logic in early init stage to get necessary flags, e.g. 
rlcg_acc related*/ + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + + amdgpu_virt_read_pf2vf_data(adev); + } +} + + +void amdgpu_virt_exchange_data(struct amdgpu_device *adev) +{ + uint64_t bp_block_offset = 0; + uint32_t bp_block_size = 0; + struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; + + if (adev->mman.fw_vram_usage_va != NULL) { adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) @@ -616,13 +634,9 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size); } } - - if (adev->virt.vf2pf_update_interval_ms != 0) { - INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); - schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms); - } } + void amdgpu_detect_virtualization(struct amdgpu_device *adev) { uint32_t reg; @@ -656,6 +670,12 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } + if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) + /* VF MMIO access (except mailbox range) from CPU + * will be blocked during sriov runtime + */ + adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; + /* we have the ability to check now */ if (amdgpu_sriov_vf(adev)) { switch (adev->asic_type) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 8dd624c20f89..aea49bad914f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -31,6 +31,7 @@ #define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ #define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */ +#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */ /* all asic after AI use this offset */ #define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5 @@ -241,6 +242,9 @@ struct amdgpu_virt { #define amdgpu_passthrough(adev) \ ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) +#define amdgpu_sriov_vf_mmio_access_protection(adev) \ +((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT) + static inline bool is_virtual_machine(void) { #ifdef CONFIG_X86 @@ -271,6 +275,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); +void amdgpu_virt_exchange_data(struct amdgpu_device *adev); void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev); void amdgpu_detect_virtualization(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 635601d8b131..45b1f00c5968 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -3200,7 +3200,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) */ #ifdef CONFIG_X86_64 if (amdgpu_vm_update_mode == -1) { - if (amdgpu_gmc_vram_full_visible(&adev->gmc)) + /* For asic with VF MMIO access protection + * avoid using CPU for VM table updates + */ + if (amdgpu_gmc_vram_full_visible(&adev->gmc) && + !amdgpu_sriov_vf_mmio_access_protection(adev)) adev->vm_manager.vm_update_mode = AMDGPU_VM_USE_CPU_FOR_COMPUTE; else diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 042c85fc528b..def0b7092438 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -622,7 +622,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev) amdgpu_put_xgmi_hive(hive); } - return psp_xgmi_terminate(&adev->psp); + return 0; } int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index b19f7bd37781..38f4c7474487 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1248,6 +1248,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 }, /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 }, + /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */ + { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 }, { 0, 0, 0, 0, 0 }, }; @@ -2568,7 +2570,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) gfx_v9_0_tiling_mode_table_init(adev); - gfx_v9_0_setup_rb(adev); + if (adev->gfx.num_gfx_rings) + gfx_v9_0_setup_rb(adev); gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info); adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 150fa5258fb6..2aa9242c58ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -371,6 +371,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint32_t seq; uint16_t queried_pasid; bool ret; + u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout; struct amdgpu_ring *ring = &adev->gfx.kiq.ring; struct amdgpu_kiq *kiq = &adev->gfx.kiq; @@ -389,7 +390,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, amdgpu_ring_commit(ring); spin_unlock(&adev->gfx.kiq.ring_lock); - r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); + r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); if (r < 1) { dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); return -ETIME; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 3a864041968f..1673bf3bae55 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -839,6 +839,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint32_t seq; uint16_t queried_pasid; bool ret; + u32 usec_timeout = amdgpu_sriov_vf(adev) ? 
SRIOV_USEC_TIMEOUT : adev->usec_timeout; struct amdgpu_ring *ring = &adev->gfx.kiq.ring; struct amdgpu_kiq *kiq = &adev->gfx.kiq; @@ -878,7 +879,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, amdgpu_ring_commit(ring); spin_unlock(&adev->gfx.kiq.ring_lock); - r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); + r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); if (r < 1) { dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); up_read(&adev->reset_sem); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index f84701c562bf..97441f373531 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -178,6 +178,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); + tmp = mmVM_L2_CNTL3_DEFAULT; if (adev->gmc.translate_further) { tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 2099f6ebd833..bdb8e596bda6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -1429,8 +1429,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) { + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; uint32_t tmp; + vcn_v3_0_pause_dpg_mode(adev, 0, &state); + /* Wait for power status to be 1 */ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 31d793ee0836..86b4dadf772e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -784,7 +784,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size) /* Fetch the CRAT table from ACPI */ status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table); if (status == AE_NOT_FOUND) { - pr_warn("CRAT table not found\n"); + pr_info("CRAT table not found\n"); return -ENODATA; } else if (ACPI_FAILURE(status)) { const char *err = acpi_format_exception(status); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 84313135c2ea..148e43dee657 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -664,15 +664,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, - kfd->vm_info.first_vmid_kfd + 1; /* Verify module parameters regarding mapped process number*/ - if ((hws_max_conc_proc < 0) - || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) { - dev_err(kfd_device, - "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n", - hws_max_conc_proc, kfd->vm_info.vmid_num_kfd, - kfd->vm_info.vmid_num_kfd); + if (hws_max_conc_proc >= 0) + kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); + else kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; - } else - kfd->max_proc_per_quantum = hws_max_conc_proc; /* calculate max size of mqds needed for queues */ size = max_num_of_queues_per_device * diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 2645ebc63a14..195b7e02dc4b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -138,19 +138,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm, } static void increment_queue_count(struct device_queue_manager *dqm, - enum kfd_queue_type type) + struct qcm_process_device *qpd, + struct queue *q) { dqm->active_queue_count++; - if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ) + if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || + q->properties.type == KFD_QUEUE_TYPE_DIQ) dqm->active_cp_queue_count++; + + if (q->properties.is_gws) { + dqm->gws_queue_count++; + qpd->mapped_gws_queue = true; + } } static void decrement_queue_count(struct device_queue_manager *dqm, - enum kfd_queue_type type) + struct qcm_process_device *qpd, + struct queue *q) { dqm->active_queue_count--; - if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ) + if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || + q->properties.type == KFD_QUEUE_TYPE_DIQ) dqm->active_cp_queue_count--; + + if (q->properties.is_gws) { + dqm->gws_queue_count--; + qpd->mapped_gws_queue = false; + } } static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q) @@ -377,7 +391,7 @@ add_queue_to_list: list_add(&q->list, &qpd->queues_list); qpd->queue_count++; if (q->properties.is_active) - increment_queue_count(dqm, q->properties.type); + increment_queue_count(dqm, qpd, q); /* * Unconditionally increment this counter, regardless of the queue's @@ -502,13 +516,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm, deallocate_vmid(dqm, qpd, q); } qpd->queue_count--; - if (q->properties.is_active) { - decrement_queue_count(dqm, q->properties.type); - if (q->properties.is_gws) { - dqm->gws_queue_count--; - qpd->mapped_gws_queue = false; - } - } + if (q->properties.is_active) + decrement_queue_count(dqm, qpd, q); return retval; } @@ -598,12 +607,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) * dqm->active_queue_count to determine whether a new runlist must be * uploaded. 
*/ - if (q->properties.is_active && !prev_active) - increment_queue_count(dqm, q->properties.type); - else if (!q->properties.is_active && prev_active) - decrement_queue_count(dqm, q->properties.type); - - if (q->gws && !q->properties.is_gws) { + if (q->properties.is_active && !prev_active) { + increment_queue_count(dqm, &pdd->qpd, q); + } else if (!q->properties.is_active && prev_active) { + decrement_queue_count(dqm, &pdd->qpd, q); + } else if (q->gws && !q->properties.is_gws) { if (q->properties.is_active) { dqm->gws_queue_count++; pdd->qpd.mapped_gws_queue = true; @@ -665,11 +673,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; q->properties.is_active = false; - decrement_queue_count(dqm, q->properties.type); - if (q->properties.is_gws) { - dqm->gws_queue_count--; - qpd->mapped_gws_queue = false; - } + decrement_queue_count(dqm, qpd, q); if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n")) continue; @@ -713,7 +717,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, continue; q->properties.is_active = false; - decrement_queue_count(dqm, q->properties.type); + decrement_queue_count(dqm, qpd, q); } pdd->last_evict_timestamp = get_jiffies_64(); retval = execute_queues_cpsch(dqm, @@ -784,11 +788,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; q->properties.is_active = true; - increment_queue_count(dqm, q->properties.type); - if (q->properties.is_gws) { - dqm->gws_queue_count++; - qpd->mapped_gws_queue = true; - } + increment_queue_count(dqm, qpd, q); if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n")) continue; @@ -846,7 +846,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, continue; q->properties.is_active = true; - increment_queue_count(dqm, q->properties.type); + increment_queue_count(dqm, &pdd->qpd, q); } retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); @@ -1247,7 +1247,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, dqm->total_queue_count); list_add(&kq->list, &qpd->priv_queue_list); - increment_queue_count(dqm, kq->queue->properties.type); + increment_queue_count(dqm, qpd, kq->queue); qpd->is_debug = true; execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); dqm_unlock(dqm); @@ -1261,7 +1261,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, { dqm_lock(dqm); list_del(&kq->list); - decrement_queue_count(dqm, kq->queue->properties.type); + decrement_queue_count(dqm, qpd, kq->queue); qpd->is_debug = false; execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); /* @@ -1328,7 +1328,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, qpd->queue_count++; if (q->properties.is_active) { - increment_queue_count(dqm, q->properties.type); + increment_queue_count(dqm, qpd, q); execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); @@ -1513,15 +1513,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, list_del(&q->list); qpd->queue_count--; if (q->properties.is_active) { - decrement_queue_count(dqm, q->properties.type); + decrement_queue_count(dqm, qpd, q); retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); if (retval == -ETIME) qpd->reset_wavefronts = true; - if (q->properties.is_gws) { - dqm->gws_queue_count--; 
- qpd->mapped_gws_queue = false; - } } /* @@ -1732,7 +1728,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, /* Clean all kernel queues */ list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) { list_del(&kq->list); - decrement_queue_count(dqm, kq->queue->properties.type); + decrement_queue_count(dqm, qpd, kq->queue); qpd->is_debug = false; dqm->total_queue_count--; filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES; @@ -1745,13 +1741,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) deallocate_sdma_queue(dqm, q); - if (q->properties.is_active) { - decrement_queue_count(dqm, q->properties.type); - if (q->properties.is_gws) { - dqm->gws_queue_count--; - qpd->mapped_gws_queue = false; - } - } + if (q->properties.is_active) + decrement_queue_count(dqm, qpd, q); dqm->total_queue_count--; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index ba2c2ce0c55a..159be13ef20b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -531,6 +531,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events) event_waiters = kmalloc_array(num_events, sizeof(struct kfd_event_waiter), GFP_KERNEL); + if (!event_waiters) + return NULL; for (i = 0; (event_waiters) && (i < num_events) ; i++) { init_wait(&event_waiters[i].wait); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index 17d1736367ea..bd4caa36ab2e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -270,15 +270,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) return ret; } - ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client, - O_RDWR); - if (ret < 0) { - kfifo_free(&client->fifo); - kfree(client); - return ret; - } - *fd = ret; - init_waitqueue_head(&client->wait_queue); spin_lock_init(&client->lock); client->events = 0; @@ -288,5 +279,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) list_add_rcu(&client->list, &dev->smi_clients); spin_unlock(&dev->smi_lock); + ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client, + O_RDWR); + if (ret < 0) { + spin_lock(&dev->smi_lock); + list_del_rcu(&client->list); + spin_unlock(&dev->smi_lock); + + synchronize_rcu(); + + kfifo_free(&client->fifo); + kfree(client); + return ret; + } + *fd = ret; + return 0; } diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index f3274eb6b341..6c4cba09d23b 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -5,6 +5,7 @@ menu "Display Engine Configuration" config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y + depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64 select SND_HDA_COMPONENT if SND_HDA_CORE select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) help @@ -12,6 +13,12 @@ config DRM_AMD_DC support for AMDGPU. This adds required support for Vega and Raven ASICs. + calculate_bandwidth() is presently broken on all !(X86_64 || SPARC64 || ARM64) + architectures built with Clang (all released versions), whereby the stack + frame gets blown up to well over 5k. This would cause an immediate kernel + panic on most architectures. We'll revert this when the following bug report + has been resolved: https://github.com/llvm/llvm-project/issues/41896. 
+ config DRM_AMD_DC_DCN def_bool n help diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e828f9414ba2..167a1ee518a8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -922,6 +922,37 @@ static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device } } +struct amdgpu_stutter_quirk { + u16 chip_vendor; + u16 chip_device; + u16 subsys_vendor; + u16 subsys_device; + u8 revision; +}; + +static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = { + /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */ + { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, + { 0, 0, 0, 0, 0 }, +}; + +static bool dm_should_disable_stutter(struct pci_dev *pdev) +{ + const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list; + + while (p && p->chip_device != 0) { + if (pdev->vendor == p->chip_vendor && + pdev->device == p->chip_device && + pdev->subsystem_vendor == p->subsys_vendor && + pdev->subsystem_device == p->subsys_device && + pdev->revision == p->revision) { + return true; + } + ++p; + } + return false; +} + static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; @@ -1014,6 +1045,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; + if (dm_should_disable_stutter(adev->pdev)) + adev->dm.dc->debug.disable_stutter = true; if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) adev->dm.dc->debug.disable_stutter = true; @@ -2022,7 +2055,8 @@ static int dm_resume(void *handle) * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->mst_port) + if (aconnector->dc_link && + aconnector->dc_link->type == dc_connection_mst_branch) continue; mutex_lock(&aconnector->hpd_lock); @@ -2140,7 +2174,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) { - u32 max_cll, min_cll, max, min, q, r; + u32 max_avg, min_cll, max, min, q, r; struct amdgpu_dm_backlight_caps *caps; struct amdgpu_display_manager *dm; struct drm_connector *conn_base; @@ -2163,7 +2197,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps = &dm->backlight_caps; caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; caps->aux_support = false; - max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll; + max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall; min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; if (caps->ext_caps->bits.oled == 1 /*|| @@ -2191,8 +2225,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) * The results of the above expressions can be verified at * pre_computed_values. 
*/ - q = max_cll >> 5; - r = max_cll % 32; + q = max_avg >> 5; + r = max_avg % 32; max = (1 << q) * pre_computed_values[r]; // min luminance: maxLum * (CV/255)^2 / 100 @@ -2314,13 +2348,12 @@ void amdgpu_dm_update_connector_after_detect( aconnector->edid = (struct edid *)sink->dc_edid.raw_edid; - drm_connector_update_edid_property(connector, - aconnector->edid); if (aconnector->dc_link->aux_mode) drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, aconnector->edid); } + drm_connector_update_edid_property(connector, aconnector->edid); amdgpu_dm_update_freesync_caps(connector, aconnector->edid); update_connector_ext_caps(aconnector); } else { diff --git a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c index 6ca288fb5fb9..2d46bc527b21 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c @@ -26,12 +26,12 @@ #include "bw_fixed.h" -#define MIN_I64 \ - (int64_t)(-(1LL << 63)) - #define MAX_I64 \ (int64_t)((1ULL << 63) - 1) +#define MIN_I64 \ + (-MAX_I64 - 1) + #define FRACTIONAL_PART_MASK \ ((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 93f5229c303e..99887bcfada0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2202,11 +2202,8 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->abm_level) stream->abm_level = *update->abm_level; - if (update->periodic_interrupt0) - stream->periodic_interrupt0 = *update->periodic_interrupt0; - - if (update->periodic_interrupt1) - stream->periodic_interrupt1 = *update->periodic_interrupt1; + if (update->periodic_interrupt) + stream->periodic_interrupt = *update->periodic_interrupt; if (update->gamut_remap) stream->gamut_remap_matrix = *update->gamut_remap; @@ -2288,13 +2285,8 @@ static void commit_planes_do_stream_update(struct dc *dc, if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) { - if (stream_update->periodic_interrupt0 && - dc->hwss.setup_periodic_interrupt) - dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0); - - if (stream_update->periodic_interrupt1 && - dc->hwss.setup_periodic_interrupt) - dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1); + if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) + dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || stream_update->vrr_infopacket || diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 0e359a299f9e..3f4403e77814 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2822,7 +2822,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video &dpcd_pattern_type.value, sizeof(dpcd_pattern_type)); - channel_count = dpcd_test_mode.bits.channel_count + 1; + channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT); // read pattern periods for requested channels when sawTooth pattern is requested if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH || diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 5f4cdb05c4db..1e47afc4ccc1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ 
-1674,6 +1674,9 @@ static bool are_stream_backends_same( if (is_timing_changed(stream_a, stream_b)) return false; + if (stream_a->signal != stream_b->signal) + return false; + if (stream_a->dpms_off != stream_b->dpms_off) return false; @@ -1698,8 +1701,8 @@ bool dc_is_stream_unchanged( if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) return false; - // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks - if (old_stream->audio_info.mode_count != stream->audio_info.mode_count) + /*compare audio info*/ + if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0) return false; return true; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 205bedd1b196..0487c1b8957c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -179,8 +179,7 @@ struct dc_stream_state { /* DMCU info */ unsigned int abm_level; - struct periodic_interrupt_config periodic_interrupt0; - struct periodic_interrupt_config periodic_interrupt1; + struct periodic_interrupt_config periodic_interrupt; /* from core_stream struct */ struct dc_context *ctx; @@ -244,8 +243,7 @@ struct dc_stream_update { struct dc_info_packet *hdr_static_metadata; unsigned int *abm_level; - struct periodic_interrupt_config *periodic_interrupt0; - struct periodic_interrupt_config *periodic_interrupt1; + struct periodic_interrupt_config *periodic_interrupt; struct dc_info_packet *vrr_infopacket; struct dc_info_packet *vsc_infopacket; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index bae3a146b2cc..89cc852cb27c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -546,9 +546,11 @@ static void dce112_get_pix_clk_dividers_helper ( switch (pix_clk_params->color_depth) { case COLOR_DEPTH_101010: actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2; + actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10; break; case COLOR_DEPTH_121212: actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2; + actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10; break; case COLOR_DEPTH_161616: actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 3ac6c7b65a45..e33fe0207b9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2047,7 +2047,8 @@ static void dce110_setup_audio_dto( continue; if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A) continue; - if (pipe_ctx->stream_res.audio != NULL) { + if (pipe_ctx->stream_res.audio != NULL && + pipe_ctx->stream_res.audio->enabled == false) { struct audio_output audio_output; build_audio_output(context, pipe_ctx, &audio_output); @@ -2075,7 +2076,8 @@ static void dce110_setup_audio_dto( if (!dc_is_dp_signal(pipe_ctx->stream->signal)) continue; - if (pipe_ctx->stream_res.audio != NULL) { + if (pipe_ctx->stream_res.audio != NULL && + pipe_ctx->stream_res.audio->enabled == false) { struct audio_output audio_output; build_audio_output(context, pipe_ctx, &audio_output); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 
8f362e8c1787..be6d43c9979c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -361,7 +361,8 @@ static const struct dce_audio_registers audio_regs[] = { audio_regs(2), audio_regs(3), audio_regs(4), - audio_regs(5) + audio_regs(5), + audio_regs(6), }; #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 532f6a1145b5..71a85c5306ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2387,14 +2387,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) &blnd_cfg.black_color); } - if (per_pixel_alpha) - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; - else - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; - blnd_cfg.overlap_only = false; blnd_cfg.global_gain = 0xff; + if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else if (per_pixel_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + if (pipe_ctx->plane_state->global_alpha) blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; else @@ -3607,7 +3611,7 @@ void dcn10_calc_vupdate_position( { const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing; int vline_int_offset_from_vupdate = - pipe_ctx->stream->periodic_interrupt0.lines_offset; + pipe_ctx->stream->periodic_interrupt.lines_offset; int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); int start_position; @@ -3632,18 +3636,10 @@ void dcn10_calc_vupdate_position( static void dcn10_cal_vline_position( struct dc *dc, struct pipe_ctx *pipe_ctx, - enum vline_select vline, uint32_t *start_line, uint32_t *end_line) { - enum vertical_interrupt_ref_point ref_point = INVALID_POINT; - - if (vline == VLINE0) - ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point; - else if (vline == VLINE1) - ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point; - - switch (ref_point) { + switch (pipe_ctx->stream->periodic_interrupt.ref_point) { case START_V_UPDATE: dcn10_calc_vupdate_position( dc, @@ -3652,7 +3648,9 @@ static void dcn10_cal_vline_position( end_line); break; case START_V_SYNC: - // Suppose to do nothing because vsync is 0; + // vsync is line 0 so start_line is just the requested line offset + *start_line = pipe_ctx->stream->periodic_interrupt.lines_offset; + *end_line = *start_line + 2; break; default: ASSERT(0); @@ -3662,24 +3660,15 @@ static void dcn10_cal_vline_position( void dcn10_setup_periodic_interrupt( struct dc *dc, - struct pipe_ctx *pipe_ctx, - enum vline_select vline) + struct pipe_ctx *pipe_ctx) { struct timing_generator *tg = pipe_ctx->stream_res.tg; + uint32_t start_line = 0; + uint32_t end_line = 0; - if (vline == VLINE0) { - uint32_t start_line = 0; - uint32_t end_line = 0; + dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line); - dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line); - - tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line); - - } else if (vline == VLINE1) { - pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1( - tg, - 
pipe_ctx->stream->periodic_interrupt1.lines_offset); - } + tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line); } void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index e5691e499023..81b5057d5ff1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -174,8 +174,7 @@ void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx); void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx); void dcn10_setup_periodic_interrupt( struct dc *dc, - struct pipe_ctx *pipe_ctx, - enum vline_select vline); + struct pipe_ctx *pipe_ctx); enum dc_status dcn10_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 3fcd408e9103..855682590c1b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -125,6 +125,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) while (tmp_mpcc != NULL) { if (tmp_mpcc->dpp_id == dpp_id) return tmp_mpcc; + + /* avoid circular linked list */ + ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot); + if (tmp_mpcc == tmp_mpcc->mpcc_bot) + break; + tmp_mpcc = tmp_mpcc->mpcc_bot; } return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 800be2693fac..963d72f96dca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -464,6 +464,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable) OTG_CLOCK_ON, 1, 1, 1000); } else { + + // last chance to clear underflow; otherwise it will always be there because the clock is off. 
+ if (optc->funcs->is_optc_underflow_occurred(optc) == true) + optc->funcs->clear_optc_underflow(optc); + REG_UPDATE_2(OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, 0, OTG_CLOCK_EN, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 79a2b9c785f0..c6c4888c6665 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1481,6 +1481,7 @@ static void dcn20_update_dchubp_dpp( /* Any updates are handled in dc interface, just need * to apply existing for plane enable / opp change */ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed + || pipe_ctx->update_flags.bits.plane_changed || pipe_ctx->stream->update_flags.bits.gamut_remap || pipe_ctx->stream->update_flags.bits.out_csc) { #if defined(CONFIG_DRM_AMD_DC_DCN3_0) @@ -1745,7 +1746,7 @@ void dcn20_post_unlock_program_front_end( for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000 && hubp->funcs->hubp_is_flip_pending(hubp); j++) - mdelay(1); + udelay(1); } } @@ -2270,14 +2271,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx, &blnd_cfg.black_color); } - if (per_pixel_alpha) - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; - else - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; - blnd_cfg.overlap_only = false; blnd_cfg.global_gain = 0xff; + if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else if (per_pixel_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + if (pipe_ctx->plane_state->global_alpha) blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 99cc095dc33c..a701ea56c0aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -533,6 +533,12 @@ struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) while (tmp_mpcc != NULL) { if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id) return tmp_mpcc; + + /* avoid circular linked list */ + ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot); + if (tmp_mpcc == tmp_mpcc->mpcc_bot) + break; + tmp_mpcc = tmp_mpcc->mpcc_bot; } return NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 7ed4d7c8734f..01c4e8753294 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1267,6 +1267,7 @@ static struct clock_source *dcn21_clock_source_create( return &clk_src->base; } + kfree(clk_src); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index af462fe4260d..b0fd8859bd2f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr( VMID, address->vmid); if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) { - REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1); + REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0); 
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1); } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 2663f1b31842..e427f4ffa080 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -6653,8 +6653,7 @@ static double CalculateUrgentLatency( return ret; } - -static void UseMinimumDCFCLK( +static noinline_for_stack void UseMinimumDCFCLK( struct display_mode_lib *mode_lib, int MaxInterDCNTileRepeaters, int MaxPrefetchMode, diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index 92280cc05e2d..dae8e489c8cf 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -53,8 +53,8 @@ */ struct gpio_service *dal_gpio_service_create( - enum dce_version dce_version_major, - enum dce_version dce_version_minor, + enum dce_version dce_version, + enum dce_environment dce_environment, struct dc_context *ctx) { struct gpio_service *service; @@ -67,14 +67,14 @@ struct gpio_service *dal_gpio_service_create( return NULL; } - if (!dal_hw_translate_init(&service->translate, dce_version_major, - dce_version_minor)) { + if (!dal_hw_translate_init(&service->translate, dce_version, + dce_environment)) { BREAK_TO_DEBUGGER(); goto failure_1; } - if (!dal_hw_factory_init(&service->factory, dce_version_major, - dce_version_minor)) { + if (!dal_hw_factory_init(&service->factory, dce_version, + dce_environment)) { BREAK_TO_DEBUGGER(); goto failure_1; } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 64c1be818b0e..3165a66c5362 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -32,11 +32,6 @@ #include "inc/hw/link_encoder.h" #include "core_status.h" -enum vline_select { - VLINE0, - VLINE1 -}; - struct pipe_ctx; struct dc_state; struct dc_stream_status; @@ -112,8 +107,7 @@ struct hw_sequencer_funcs { int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); void (*setup_periodic_interrupt)(struct dc *dc, - struct pipe_ctx *pipe_ctx, - enum vline_select vline); + struct pipe_ctx *pipe_ctx); void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, unsigned int vmin, unsigned int vmax, unsigned int vmid, unsigned int vmid_frame_number); diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h index 9c55d247227e..7e3240e73c1f 100644 --- a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h +++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h @@ -42,8 +42,8 @@ void dal_gpio_destroy( struct gpio **ptr); struct gpio_service *dal_gpio_service_create( - enum dce_version dce_version_major, - enum dce_version dce_version_minor, + enum dce_version dce_version, + enum dce_environment dce_environment, struct dc_context *ctx); struct gpio *dal_gpio_service_create_irq( diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 09bc2c249e1a..3c4390d71a82 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1524,6 +1524,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num, struct 
fixed31_32 lut2; struct fixed31_32 delta_lut; struct fixed31_32 delta_index; + const struct fixed31_32 one = dc_fixpt_from_int(1); i = 0; /* fixed_pt library has problems handling too small values */ @@ -1552,6 +1553,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num, } else hw_x = coordinates_x[i].x; + if (dc_fixpt_le(one, hw_x)) + hw_x = one; + norm_x = dc_fixpt_mul(norm_factor, hw_x); index = dc_fixpt_floor(norm_x); if (index < 0 || index > 255) diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 0fdf7a3e96de..96e18050a617 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -100,7 +100,8 @@ enum vsc_packet_revision { //PB7 = MD0 #define MASK_VTEM_MD0__VRR_EN 0x01 #define MASK_VTEM_MD0__M_CONST 0x02 -#define MASK_VTEM_MD0__RESERVED2 0x0C +#define MASK_VTEM_MD0__QMS_EN 0x04 +#define MASK_VTEM_MD0__RESERVED2 0x08 #define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0 //MD1 @@ -109,7 +110,7 @@ enum vsc_packet_revision { //MD2 #define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03 #define MASK_VTEM_MD2__RB 0x04 -#define MASK_VTEM_MD2__RESERVED3 0xF8 +#define MASK_VTEM_MD2__NEXT_TFR 0xF8 //MD3 #define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index e6f40ee9f313..9d97938bd49e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -709,13 +709,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, hwmgr->display_config->num_display > 3 ? 
- data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk : + (data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) : min_mclk, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, - data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk, + data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinVcn, @@ -728,11 +728,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk, + data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxSocclkByFreq, - data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk, + data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxVcn, diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c index 4b3faaccecb9..c8a5a5698edd 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c @@ -1609,19 +1609,7 @@ static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) { - u8 i; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - - for (i = 0; i < table->count; i++) { - if (table->entries[i].clk >= 0) /* XXX */ - break; - } - - if (i >= table->count) - i = table->count - 1; - - return i; + return 0; } static void kv_update_acp_boot_level(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c index a1e7ba5995c5..d6544a6dabc7 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c @@ -7250,17 +7250,15 @@ static int si_parse_power_table(struct amdgpu_device *adev) if (!adev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; - for (i = 0; i < state_array->ucNumEntries; i++) { + for (adev->pm.dpm.num_ps = 0, i = 0; i < state_array->ucNumEntries; i++) { u8 *idx; power_state = (union pplib_power_state *)power_state_offset; non_clock_array_index = power_state->v2.nonClockInfoIndex; non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) &non_clock_info_array->nonClockInfo[non_clock_array_index]; ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL); - if (ps == NULL) { - kfree(adev->pm.dpm.ps); + if (ps == NULL) return -ENOMEM; - } adev->pm.dpm.ps[i].ps_priv = ps; si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], non_clock_info, @@ -7282,8 +7280,8 @@ static int si_parse_power_table(struct amdgpu_device *adev) k++; } power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + adev->pm.dpm.num_ps++; } - adev->pm.dpm.num_ps = state_array->ucNumEntries; /* fill in the vce power states */ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 1c526cb239e0..3a31058b029e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -379,16 +379,31 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu) return 0; } -static int arcturus_check_powerplay_table(struct smu_context *smu) +static void 
arcturus_check_bxco_support(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table; struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + uint32_t val; if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO || - powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) - smu_baco->platform_support = true; + powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) { + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + smu_baco->platform_support = + (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : + false; + } +} + +static int arcturus_check_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_powerplay_table *powerplay_table = + table_context->power_play_table; + + arcturus_check_bxco_support(smu); table_context->thermal_controller_type = powerplay_table->thermal_controller_type; @@ -2131,13 +2146,11 @@ static void arcturus_get_unique_id(struct smu_context *smu) static bool arcturus_is_baco_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - uint32_t val; if (!smu_v11_0_baco_is_support(smu) || amdgpu_sriov_vf(adev)) return false; - val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); - return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false; + return true; } static int arcturus_set_df_cstate(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 2937784bc824..a7773b6453d5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -338,19 +338,34 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, return 0; } -static int navi10_check_powerplay_table(struct smu_context *smu) +static void navi10_check_bxco_support(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table; struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + uint32_t val; + + if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO || + powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) { + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + smu_baco->platform_support = + (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? 
true : + false; + } +} + +static int navi10_check_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_powerplay_table *powerplay_table = + table_context->power_play_table; if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC) smu->dc_controlled_by_gpio = true; - if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO || - powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) - smu_baco->platform_support = true; + navi10_check_bxco_support(smu); table_context->thermal_controller_type = powerplay_table->thermal_controller_type; @@ -1948,13 +1963,11 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu, static bool navi10_is_baco_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - uint32_t val; if (amdgpu_sriov_vf(adev) || (!smu_v11_0_baco_is_support(smu))) return false; - val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); - return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false; + return true; } static int navi10_set_default_od_settings(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 8556c229ff59..45c815262200 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -294,16 +294,47 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu, return 0; } -static int sienna_cichlid_check_powerplay_table(struct smu_context *smu) +static void sienna_cichlid_check_bxco_support(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; struct smu_11_0_7_powerplay_table *powerplay_table = table_context->power_play_table; struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + uint32_t val; if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO || - powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) - smu_baco->platform_support = true; + powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) { + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + smu_baco->platform_support = + (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : + false; + + /* + * Disable BACO entry/exit completely on the SKUs below to + * avoid intermittent hardware failures.
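A rough standalone model of the BXCO gating pattern above, combined with the SKU deny-list that follows: read the strap register once, turn the PX-capable bit into a bool, then force the result off for known-bad device/revision pairs. The bit value and names here are illustrative, not the real register layout; the device/revision pairs are the ones in the hunk:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PX_CAPABLE_BIT	0x80u	/* hypothetical strap bit */

static const struct { uint16_t dev; uint8_t rev; } baco_denylist[] = {
	{ 0x73A1, 0x00 }, { 0x73BF, 0xCF }, { 0x7422, 0x00 },
};

static bool baco_supported(uint32_t strap0, uint16_t dev, uint8_t rev)
{
	size_t i;

	if (!(strap0 & PX_CAPABLE_BIT))
		return false;
	for (i = 0; i < sizeof(baco_denylist) / sizeof(baco_denylist[0]); i++)
		if (baco_denylist[i].dev == dev && baco_denylist[i].rev == rev)
			return false;	/* intermittent failures on these SKUs */
	return true;
}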
+ */ + if (((adev->pdev->device == 0x73A1) && + (adev->pdev->revision == 0x00)) || + ((adev->pdev->device == 0x73BF) && + (adev->pdev->revision == 0xCF)) || + ((adev->pdev->device == 0x7422) && + (adev->pdev->revision == 0x00))) + smu_baco->platform_support = false; + + } +} + +static int sienna_cichlid_check_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_7_powerplay_table *powerplay_table = + table_context->power_play_table; + + if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_HARDWAREDC) + smu->dc_controlled_by_gpio = true; + + sienna_cichlid_check_bxco_support(smu); table_context->thermal_controller_type = powerplay_table->thermal_controller_type; @@ -1736,13 +1767,11 @@ static int sienna_cichlid_run_btc(struct smu_context *smu) static bool sienna_cichlid_is_baco_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - uint32_t val; if (amdgpu_sriov_vf(adev) || (!smu_v11_0_baco_is_support(smu))) return false; - val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); - return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false; + return true; } static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu) @@ -2759,6 +2788,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .dump_pptable = sienna_cichlid_dump_pptable, .init_microcode = smu_v11_0_init_microcode, .load_microcode = smu_v11_0_load_microcode, + .fini_microcode = smu_v11_0_fini_microcode, .init_smc_tables = sienna_cichlid_init_smc_tables, .fini_smc_tables = smu_v11_0_fini_smc_tables, .init_power = smu_v11_0_init_power, @@ -2805,6 +2835,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, .run_btc = sienna_cichlid_run_btc, + .set_power_source = smu_v11_0_set_power_source, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, .get_gpu_metrics = sienna_cichlid_get_gpu_metrics, diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c index 98e915e325dd..bc3f42e915e9 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c @@ -264,6 +264,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms, formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl, layer->layer_type, &n_formats); + if (!formats) { + kfree(kplane); + return -ENOMEM; + } err = drm_universal_plane_init(&kms->base, plane, get_possible_crtcs(kms, c->pipeline), @@ -274,8 +278,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms, komeda_put_fourcc_list(formats); - if (err) - goto cleanup; + if (err) { + kfree(kplane); + return err; + } drm_plane_helper_add(plane, &komeda_plane_helper_funcs); diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index 587d94798f5c..af729094260c 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -483,7 +483,10 @@ static void malidp_crtc_reset(struct drm_crtc *crtc) if (crtc->state) malidp_crtc_destroy_state(crtc, crtc->state); - __drm_atomic_helper_crtc_reset(crtc, &state->base); + if (state) + __drm_atomic_helper_crtc_reset(crtc, &state->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); } static int malidp_crtc_enable_vblank(struct drm_crtc *crtc) diff --git 
a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index a0f6ee15c248..711061bf3eb7 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -386,10 +386,7 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); #else static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) { - unsigned int offset = adv7511->type == ADV7533 ? - ADV7533_REG_CEC_OFFSET : 0; - - regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, + regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, ADV7511_CEC_CTRL_POWER_DOWN); return 0; } diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c index a20a45c0b353..ddd1305b82b2 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c @@ -316,7 +316,7 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) goto err_cec_alloc; } - regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0); + regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0); /* cec soft reset */ regmap_write(adv7511->regmap_cec, ADV7511_REG_CEC_SOFT_RESET + offset, 0x01); @@ -343,7 +343,7 @@ err_cec_alloc: dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n", ret); err_cec_parse_dt: - regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, + regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, ADV7511_CEC_CTRL_POWER_DOWN); return ret == -EPROBE_DEFER ? ret : 0; } diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 338c6a2ad322..7f3000152b78 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -1066,6 +1066,10 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv) ADV7511_CEC_I2C_ADDR_DEFAULT); if (IS_ERR(adv->i2c_cec)) return PTR_ERR(adv->i2c_cec); + + regmap_write(adv->regmap, ADV7511_REG_CEC_I2C_ADDR, + adv->i2c_cec->addr << 1); + i2c_set_clientdata(adv->i2c_cec, adv); adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec, @@ -1270,9 +1274,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) if (ret) goto err_i2c_unregister_packet; - regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, - adv7511->i2c_cec->addr << 1); - INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work); if (i2c->irq) { @@ -1309,6 +1310,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) return 0; err_unregister_cec: + cec_unregister_adapter(adv7511->cec_adap); i2c_unregister_device(adv7511->i2c_cec); if (adv7511->cec_clk) clk_disable_unprepare(adv7511->cec_clk); @@ -1382,10 +1384,21 @@ static struct i2c_driver adv7511_driver = { static int __init adv7511_init(void) { - if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) - mipi_dsi_driver_register(&adv7533_dsi_driver); + int ret; - return i2c_add_driver(&adv7511_driver); + if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) { + ret = mipi_dsi_driver_register(&adv7533_dsi_driver); + if (ret) + return ret; + } + + ret = i2c_add_driver(&adv7511_driver); + if (ret) { + if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) + mipi_dsi_driver_unregister(&adv7533_dsi_driver); + } + + return ret; } module_init(adv7511_init); diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index fa0fb7bb3458..4784347a5f44 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ 
b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1529,6 +1529,25 @@ static void analogix_dp_bridge_detach(struct drm_bridge *bridge) dp->plat_data->detach(dp->plat_data, bridge); } +static +struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp, + struct drm_atomic_state *state) +{ + struct drm_encoder *encoder = dp->encoder; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + + connector = drm_atomic_get_old_connector_for_encoder(state, encoder); + if (!connector) + return NULL; + + conn_state = drm_atomic_get_old_connector_state(state, connector); + if (!conn_state) + return NULL; + + return conn_state->crtc; +} + static struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp, struct drm_atomic_state *state) @@ -1713,14 +1732,16 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, { struct drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; - struct drm_crtc *crtc; + struct drm_crtc *old_crtc, *new_crtc; + struct drm_crtc_state *old_crtc_state = NULL; struct drm_crtc_state *new_crtc_state = NULL; + int ret; - crtc = analogix_dp_get_new_crtc(dp, old_state); - if (!crtc) + new_crtc = analogix_dp_get_new_crtc(dp, old_state); + if (!new_crtc) goto out; - new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc); + new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc); if (!new_crtc_state) goto out; @@ -1729,6 +1750,19 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, return; out: + old_crtc = analogix_dp_get_old_crtc(dp, old_state); + if (old_crtc) { + old_crtc_state = drm_atomic_get_old_crtc_state(old_state, + old_crtc); + + /* When moving from PSR to fully disabled, exit PSR first. 
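The analogix_dp_get_old_crtc() helper added above is the stock atomic-state lookup: find the connector that fed this encoder in the old state, then read the CRTC out of that connector's old state. Condensed to its skeleton (same DRM core calls, surrounding driver types elided):

static struct drm_crtc *
old_crtc_for_encoder(struct drm_atomic_state *state,
		     struct drm_encoder *encoder)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;

	connector = drm_atomic_get_old_connector_for_encoder(state, encoder);
	if (!connector)
		return NULL;
	conn_state = drm_atomic_get_old_connector_state(state, connector);
	return conn_state ? conn_state->crtc : NULL;
}

With that in hand, the disable path below can tell whether the old CRTC was in self-refresh and exit PSR before fully shutting down.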
*/ + if (old_crtc_state && old_crtc_state->self_refresh_active) { + ret = analogix_dp_disable_psr(dp); + if (ret) + DRM_ERROR("Failed to disable psr (%d)\n", ret); + } + } + analogix_dp_bridge_disable(bridge); } @@ -2026,8 +2060,19 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct analogix_dp_device *dp = to_dp(aux); + int ret; - return analogix_dp_transfer(dp, msg); + pm_runtime_get_sync(dp->dev); + + ret = analogix_dp_detect_hpd(dp); + if (ret) + goto out; + + ret = analogix_dp_transfer(dp, msg); +out: + pm_runtime_put(dp->dev); + + return ret; } int analogix_dp_audio_hw_params(struct analogix_dp_device *dp, diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index 29b1ce2140ab..1dcc28a4d853 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -816,13 +816,14 @@ static int lt9611_connector_init(struct drm_bridge *bridge, struct lt9611 *lt961 drm_connector_helper_add(<9611->connector, <9611_bridge_connector_helper_funcs); - drm_connector_attach_encoder(<9611->connector, bridge->encoder); if (!bridge->encoder) { DRM_ERROR("Parent encoder object not found"); return -ENODEV; } + drm_connector_attach_encoder(<9611->connector, bridge->encoder); + return 0; } diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index cce98bf2a4e7..72248a565579 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -296,7 +296,9 @@ static void ge_b850v3_lvds_remove(void) * This check is to avoid both the drivers * removing the bridge in their remove() function */ - if (!ge_b850v3_lvds_ptr) + if (!ge_b850v3_lvds_ptr || + !ge_b850v3_lvds_ptr->stdp2690_i2c || + !ge_b850v3_lvds_ptr->stdp4028_i2c) goto out; drm_bridge_remove(&ge_b850v3_lvds_ptr->bridge); diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c index 7bd0affa057a..924851010400 100644 --- a/drivers/gpu/drm/bridge/parade-ps8640.c +++ b/drivers/gpu/drm/bridge/parade-ps8640.c @@ -333,8 +333,8 @@ static int ps8640_probe(struct i2c_client *client) if (IS_ERR(ps_bridge->panel_bridge)) return PTR_ERR(ps_bridge->panel_bridge); - ps_bridge->supplies[0].supply = "vdd33"; - ps_bridge->supplies[1].supply = "vdd12"; + ps_bridge->supplies[0].supply = "vdd12"; + ps_bridge->supplies[1].supply = "vdd33"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies), ps_bridge->supplies); if (ret) diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index ec7745c31da0..ab0bce4a988c 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -605,7 +605,7 @@ static void *sii8620_burst_get_tx_buf(struct sii8620 *ctx, int len) u8 *buf = &ctx->burst.tx_buf[ctx->burst.tx_count]; int size = len + 2; - if (ctx->burst.tx_count + size > ARRAY_SIZE(ctx->burst.tx_buf)) { + if (ctx->burst.tx_count + size >= ARRAY_SIZE(ctx->burst.tx_buf)) { dev_err(ctx->dev, "TX-BLK buffer exhausted\n"); ctx->error = -EINVAL; return NULL; @@ -622,7 +622,7 @@ static u8 *sii8620_burst_get_rx_buf(struct sii8620 *ctx, int len) u8 *buf = &ctx->burst.rx_buf[ctx->burst.rx_count]; int size = len + 1; - if (ctx->burst.tx_count + size > ARRAY_SIZE(ctx->burst.tx_buf)) { + if (ctx->burst.rx_count + size >= ARRAY_SIZE(ctx->burst.rx_buf)) { dev_err(ctx->dev, "RX-BLK buffer exhausted\n"); ctx->error = 
-EINVAL; return NULL; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 686d48011e37..5b45fe44579f 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -3650,6 +3650,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, * if supported. In any case the default RGB888 format is added */ + /* Default 8bit RGB fallback */ + output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24; + if (max_bpc >= 16 && info->bpc == 16) { if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444) output_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48; @@ -3683,9 +3686,6 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444) output_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24; - /* Default 8bit RGB fallback */ - output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24; - *num_output_fmts = i; return output_fmts; diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 34a3e4e9f717..b4f7e7a7f7c5 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1535,19 +1535,12 @@ static irqreturn_t tc_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int tc_probe_edp_bridge_endpoint(struct tc_data *tc) { - struct device *dev = &client->dev; + struct device *dev = tc->dev; struct drm_panel *panel; - struct tc_data *tc; int ret; - tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL); - if (!tc) - return -ENOMEM; - - tc->dev = dev; - /* port@2 is the output port */ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, NULL); if (ret && ret != -ENODEV) @@ -1566,6 +1559,50 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) tc->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; } + return 0; +} + +static void tc_clk_disable(void *data) +{ + struct clk *refclk = data; + + clk_disable_unprepare(refclk); +} + +static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct tc_data *tc; + int ret; + + tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL); + if (!tc) + return -ENOMEM; + + tc->dev = dev; + + ret = tc_probe_edp_bridge_endpoint(tc); + if (ret) + return ret; + + tc->refclk = devm_clk_get(dev, "ref"); + if (IS_ERR(tc->refclk)) { + ret = PTR_ERR(tc->refclk); + dev_err(dev, "Failed to get refclk: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(tc->refclk); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, tc_clk_disable, tc->refclk); + if (ret) + return ret; + + /* tRSTW = 100 cycles , at 13 MHz that is ~7.69 us */ + usleep_range(10, 15); + /* Shut down GPIO is optional */ tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH); if (IS_ERR(tc->sd_gpio)) @@ -1586,13 +1623,6 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) usleep_range(5000, 10000); } - tc->refclk = devm_clk_get(dev, "ref"); - if (IS_ERR(tc->refclk)) { - ret = PTR_ERR(tc->refclk); - dev_err(dev, "Failed to get refclk: %d\n", ret); - return ret; - } - tc->regmap = devm_regmap_init_i2c(client, &tc_regmap_config); if (IS_ERR(tc->regmap)) { ret = PTR_ERR(tc->regmap); diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 1a58481037b3..77a447a3fb1d 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ 
b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -621,9 +621,9 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata) &pdata->bridge.encoder->crtc->state->adjusted_mode; u8 hsync_polarity = 0, vsync_polarity = 0; - if (mode->flags & DRM_MODE_FLAG_PHSYNC) + if (mode->flags & DRM_MODE_FLAG_NHSYNC) hsync_polarity = CHA_HSYNC_POLARITY; - if (mode->flags & DRM_MODE_FLAG_PVSYNC) + if (mode->flags & DRM_MODE_FLAG_NVSYNC) vsync_polarity = CHA_VSYNC_POLARITY; ti_sn_bridge_write_u16(pdata, SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG, diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 9418d0847630..4108c7265d53 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -999,9 +999,19 @@ crtc_needs_disable(struct drm_crtc_state *old_state, return drm_atomic_crtc_effectively_active(old_state); /* - * We need to run through the crtc_funcs->disable() function if the CRTC - * is currently on, if it's transitioning to self refresh mode, or if - * it's in self refresh mode and needs to be fully disabled. + * We need to disable bridge(s) and CRTC if we're transitioning out of + * self-refresh and changing CRTCs at the same time, because the + * bridge tracks self-refresh status via CRTC state. + */ + if (old_state->self_refresh_active && + old_state->crtc != new_state->crtc) + return true; + + /* + * We also need to run through the crtc_funcs->disable() function if + * the CRTC is currently on, if it's transitioning to self refresh + * mode, or if it's in self refresh mode and needs to be fully + * disabled. */ return old_state->active || (old_state->self_refresh_active && !new_state->enable) || diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 044acd07c153..d799ec14fd7f 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -753,8 +753,8 @@ static int select_bus_fmt_recursive(struct drm_bridge *first_bridge, struct drm_connector_state *conn_state, u32 out_bus_fmt) { + unsigned int i, num_in_bus_fmts = 0; struct drm_bridge_state *cur_state; - unsigned int num_in_bus_fmts, i; struct drm_bridge *prev_bridge; u32 *in_bus_fmts; int ret; @@ -875,7 +875,7 @@ drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge, struct drm_connector *conn = conn_state->connector; struct drm_encoder *encoder = bridge->encoder; struct drm_bridge_state *last_bridge_state; - unsigned int i, num_out_bus_fmts; + unsigned int i, num_out_bus_fmts = 0; struct drm_bridge *last_bridge; u32 *out_bus_fmts; int ret = 0; diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h index 25ce42e79995..61e09f8a8d0f 100644 --- a/drivers/gpu/drm/drm_crtc_helper_internal.h +++ b/drivers/gpu/drm/drm_crtc_helper_internal.h @@ -32,16 +32,6 @@ #include #include -/* drm_fb_helper.c */ -#ifdef CONFIG_DRM_FBDEV_EMULATION -int drm_fb_helper_modinit(void); -#else -static inline int drm_fb_helper_modinit(void) -{ - return 0; -} -#endif - /* drm_dp_aux_dev.c */ #ifdef CONFIG_DRM_DP_AUX_CHARDEV int drm_dp_aux_dev_init(void); diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 1c9ea9f7fdaf..f2ff0bfdf54d 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -62,23 +62,45 @@ ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, u8 offset, void *buffer, size_t size) { + u8 zero = 0; + char *tmpbuf = NULL; + /* + * As sub-addressing is not supported by all adaptors, + * always 
explicitly read from the start and discard + * any bytes that come before the requested offset. + * This way, no matter whether the adaptor supports it + * or not, we'll end up reading the proper data. + */ struct i2c_msg msgs[] = { { .addr = DP_DUAL_MODE_SLAVE_ADDRESS, .flags = 0, .len = 1, - .buf = &offset, + .buf = &zero, }, { .addr = DP_DUAL_MODE_SLAVE_ADDRESS, .flags = I2C_M_RD, - .len = size, + .len = size + offset, .buf = buffer, }, }; int ret; + if (offset) { + tmpbuf = kmalloc(size + offset, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + + msgs[1].buf = tmpbuf; + } + ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)); + if (tmpbuf) + memcpy(buffer, tmpbuf + offset, size); + + kfree(tmpbuf); + if (ret < 0) return ret; if (ret != ARRAY_SIZE(msgs)) @@ -205,18 +227,6 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) if (ret) return DRM_DP_DUAL_MODE_UNKNOWN; - /* - * Sigh. Some (maybe all?) type 1 adaptors are broken and ack - * the offset but ignore it, and instead they just always return - * data from the start of the HDMI ID buffer. So for a broken - * type 1 HDMI adaptor a single byte read will always give us - * 0x44, and for a type 1 DVI adaptor it should give 0x00 - * (assuming it implements any registers). Fortunately neither - * of those values will match the type 2 signature of the - * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with - * the type 2 adaptor detection safely even in the presence - * of broken type 1 adaptors. - */ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, &adaptor_id, sizeof(adaptor_id)); DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n", @@ -231,11 +241,10 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) return DRM_DP_DUAL_MODE_TYPE2_DVI; } /* - * If neither a proper type 1 ID nor a broken type 1 adaptor - * as described above, assume type 1, but let the user know - * that we may have misdetected the type. + * If not a proper type 1 ID, still assume type 1, but let + * the user know that we may have misdetected the type. */ - if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0]) + if (!is_type1_adaptor(adaptor_id)) DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n", adaptor_id); @@ -339,10 +348,8 @@ EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output); * @enable: enable (as opposed to disable) the TMDS output buffers * * Set the state of the TMDS output buffers in the adaptor. For - * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As - * some type 1 adaptors have problems with registers (see comments - * in drm_dp_dual_mode_detect()) we avoid touching the register, - * making this function a no-op on type 1 adaptors. + * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. + * Type1 adaptors do not support any register writes. 
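The offset emulation above reduces to simple buffer arithmetic: always read offset + size bytes starting at register 0, then keep only the tail. A freestanding sketch, with read_from_zero() standing in for the raw I2C transaction (a hypothetical primitive, not a real helper):

#include <stdint.h>
#include <string.h>

/* Reads n bytes starting at device register 0; returns 0 on success. */
int read_from_zero(void *dev, uint8_t *dst, size_t n);

static int read_at_offset(void *dev, uint8_t offset, uint8_t *dst, size_t size)
{
	uint8_t tmp[256];
	int ret;

	if ((size_t)offset + size > sizeof(tmp))
		return -1;
	ret = read_from_zero(dev, tmp, offset + size);
	if (ret)
		return ret;
	memcpy(dst, tmp + offset, size);	/* discard the leading bytes */
	return 0;
}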
* * Returns: * 0 on success, negative error code on failure diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 3c55753bab16..6ba16db77500 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -2172,17 +2172,8 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux, struct drm_dp_phy_test_params *data, u8 dp_rev) { int err, i; - u8 link_config[2]; u8 test_pattern; - link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate); - link_config[1] = data->num_lanes; - if (data->enhanced_frame_cap) - link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2); - if (err < 0) - return err; - test_pattern = data->phy_pattern; if (dp_rev < 0x12) { test_pattern = (test_pattern << 2) & diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 1f54e9470165..0feeac52e4eb 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -4792,6 +4792,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr, mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port); drm_edid_get_monitor_name(mst_edid, name, namelen); + kfree(mst_edid); } /** @@ -4855,14 +4856,14 @@ void drm_dp_mst_dump_topology(struct seq_file *m, seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf); ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); - if (ret) { + if (ret != 2) { seq_printf(m, "faux/mst read failed\n"); goto out; } seq_printf(m, "faux/mst: %*ph\n", 2, buf); ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); - if (ret) { + if (ret != 1) { seq_printf(m, "mst ctrl read failed\n"); goto out; } @@ -4870,7 +4871,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m, /* dump the standard OUI branch header */ ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); - if (ret) { + if (ret != DP_BRANCH_OUI_HEADER_SIZE) { seq_printf(m, "branch oui read failed\n"); goto out; } @@ -5237,7 +5238,7 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm mst_state = drm_atomic_get_mst_topology_state(state, mgr); if (IS_ERR(mst_state)) - return -EINVAL; + return PTR_ERR(mst_state); list_for_each_entry(pos, &mst_state->vcpis, next) { diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 2620182322f8..879fccd43390 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -610,7 +610,7 @@ static int drm_dev_init(struct drm_device *dev, mutex_init(&dev->clientlist_mutex); mutex_init(&dev->master_mutex); - ret = drmm_add_action(dev, drm_dev_init_release, NULL); + ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 4fdfb41b91e1..258b01e2f1b7 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1995,9 +1995,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, connector_bad_edid(connector, edid, edid[0x7e] + 1); - edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions; - edid[0x7e] = valid_extensions; - new = kmalloc_array(valid_extensions + 1, EDID_LENGTH, GFP_KERNEL); if (!new) @@ -2014,6 +2011,9 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, base += EDID_LENGTH; } + new[EDID_LENGTH - 1] += new[0x7e] - valid_extensions; + new[0x7e] = valid_extensions; + kfree(edid); edid = new; } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 
8033467db4be..ac5d61e65124 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2271,24 +2271,3 @@ void drm_fbdev_generic_setup(struct drm_device *dev, drm_client_register(&fb_helper->client); } EXPORT_SYMBOL(drm_fbdev_generic_setup); - -/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT) - * but the module doesn't depend on any fb console symbols. At least - * attempt to load fbcon to avoid leaving the system without a usable console. - */ -int __init drm_fb_helper_modinit(void) -{ -#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT) - const char name[] = "fbcon"; - struct module *fbcon; - - mutex_lock(&module_mutex); - fbcon = find_module(name); - mutex_unlock(&module_mutex); - - if (!fbcon) - request_module_nowait(name); -#endif - return 0; -} -EXPORT_SYMBOL(drm_fb_helper_modinit); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 09d4f8922539..8b30e8d83fbc 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1262,7 +1262,7 @@ retry: ret = dma_resv_lock_slow_interruptible(obj->resv, acquire_ctx); if (ret) { - ww_acquire_done(acquire_ctx); + ww_acquire_fini(acquire_ctx); return ret; } } @@ -1287,7 +1287,7 @@ retry: goto retry; } - ww_acquire_done(acquire_ctx); + ww_acquire_fini(acquire_ctx); return ret; } } diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index cfacce0418a4..c56656a95cf9 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -563,12 +563,20 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - int ret; WARN_ON(shmem->base.import_attach); - ret = drm_gem_shmem_get_pages(shmem); - WARN_ON_ONCE(ret != 0); + mutex_lock(&shmem->pages_lock); + + /* + * We should have already pinned the pages when the buffer was first + * mmap'd, vm_open() just grabs an additional reference for the new + * mm the vma is getting copied into (ie. on fork()). 
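A minimal model of the vm_open() fix here: by the time the VMA is duplicated (on fork()), the pages were already pinned by mmap(), so the open hook only bumps the existing use count under the lock and never tries to pin from scratch. Names are hypothetical; the real code operates on struct drm_gem_shmem_object:

struct shmem_like {
	struct mutex pages_lock;
	unsigned int pages_use_count;	/* nonzero once mmap() pinned pages */
};

static void obj_vm_open(struct shmem_like *obj)
{
	mutex_lock(&obj->pages_lock);
	/* Pinning here could sleep or fail at a point with no way to
	 * report the error, so only take an extra reference. */
	if (!WARN_ON_ONCE(!obj->pages_use_count))
		obj->pages_use_count++;
	mutex_unlock(&obj->pages_lock);
}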
+ */ + if (!WARN_ON_ONCE(!shmem->pages_use_count)) + shmem->pages_use_count++; + + mutex_unlock(&shmem->pages_lock); drm_gem_vm_open(vma); } @@ -616,10 +624,8 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) shmem = to_drm_gem_shmem_obj(obj); ret = drm_gem_shmem_get_pages(shmem); - if (ret) { - drm_gem_vm_close(vma); + if (ret) return ret; - } vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index f80e0f28087d..41efe40bc70f 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -116,7 +116,8 @@ static inline void drm_vblank_flush_worker(struct drm_vblank_crtc *vblank) static inline void drm_vblank_destroy_worker(struct drm_vblank_crtc *vblank) { - kthread_destroy_worker(vblank->worker); + if (vblank->worker) + kthread_destroy_worker(vblank->worker); } int drm_vblank_worker_init(struct drm_vblank_crtc *vblank); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 83fc51b13615..73818ffa019b 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -473,7 +473,13 @@ EXPORT_SYMBOL(drm_invalid_op); */ static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value) { - int len; + size_t len; + + /* don't attempt to copy a NULL pointer */ + if (WARN_ONCE(!value, "BUG: the value to copy was not set!")) { + *buf_len = 0; + return 0; + } /* don't overflow userbuf */ len = strlen(value); diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c index 221a8528c993..f933da1656eb 100644 --- a/drivers/gpu/drm/drm_kms_helper_common.c +++ b/drivers/gpu/drm/drm_kms_helper_common.c @@ -64,19 +64,18 @@ MODULE_PARM_DESC(edid_firmware, static int __init drm_kms_helper_init(void) { - int ret; + /* + * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT) + * but the module doesn't depend on any fb console symbols. At least + * attempt to load fbcon to avoid leaving the system without a usable + * console. + */ + if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) && + IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) && + !IS_ENABLED(CONFIG_EXPERT)) + request_module_nowait("fbcon"); - /* Call init functions from specific kms helpers here */ - ret = drm_fb_helper_modinit(); - if (ret < 0) - goto out; - - ret = drm_dp_aux_dev_init(); - if (ret < 0) - goto out; - -out: - return ret; + return drm_dp_aux_dev_init(); } static void __exit drm_kms_helper_exit(void) diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 230c4fd7131c..9f132229aed1 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -1137,6 +1137,13 @@ int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz, size_t chunk; int ret; + /* In __spi_validate, there's a validation that no partial transfers + * are accepted (xfer->len % w_size must be zero). + * Here we align max_chunk to multiple of 2 (16bits), + * to prevent transfers from being rejected. 
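In isolation, the alignment this comment describes (and the ALIGN_DOWN line just after applies) simply rounds the chunk ceiling down to an even byte count so every SPI transfer stays a whole number of 16-bit words. A quick sketch with made-up numbers, ALIGN_DOWN simplified to the power-of-two case:

#define ALIGN_DOWN(x, a)	((x) & ~((typeof(x))(a) - 1))

static size_t next_chunk(size_t remaining, size_t max_chunk)
{
	max_chunk = ALIGN_DOWN(max_chunk, 2);	/* whole 16-bit words only */
	return remaining < max_chunk ? remaining : max_chunk;
}

/* e.g. a max_chunk of 65535 becomes 65534, so a 100000-byte buffer goes
 * out as 65534 + 34466 bytes - both even, neither rejected. */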
+ */ + max_chunk = ALIGN_DOWN(max_chunk, 2); + spi_message_init_with_transfers(&m, &tr, 1); while (len) { diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index c73aaab95e0f..d95bf573a80b 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -300,6 +300,7 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); + mipi_dsi_detach(dsi); mipi_dsi_device_unregister(dsi); return 0; diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 448c2f2d803a..ca0fefeaab20 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -128,6 +128,18 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Acer Switch V 10 (SW5-017) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Anbernic Win600 */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Win600"), + }, + .driver_data = (void *)&lcd720x1280_rightside_up, }, { /* Asus T100HA */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), @@ -166,6 +178,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"), }, .driver_data = (void *)&lcd720x1280_rightside_up, + }, { /* GPD Win Max */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1619-01"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* * GPD Pocket, note that the the DMI data is less generic then * it seems, devices with a board-vendor of "AMI Corporation" diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index affe1cfed009..24f643982903 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -186,6 +186,13 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, if (WARN_ON(config->num_total_plane >= 32)) return -EINVAL; + /* + * First driver to need more than 64 formats needs to fix this. Each + * format is encoded as a bit and the current code only supports a u64. + */ + if (WARN_ON(format_count > 64)) + return -EINVAL; + WARN_ON(drm_drv_uses_atomic_modeset(dev) && (!funcs->atomic_destroy_state || !funcs->atomic_duplicate_state)); @@ -207,13 +214,6 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, return -ENOMEM; } - /* - * First driver to need more than 64 formats needs to fix this. Each - * format is encoded as a bit and the current code only supports a u64. 
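The 64-format ceiling that the drm_plane.c hunk moves earlier exists because each supported format is tracked as one bit of a u64, so membership checks are a single shift and mask. A sketch of why the bound matters (helper names hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* idx is the format's position in the plane's format array. */
static bool mask_contains(uint64_t format_mask, int idx)
{
	return idx >= 0 && idx < 64 && (format_mask & (1ULL << idx));
}

/* With format_count > 64, 1ULL << idx would be undefined for the
 * overflowing entries, hence the early WARN_ON and -EINVAL. */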
- */ - if (WARN_ON(format_count > 64)) - return -EINVAL; - if (format_modifiers) { const uint64_t *temp_modifiers = format_modifiers; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 984569a59a90..9ba2fe48228f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -282,6 +282,12 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context, mutex_lock(&context->lock); + /* Bail if the mapping has been reaped by another thread */ + if (!mapping->context) { + mutex_unlock(&context->lock); + return; + } + /* If the vram node is on the mm, unmap and remove the node */ if (mapping->vram_node.mm == &context->mm) etnaviv_iommu_remove_mapping(context, mapping); diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index f2d87a7445c7..1c04c232dce1 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -800,31 +800,40 @@ static int exynos7_decon_resume(struct device *dev) if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to prepare_enable the pclk [%d]\n", ret); - return ret; + goto err_pclk_enable; } ret = clk_prepare_enable(ctx->aclk); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to prepare_enable the aclk [%d]\n", ret); - return ret; + goto err_aclk_enable; } ret = clk_prepare_enable(ctx->eclk); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to prepare_enable the eclk [%d]\n", ret); - return ret; + goto err_eclk_enable; } ret = clk_prepare_enable(ctx->vclk); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to prepare_enable the vclk [%d]\n", ret); - return ret; + goto err_vclk_enable; } return 0; + +err_vclk_enable: + clk_disable_unprepare(ctx->eclk); +err_eclk_enable: + clk_disable_unprepare(ctx->aclk); +err_aclk_enable: + clk_disable_unprepare(ctx->pclk); +err_pclk_enable: + return ret; } #endif diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 3df6d6e850f5..70148ae16f14 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -529,15 +529,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc, WARN_ON(drm_crtc_vblank_get(crtc) != 0); gma_crtc->page_flip_event = event; + spin_unlock_irqrestore(&dev->event_lock, flags); /* Call this locked if we want an event at vblank interrupt. 
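The exynos7_decon_resume() rework above is the canonical goto-unwind idiom: every failure jumps to a label that tears down exactly what was already enabled, in reverse order. Reduced to a two-clock skeleton (clock names hypothetical; clk_prepare_enable()/clk_disable_unprepare() are the real kernel API):

static int resume_clocks(struct clk *pclk, struct clk *aclk)
{
	int ret;

	ret = clk_prepare_enable(pclk);
	if (ret)
		goto err_pclk;

	ret = clk_prepare_enable(aclk);
	if (ret)
		goto err_aclk;

	return 0;

err_aclk:
	clk_disable_unprepare(pclk);	/* undo in reverse order */
err_pclk:
	return ret;
}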
*/ ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb); if (ret) { - gma_crtc->page_flip_event = NULL; - drm_crtc_vblank_put(crtc); + spin_lock_irqsave(&dev->event_lock, flags); + if (gma_crtc->page_flip_event) { + gma_crtc->page_flip_event = NULL; + drm_crtc_vblank_put(crtc); + } + spin_unlock_irqrestore(&dev->event_lock, flags); } - - spin_unlock_irqrestore(&dev->event_lock, flags); } else { ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb); } diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 531c5485be17..bd3df5adb96f 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -536,14 +536,15 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) { - struct drm_crtc *crtc = NULL; + struct drm_crtc *crtc; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + if (gma_crtc->pipe == pipe) - break; + return crtc; } - return crtc; + return NULL; } int gma_connector_clones(struct drm_device *dev, int type_mask) diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig index 43943e980203..4e41c144a290 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig +++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_HISI_HIBMC tristate "DRM Support for Hisilicon Hibmc" - depends on DRM && PCI && ARM64 + depends on DRM && PCI && (ARM64 || COMPILE_TEST) + depends on MMU select DRM_KMS_HELPER select DRM_VRAM_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 0bf31f9a8af5..e6780fcc5006 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -526,8 +526,8 @@ static int i915_dmc_info(struct seq_file *m, void *unused) * reg for DC3CO debugging and validation, * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter. */ - seq_printf(m, "DC3CO count: %d\n", - intel_de_read(dev_priv, DMC_DEBUG3)); + seq_printf(m, "DC3CO count: %d\n", intel_de_read(dev_priv, IS_DGFX(dev_priv) ? + DG1_DMC_DEBUG3 : TGL_DMC_DEBUG3)); } else { dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT : SKL_CSR_DC3_DC5_COUNT; diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index f2c8b56be9ea..261a5e97a0b4 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -163,6 +163,28 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) intel_dp_compute_rate(intel_dp, intel_dp->link_rate, &link_bw, &rate_select); + /* + * WaEdpLinkRateDataReload + * + * Parade PS8461E MUX (used on various TGL+ laptops) needs + * to snoop the link rates reported by the sink when we + * use LINK_RATE_SET in order to operate in jitter cleaning + * mode (as opposed to redriver mode). Unfortunately it + * loses track of the snooped link rates when powered down, + * so we need to make it re-snoop often. Without this, high + * link rates are not stable.
+ */ + if (!link_bw) { + struct intel_connector *connector = intel_dp->attached_connector; + __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; + + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n", + connector->base.base.id, connector->base.name); + + drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, + sink_rates, sizeof(sink_rates)); + } + if (link_bw) drm_dbg_kms(&i915->drm, "Using LINK_BW_SET value %02x\n", link_bw); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index ecaa538b2d35..ef7878193491 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -790,6 +790,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); if (ret) { + drm_dp_mst_put_port_malloc(port); intel_connector_free(intel_connector); return NULL; } diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index eed037ec0b29..8777d35ac7a7 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -121,9 +121,25 @@ struct i2c_adapter_lookup { #define ICL_GPIO_DDPA_CTRLCLK_2 8 #define ICL_GPIO_DDPA_CTRLDATA_2 9 -static enum port intel_dsi_seq_port_to_port(u8 port) +static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi, + u8 seq_port) { - return port ? PORT_C : PORT_A; + /* + * If single link DSI is being used on any port, the VBT sequence block + * send packet apparently always has 0 for the port. Just use the port + * we have configured, and ignore the sequence block port. + */ + if (hweight8(intel_dsi->ports) == 1) + return ffs(intel_dsi->ports) - 1; + + if (seq_port) { + if (intel_dsi->ports & PORT_B) + return PORT_B; + else if (intel_dsi->ports & PORT_C) + return PORT_C; + } + + return PORT_A; } static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, @@ -145,15 +161,10 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, seq_port = (flags >> MIPI_PORT_SHIFT) & 3; - /* For DSI single link on Port A & C, the seq_port value which is - * parsed from Sequence Block#53 of VBT has been set to 0 - * Now, read/write of packets for the DSI single link on Port A and - * Port C will based on the DVO port from VBT block 2. - */ - if (intel_dsi->ports == (1 << PORT_C)) - port = PORT_C; - else - port = intel_dsi_seq_port_to_port(seq_port); + port = intel_dsi_seq_port_to_port(intel_dsi, seq_port); + + if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port])) + goto out; dsi_device = intel_dsi->dsi_hosts[port]->device; if (!dsi_device) { diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 6d083b98f6ae..abff2d6cedd1 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -376,21 +376,6 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, return -EINVAL; } - /* - * The port numbering and mapping here is bizarre. The now-obsolete - * swsci spec supports ports numbered [0..4]. Port E is handled as a - * special case, but port F and beyond are not. The functionality is - * supposed to be obsolete for new platforms. Just bail out if the port - * number is out of bounds after mapping. 
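The intel_dsi_seq_port_to_port() rework above leans on two small bit tricks: a popcount (hweight8() in the kernel) to detect the single-link case, and ffs() to turn the lone set bit back into a port number. A loose userspace rendering, assuming a plain bitmask of ports (the kernel code's mask convention differs in detail):

enum port { PORT_A, PORT_B, PORT_C };

static enum port pick_port(unsigned int ports_mask, int seq_port)
{
	/* Single-link DSI: the VBT's port field is unreliable, so use
	 * the one port we actually configured. */
	if (__builtin_popcount(ports_mask) == 1)
		return __builtin_ffs(ports_mask) - 1;	/* bit 0 -> PORT_A */

	if (seq_port && (ports_mask & (1u << PORT_B)))
		return PORT_B;
	if (seq_port && (ports_mask & (1u << PORT_C)))
		return PORT_C;
	return PORT_A;
}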
- */ - if (port > 4) { - drm_dbg_kms(&dev_priv->drm, - "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n", - intel_encoder->base.base.id, intel_encoder->base.name, - port_name(intel_encoder->port), port); - return -EINVAL; - } - if (!enable) parm |= 4 << 8; diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index 46beb155d835..8eb1842f14ce 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -156,6 +156,9 @@ static struct intel_quirk intel_quirks[] = { /* ASRock ITX*/ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, + /* ECS Liva Q2 */ + { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time }, + { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time }, }; void intel_init_quirks(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 4eaa4aa86ecd..58f8fb7c8799 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2760,13 +2760,10 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) if (!intel_sdvo_connector) return false; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0; + if (device == 0) intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1; + else if (device == 1) intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; - } intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; @@ -2821,7 +2818,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; - intel_sdvo->controlled_output |= type; intel_sdvo_connector->output_flag = type; if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { @@ -2862,13 +2858,10 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_DAC; connector->connector_type = DRM_MODE_CONNECTOR_VGA; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; + if (device == 0) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; + else if (device == 1) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; - } if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { kfree(intel_sdvo_connector); @@ -2898,13 +2891,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_LVDS; connector->connector_type = DRM_MODE_CONNECTOR_LVDS; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; + if (device == 0) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; + else if (device == 1) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; - } if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { kfree(intel_sdvo_connector); @@ -2937,16 +2927,39 @@ err: return false; } +static u16 intel_sdvo_filter_output_flags(u16 flags) +{ + flags &= SDVO_OUTPUT_MASK; + + /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ + if (!(flags & SDVO_OUTPUT_TMDS0)) + flags &= ~SDVO_OUTPUT_TMDS1; + + if (!(flags & 
SDVO_OUTPUT_RGB0)) + flags &= ~SDVO_OUTPUT_RGB1; + + if (!(flags & SDVO_OUTPUT_LVDS0)) + flags &= ~SDVO_OUTPUT_LVDS1; + + return flags; +} + static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) { - /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); + + flags = intel_sdvo_filter_output_flags(flags); + + intel_sdvo->controlled_output = flags; + + intel_sdvo_select_ddc_bus(i915, intel_sdvo); if (flags & SDVO_OUTPUT_TMDS0) if (!intel_sdvo_dvi_init(intel_sdvo, 0)) return false; - if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) + if (flags & SDVO_OUTPUT_TMDS1) if (!intel_sdvo_dvi_init(intel_sdvo, 1)) return false; @@ -2967,7 +2980,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) if (!intel_sdvo_analog_init(intel_sdvo, 0)) return false; - if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) + if (flags & SDVO_OUTPUT_RGB1) if (!intel_sdvo_analog_init(intel_sdvo, 1)) return false; @@ -2975,14 +2988,13 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) if (!intel_sdvo_lvds_init(intel_sdvo, 0)) return false; - if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) + if (flags & SDVO_OUTPUT_LVDS1) if (!intel_sdvo_lvds_init(intel_sdvo, 1)) return false; - if ((flags & SDVO_OUTPUT_MASK) == 0) { + if (flags == 0) { unsigned char bytes[2]; - intel_sdvo->controlled_output = 0; memcpy(bytes, &intel_sdvo->caps.output_flags, 2); DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", SDVO_NAME(intel_sdvo), @@ -3394,8 +3406,6 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, */ intel_sdvo->base.cloneable = 0; - intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo); - /* Set the input timing to the screen. Assume always input 0. 
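 *
 * (Editorial aside, not part of the patch: the DDC bus selection that
 * used to happen here now runs inside intel_sdvo_output_setup(), once
 * intel_sdvo_filter_output_flags() has masked out any unpaired "1"
 * outputs. Illustrative behaviour of that filter, using the flag names
 * from the hunk above:
 *
 *   u16 flags = SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB0;
 *   flags = intel_sdvo_filter_output_flags(flags);
 *   // TMDS1 is cleared because TMDS0 is absent; RGB0 survives.
 * )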
*/ if (!intel_sdvo_set_target_input(intel_sdvo)) goto err_output; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 8dd295dbe241..dd35d3d7ad03 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -36,13 +36,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme goto err_unpin_pages; } - ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL); + ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL); if (ret) goto err_free; src = obj->mm.pages->sgl; dst = st->sgl; - for (i = 0; i < obj->mm.pages->nents; i++) { + for (i = 0; i < obj->mm.pages->orig_nents; i++) { sg_set_page(dst, sg_page(src), src->length, 0); dst = sg_next(dst); src = sg_next(src); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 6615eb5147e2..5f86d9aacb8a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -736,6 +736,24 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt) mutex_lock(>->tlb_invalidate_lock); intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */ + + for_each_engine(engine, gt, id) { + struct reg_and_bit rb; + + rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num); + if (!i915_mmio_reg_offset(rb.reg)) + continue; + + if (INTEL_GEN(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS || + engine->class == VIDEO_ENHANCEMENT_CLASS)) + rb.bit = _MASKED_BIT_ENABLE(rb.bit); + + intel_uncore_write_fw(uncore, rb.reg, rb.bit); + } + + spin_unlock_irq(&uncore->lock); + for_each_engine(engine, gt, id) { /* * HW architecture suggest typical invalidation time at 40us, @@ -750,7 +768,6 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt) if (!i915_mmio_reg_offset(rb.reg)) continue; - intel_uncore_write_fw(uncore, rb.reg, rb.bit); if (__intel_wait_for_register_fw(uncore, rb.reg, rb.bit, 0, timeout_us, timeout_ms, diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index 66fcbf9d0fdd..cca285185dc4 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -200,7 +200,7 @@ out_active: spin_lock(&timelines->lock); if (flush_submission(gt, timeout)) /* Wait, there's more! */ active_count++; - return active_count ? timeout : 0; + return active_count ? 
timeout ?: -ETIME : 0; } int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout) diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 95d41c01d0e0..35d55f98a06f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -4788,8 +4788,8 @@ static int live_lrc_layout(void *arg) continue; hw = shmem_pin_map(engine->default_state); - if (IS_ERR(hw)) { - err = PTR_ERR(hw); + if (!hw) { + err = -ENOMEM; break; } hw += LRC_STATE_OFFSET / sizeof(*hw); @@ -4965,8 +4965,8 @@ static int live_lrc_fixed(void *arg) continue; hw = shmem_pin_map(engine->default_state); - if (IS_ERR(hw)) { - err = PTR_ERR(hw); + if (!hw) { + err = -ENOMEM; break; } hw += LRC_STATE_OFFSET / sizeof(*hw); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 0b1ea29dcffa..606e6c315fe2 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -660,7 +660,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu, else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) index = FDI_RX_IMR_TO_PIPE(offset); else { - gvt_vgpu_err("Unsupport registers %x\n", offset); + gvt_vgpu_err("Unsupported registers %x\n", offset); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 74e66dea5708..5e670aace59f 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3964,8 +3964,8 @@ addr_err: return ERR_PTR(err); } -static ssize_t show_dynamic_id(struct device *dev, - struct device_attribute *attr, +static ssize_t show_dynamic_id(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { struct i915_oa_config *oa_config = diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h index a36a455ae336..534951ff38bb 100644 --- a/drivers/gpu/drm/i915/i915_perf_types.h +++ b/drivers/gpu/drm/i915/i915_perf_types.h @@ -54,7 +54,7 @@ struct i915_oa_config { struct attribute_group sysfs_metric; struct attribute *attrs[2]; - struct device_attribute sysfs_metric_id; + struct kobj_attribute sysfs_metric_id; struct kref ref; struct rcu_head rcu; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 12488996a7f4..04157d8ced32 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7202,7 +7202,7 @@ enum { #define _SEL_FETCH_PLANE_BASE_6_A 0x70940 #define _SEL_FETCH_PLANE_BASE_7_A 0x70960 #define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880 -#define _SEL_FETCH_PLANE_BASE_1_B 0x70990 +#define _SEL_FETCH_PLANE_BASE_1_B 0x71890 #define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \ _SEL_FETCH_PLANE_BASE_1_A, \ @@ -7546,7 +7546,8 @@ enum { #define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084) #define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088) -#define DMC_DEBUG3 _MMIO(0x101090) +#define TGL_DMC_DEBUG3 _MMIO(0x101090) +#define DG1_DMC_DEBUG3 _MMIO(0x13415c) /* Display Internal Timeout Register */ #define RM_TIMEOUT _MMIO(0x42060) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 45d32ef42787..ac40a95374d3 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -500,7 +500,14 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, struct device *kdev = kobj_to_dev(kobj); struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); struct i915_gpu_coredump *gpu; - ssize_t ret; + ssize_t ret = 0; + + /* + * FIXME: Concurrent clients triggering 
resets and reading + clearing + * dumps can cause inconsistent sysfs reads when a user calls in with a + * non-zero offset to complete a prior partial read but the + * gpu_coredump has been cleared or replaced. + */ gpu = i915_first_error_state(i915); if (IS_ERR(gpu)) { @@ -512,8 +519,10 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, const char *str = "No error state collected\n"; size_t len = strlen(str); - ret = min_t(size_t, count, len - off); - memcpy(buf, str + off, ret); + if (off < len) { + ret = min_t(size_t, count, len - off); + memcpy(buf, str + off, ret); + } } return ret; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 472aaea75ef8..1b5e8d3e45a9 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2846,7 +2846,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, } static void intel_read_wm_latency(struct drm_i915_private *dev_priv, - u16 wm[8]) + u16 wm[]) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -5145,10 +5145,14 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED || modifier == I915_FORMAT_MOD_Yf_TILED || modifier == I915_FORMAT_MOD_Y_TILED_CCS || - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; + modifier == I915_FORMAT_MOD_Yf_TILED_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED; wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS || - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; + modifier == I915_FORMAT_MOD_Yf_TILED_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier); wp->width = width; diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c index c849533ca83e..3f5750cc2673 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-dev.c +++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c @@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output) ret = dcss_submodules_init(dcss); if (ret) { + of_node_put(dcss->of_port); dev_err(dev, "submodules initialization failed\n"); goto clks_err; } @@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss) dcss_clocks_disable(dcss); } + of_node_put(dcss->of_port); + pm_runtime_disable(dcss->dev); dcss_submodules_stop(dcss); diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c index f54087ac44d3..46a188dd02ad 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-plane.c +++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c @@ -268,7 +268,6 @@ static void dcss_plane_atomic_update(struct drm_plane *plane, struct dcss_plane *dcss_plane = to_dcss_plane(plane); struct dcss_dev *dcss = plane->dev->dev_private; struct drm_framebuffer *fb = state->fb; - u32 pixel_format; struct drm_crtc_state *crtc_state; bool modifiers_present; u32 src_w, src_h, dst_w, dst_h; @@ -279,7 +278,6 @@ static void dcss_plane_atomic_update(struct drm_plane *plane, if (!fb || !state->crtc || !state->visible) return; - pixel_format = state->fb->format->format; crtc_state = state->crtc->state; modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 75036aaa0c63..efd13e533726 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -553,6 +553,8 @@ static int 
imx_ldb_panel_ddc(struct device *dev, edidp = of_get_property(child, "edid", &edid_len); if (edidp) { channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL); + if (!channel->edid) + return -ENOMEM; } else if (!channel->panel) { /* fallback to display-timings node */ ret = of_get_drm_display_mode(child, diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 2a8d2e32e7b4..9fe6a4733106 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -212,8 +212,9 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector) return ret; } -static int imx_tve_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +imx_tve_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { struct imx_tve *tve = con_to_tve(connector); unsigned long rate; diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index d412fc265395..fd9d8e51837f 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -68,7 +68,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc, drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) { if (plane == &ipu_crtc->plane[0]->base) disable_full = true; - if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base) + if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base) disable_partial = true; } diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 605ac8825a59..b61bfa84b6bb 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -70,8 +70,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) ret = of_get_drm_display_mode(np, &imxpd->mode, &imxpd->bus_flags, OF_USE_NATIVE_MODE); - if (ret) + if (ret) { + drm_mode_destroy(connector->dev, mode); return ret; + } drm_mode_copy(mode, &imxpd->mode); mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index b6bb5fc7d183..e34718cf5c2e 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -68,6 +69,21 @@ struct ingenic_drm { bool panel_is_sharp; bool no_vblank; + + /* + * clk_mutex is used to synchronize the pixel clock rate update with + * the VBLANK. When the pixel clock's parent clock needs to be updated, + * clock_nb's notifier function will lock the mutex, then wait until the + * next VBLANK. At that point, the parent clock's rate can be updated, + * and the mutex is then unlocked. If an atomic commit happens in the + * meantime, it will lock on the mutex, effectively waiting until the + * clock update process finishes. Finally, the pixel clock's rate will + * be recomputed when the mutex has been released, in the pending atomic + * commit, or a future one. 
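+ *
+ * A rough sketch of the resulting handshake (editorial, derived from
+ * the notifier below; POST_RATE_CHANGE and ABORT_RATE_CHANGE both take
+ * the default branch):
+ *
+ *   clk core                     ingenic_drm_update_pixclk()
+ *   PRE_RATE_CHANGE         ->   mutex_lock(&priv->clk_mutex);
+ *                                priv->update_clk_rate = true;
+ *                                drm_crtc_wait_one_vblank(&priv->crtc);
+ *   parent rate changes
+ *   POST/ABORT_RATE_CHANGE  ->   mutex_unlock(&priv->clk_mutex);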
+ */ + struct mutex clk_mutex; + bool update_clk_rate; + struct notifier_block clock_nb; }; static const u32 ingenic_drm_primary_formats[] = { @@ -111,6 +127,29 @@ static inline struct ingenic_drm *drm_crtc_get_priv(struct drm_crtc *crtc) return container_of(crtc, struct ingenic_drm, crtc); } +static inline struct ingenic_drm *drm_nb_get_priv(struct notifier_block *nb) +{ + return container_of(nb, struct ingenic_drm, clock_nb); +} + +static int ingenic_drm_update_pixclk(struct notifier_block *nb, + unsigned long action, + void *data) +{ + struct ingenic_drm *priv = drm_nb_get_priv(nb); + + switch (action) { + case PRE_RATE_CHANGE: + mutex_lock(&priv->clk_mutex); + priv->update_clk_rate = true; + drm_crtc_wait_one_vblank(&priv->crtc); + return NOTIFY_OK; + default: + mutex_unlock(&priv->clk_mutex); + return NOTIFY_OK; + } +} + static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -276,8 +315,14 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc, if (drm_atomic_crtc_needs_modeset(state)) { ingenic_drm_crtc_update_timings(priv, &state->mode); + priv->update_clk_rate = true; + } + if (priv->update_clk_rate) { + mutex_lock(&priv->clk_mutex); clk_set_rate(priv->pix_clk, state->adjusted_mode.clock * 1000); + priv->update_clk_rate = false; + mutex_unlock(&priv->clk_mutex); } if (event) { @@ -936,16 +981,28 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) if (soc_info->has_osd) regmap_write(priv->map, JZ_REG_LCD_OSDC, JZ_LCD_OSDC_OSDEN); + mutex_init(&priv->clk_mutex); + priv->clock_nb.notifier_call = ingenic_drm_update_pixclk; + + parent_clk = clk_get_parent(priv->pix_clk); + ret = clk_notifier_register(parent_clk, &priv->clock_nb); + if (ret) { + dev_err(dev, "Unable to register clock notifier\n"); + goto err_devclk_disable; + } + ret = drm_dev_register(drm, 0); if (ret) { dev_err(dev, "Failed to register DRM driver\n"); - goto err_devclk_disable; + goto err_clk_notifier_unregister; } drm_fbdev_generic_setup(drm, 32); return 0; +err_clk_notifier_unregister: + clk_notifier_unregister(parent_clk, &priv->clock_nb); err_devclk_disable: if (priv->lcd_clk) clk_disable_unprepare(priv->lcd_clk); @@ -967,7 +1024,9 @@ static int compare_of(struct device *dev, void *data) static void ingenic_drm_unbind(struct device *dev) { struct ingenic_drm *priv = dev_get_drvdata(dev); + struct clk *parent_clk = clk_get_parent(priv->pix_clk); + clk_notifier_unregister(parent_clk, &priv->clock_nb); if (priv->lcd_clk) clk_disable_unprepare(priv->lcd_clk); clk_disable_unprepare(priv->pix_clk); diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index 5275b2723293..64e6fb806290 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -1118,6 +1118,7 @@ static int mcde_dsi_bind(struct device *dev, struct device *master, bridge = of_drm_find_bridge(child); if (!bridge) { dev_err(dev, "failed to find bridge\n"); + of_node_put(child); return -EINVAL; } } diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c index cb29b649fcdb..12bf93769497 100644 --- a/drivers/gpu/drm/mediatek/mtk_cec.c +++ b/drivers/gpu/drm/mediatek/mtk_cec.c @@ -84,7 +84,7 @@ static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset, u32 tmp = readl(cec->regs + offset) & ~mask; tmp |= val & mask; - writel(val, cec->regs + offset); + writel(tmp, cec->regs + offset); } void mtk_cec_set_hpd_event(struct device *dev, diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c 
b/drivers/gpu/drm/mediatek/mtk_dpi.c index 52f11a63a330..c1ae336df683 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -52,13 +52,7 @@ enum mtk_dpi_out_channel_swap { }; enum mtk_dpi_out_color_format { - MTK_DPI_COLOR_FORMAT_RGB, - MTK_DPI_COLOR_FORMAT_RGB_FULL, - MTK_DPI_COLOR_FORMAT_YCBCR_444, - MTK_DPI_COLOR_FORMAT_YCBCR_422, - MTK_DPI_COLOR_FORMAT_XV_YCC, - MTK_DPI_COLOR_FORMAT_YCBCR_444_FULL, - MTK_DPI_COLOR_FORMAT_YCBCR_422_FULL + MTK_DPI_COLOR_FORMAT_RGB }; struct mtk_dpi { @@ -358,24 +352,11 @@ static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi) static void mtk_dpi_config_color_format(struct mtk_dpi *dpi, enum mtk_dpi_out_color_format format) { - if ((format == MTK_DPI_COLOR_FORMAT_YCBCR_444) || - (format == MTK_DPI_COLOR_FORMAT_YCBCR_444_FULL)) { - mtk_dpi_config_yuv422_enable(dpi, false); - mtk_dpi_config_csc_enable(dpi, true); - mtk_dpi_config_swap_input(dpi, false); - mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_BGR); - } else if ((format == MTK_DPI_COLOR_FORMAT_YCBCR_422) || - (format == MTK_DPI_COLOR_FORMAT_YCBCR_422_FULL)) { - mtk_dpi_config_yuv422_enable(dpi, true); - mtk_dpi_config_csc_enable(dpi, true); - mtk_dpi_config_swap_input(dpi, true); - mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB); - } else { - mtk_dpi_config_yuv422_enable(dpi, false); - mtk_dpi_config_csc_enable(dpi, false); - mtk_dpi_config_swap_input(dpi, false); - mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB); - } + /* only support RGB888 */ + mtk_dpi_config_yuv422_enable(dpi, false); + mtk_dpi_config_csc_enable(dpi, false); + mtk_dpi_config_swap_input(dpi, false); + mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB); } static void mtk_dpi_power_off(struct mtk_dpi *dpi) @@ -416,7 +397,6 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi) if (dpi->pinctrl && dpi->pins_dpi) pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); - mtk_dpi_enable(dpi); return 0; err_pixel: @@ -553,6 +533,7 @@ static void mtk_dpi_bridge_enable(struct drm_bridge *bridge) mtk_dpi_power_on(dpi); mtk_dpi_set_display_mode(dpi, &dpi->mode); + mtk_dpi_enable(dpi); } static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = { diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 65fd99c528af..146c4d04f572 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -202,6 +202,7 @@ struct mtk_dsi { struct mtk_phy_timing phy_timing; int refcount; bool enabled; + bool lanes_ready; u32 irq_data; wait_queue_head_t irq_wait_queue; const struct mtk_dsi_driver_data *driver_data; @@ -644,18 +645,11 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi) mtk_dsi_reset_engine(dsi); mtk_dsi_phy_timconfig(dsi); - mtk_dsi_rxtx_control(dsi); - usleep_range(30, 100); - mtk_dsi_reset_dphy(dsi); mtk_dsi_ps_control_vact(dsi); mtk_dsi_set_vm_cmd(dsi); mtk_dsi_config_vdo_timing(dsi); mtk_dsi_set_interrupt_enable(dsi); - mtk_dsi_clk_ulp_mode_leave(dsi); - mtk_dsi_lane0_ulp_mode_leave(dsi); - mtk_dsi_clk_hs_mode(dsi, 0); - return 0; err_disable_engine_clk: clk_disable_unprepare(dsi->engine_clk); @@ -687,6 +681,8 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi) mtk_dsi_reset_engine(dsi); mtk_dsi_lane0_ulp_mode_enter(dsi); mtk_dsi_clk_ulp_mode_enter(dsi); + /* set the lane number as 0 to pull down mipi */ + writel(0, dsi->regs + DSI_TXRX_CTRL); mtk_dsi_disable(dsi); @@ -694,21 +690,31 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi) clk_disable_unprepare(dsi->digital_clk); 
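	/*
	 * Editorial note, not part of the patch: with this change the lane
	 * bring-up moves out of mtk_dsi_poweron() into mtk_dsi_lane_ready()
	 * below, gated by the new lanes_ready flag, so whichever of bridge
	 * enable or a host transfer runs first performs it exactly once per
	 * power cycle:
	 *
	 *   mtk_dsi_poweron(dsi);      // clocks, PHY, timing config only
	 *   mtk_dsi_lane_ready(dsi);   // rxtx control + ULPS exit, once
	 *   ...
	 *   mtk_dsi_poweroff(dsi);     // clears lanes_ready again
	 */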
phy_power_off(dsi->phy); + + dsi->lanes_ready = false; +} + +static void mtk_dsi_lane_ready(struct mtk_dsi *dsi) +{ + if (!dsi->lanes_ready) { + dsi->lanes_ready = true; + mtk_dsi_rxtx_control(dsi); + usleep_range(30, 100); + mtk_dsi_reset_dphy(dsi); + mtk_dsi_clk_ulp_mode_leave(dsi); + mtk_dsi_lane0_ulp_mode_leave(dsi); + mtk_dsi_clk_hs_mode(dsi, 0); + msleep(20); + /* The reaction time after pulling up the mipi signal for dsi_rx */ + } } static void mtk_output_dsi_enable(struct mtk_dsi *dsi) { - int ret; - if (dsi->enabled) return; - ret = mtk_dsi_poweron(dsi); - if (ret < 0) { - DRM_ERROR("failed to power on dsi\n"); - return; - } - + mtk_dsi_lane_ready(dsi); mtk_dsi_set_mode(dsi); mtk_dsi_clk_hs_mode(dsi, 1); @@ -722,8 +728,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi) if (!dsi->enabled) return; - mtk_dsi_poweroff(dsi); - dsi->enabled = false; } @@ -746,24 +750,53 @@ static void mtk_dsi_bridge_mode_set(struct drm_bridge *bridge, drm_display_mode_to_videomode(adjusted, &dsi->vm); } -static void mtk_dsi_bridge_disable(struct drm_bridge *bridge) +static void mtk_dsi_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) { struct mtk_dsi *dsi = bridge_to_dsi(bridge); mtk_output_dsi_disable(dsi); } -static void mtk_dsi_bridge_enable(struct drm_bridge *bridge) +static void mtk_dsi_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) { struct mtk_dsi *dsi = bridge_to_dsi(bridge); + if (dsi->refcount == 0) + return; + mtk_output_dsi_enable(dsi); } +static void mtk_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct mtk_dsi *dsi = bridge_to_dsi(bridge); + int ret; + + ret = mtk_dsi_poweron(dsi); + if (ret < 0) + DRM_ERROR("failed to power on dsi\n"); +} + +static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct mtk_dsi *dsi = bridge_to_dsi(bridge); + + mtk_dsi_poweroff(dsi); +} + static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = { .attach = mtk_dsi_bridge_attach, - .disable = mtk_dsi_bridge_disable, - .enable = mtk_dsi_bridge_enable, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_disable = mtk_dsi_bridge_atomic_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_enable = mtk_dsi_bridge_atomic_enable, + .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable, + .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable, + .atomic_reset = drm_atomic_helper_bridge_reset, .mode_set = mtk_dsi_bridge_mode_set, }; @@ -891,24 +924,35 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, u8 read_data[16]; void *src_addr; u8 irq_flag = CMD_DONE_INT_FLAG; + u32 dsi_mode; + int ret; - if (readl(dsi->regs + DSI_MODE_CTRL) & MODE) { - DRM_ERROR("dsi engine is not command mode\n"); - return -EINVAL; + dsi_mode = readl(dsi->regs + DSI_MODE_CTRL); + if (dsi_mode & MODE) { + mtk_dsi_stop(dsi); + ret = mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500); + if (ret) + goto restore_dsi_mode; } if (MTK_DSI_HOST_IS_READ(msg->type)) irq_flag |= LPRX_RD_RDY_INT_FLAG; - if (mtk_dsi_host_send_cmd(dsi, msg, irq_flag) < 0) - return -ETIME; + mtk_dsi_lane_ready(dsi); - if (!MTK_DSI_HOST_IS_READ(msg->type)) - return 0; + ret = mtk_dsi_host_send_cmd(dsi, msg, irq_flag); + if (ret) + goto restore_dsi_mode; + + if (!MTK_DSI_HOST_IS_READ(msg->type)) { + recv_cnt = 0; + goto restore_dsi_mode; + } if (!msg->rx_buf) { 
DRM_ERROR("dsi receive buffer size may be NULL\n"); - return -EINVAL; + ret = -EINVAL; + goto restore_dsi_mode; } for (i = 0; i < 16; i++) @@ -933,7 +977,13 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n", recv_cnt, *((u8 *)(msg->tx_buf))); - return recv_cnt; +restore_dsi_mode: + if (dsi_mode & MODE) { + mtk_dsi_set_mode(dsi); + mtk_dsi_start(dsi); + } + + return ret < 0 ? ret : recv_cnt; } static const struct mipi_dsi_host_ops mtk_dsi_ops = { diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 728fea509412..b0bfe85f5f6a 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -116,8 +116,11 @@ static bool meson_vpu_has_available_connectors(struct device *dev) for_each_endpoint_of_node(dev->of_node, ep) { /* If the endpoint node exists, consider it enabled */ remote = of_graph_get_remote_port(ep); - if (remote) + if (remote) { + of_node_put(remote); + of_node_put(ep); return true; + } } return false; @@ -525,6 +528,13 @@ static int meson_drv_probe(struct platform_device *pdev) return 0; }; +static int meson_drv_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &meson_drv_master_ops); + + return 0; +} + static struct meson_drm_match_data meson_drm_gxbb_data = { .compat = VPU_COMPATIBLE_GXBB, }; @@ -562,6 +572,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = { static struct platform_driver meson_drm_platform_driver = { .probe = meson_drv_probe, + .remove = meson_drv_remove, .shutdown = meson_drv_shutdown, .driver = { .name = "meson-drm", diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index 35338ed18209..255c6b863f8d 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -163,7 +163,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane, /* Enable OSD and BLK0, set max global alpha */ priv->viu.osd1_ctrl_stat = OSD_ENABLE | - (0xFF << OSD_GLOBAL_ALPHA_SHIFT) | + (0x100 << OSD_GLOBAL_ALPHA_SHIFT) | OSD_BLK0_ENABLE; priv->viu.osd1_ctrl_stat2 = readl(priv->io_base + diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index 259f3e6bec90..d4b907889a21 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv, priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12)); writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff), priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21)); - writel((m[11] & 0x1fff) << 16, + writel((m[11] & 0x1fff), priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22)); writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff), @@ -469,17 +469,17 @@ void meson_viu_init(struct meson_drm *priv) priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE)); if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { - writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) | - VIU_OSD_BLEND_REORDER(1, 0) | - VIU_OSD_BLEND_REORDER(2, 0) | - VIU_OSD_BLEND_REORDER(3, 0) | - VIU_OSD_BLEND_DIN_EN(1) | - VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 | - VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 | - VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 | - VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) | - VIU_OSD_BLEND_HOLD_LINES(4), - priv->io_base + _REG(VIU_OSD_BLEND_CTRL)); + u32 val = (u32)VIU_OSD_BLEND_REORDER(0, 1) | + (u32)VIU_OSD_BLEND_REORDER(1, 0) | + (u32)VIU_OSD_BLEND_REORDER(2, 0) | + (u32)VIU_OSD_BLEND_REORDER(3, 0) | + (u32)VIU_OSD_BLEND_DIN_EN(1) | + 
(u32)VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 | + (u32)VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 | + (u32)VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 | + (u32)VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) | + (u32)VIU_OSD_BLEND_HOLD_LINES(4); + writel_relaxed(val, priv->io_base + _REG(VIU_OSD_BLEND_CTRL)); writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE, priv->io_base + _REG(OSD1_BLEND_SRC_CTRL)); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 9e09805575db..dffc133b8b1c 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1222,7 +1222,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu) return ERR_CAST(mmu); return msm_gem_address_space_create(mmu, - "gpu", 0x100000000ULL, 0x1ffffffffULL); + "gpu", 0x100000000ULL, SZ_4G); } static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) @@ -1308,6 +1308,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) BUG_ON(!node); ret = a6xx_gmu_init(a6xx_gpu, node); + of_node_put(node); if (ret) { a6xx_destroy(&(a6xx_gpu->base.base)); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 458b5b26d3c2..de8cc25506d6 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -960,7 +960,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) release_firmware(adreno_gpu->fw[i]); - pm_runtime_disable(&priv->gpu_pdev->dev); + if (pm_runtime_enabled(&priv->gpu_pdev->dev)) + pm_runtime_disable(&priv->gpu_pdev->dev); msm_gpu_cleanup(&adreno_gpu->base); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index 6f0f54588124..108882bbd2b8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c @@ -146,6 +146,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx, active_v_end = active_v_start + (p->yres * hsync_period) - 1; display_v_start += p->hsync_pulse_width + p->h_back_porch; + display_v_end -= p->h_front_porch; active_hctl = (active_h_end << 16) | active_h_start; display_hctl = active_hctl; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 08e082d0443a..b7841f7fc10a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -675,11 +675,11 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) _dpu_kms_mmu_destroy(dpu_kms); if (dpu_kms->catalog) { - for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { - u32 vbif_idx = dpu_kms->catalog->vbif[i].id; - - if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) - dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]); + for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { + if (dpu_kms->hw_vbif[i]) { + dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]); + dpu_kms->hw_vbif[i] = NULL; + } } } @@ -937,7 +937,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dpu_kms_parse_data_bus_icc_path(dpu_kms); - pm_runtime_get_sync(&dpu_kms->pdev->dev); + rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev); + if (rc < 0) + goto error; dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0); @@ -983,7 +985,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms) for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { u32 vbif_idx = dpu_kms->catalog->vbif[i].id; - dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx, + dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx, 
dpu_kms->vbif[vbif_idx], dpu_kms->catalog); if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) { rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 5e8c3f3e6625..fc86d34aec80 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -11,6 +11,14 @@ #include "dpu_hw_vbif.h" #include "dpu_trace.h" +static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx) +{ + if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif)) + return dpu_kms->hw_vbif[vbif_idx]; + + return NULL; +} + /** * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt * @vbif: Pointer to hardware vbif driver @@ -148,20 +156,15 @@ exit: void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, struct dpu_vbif_set_ot_params *params) { - struct dpu_hw_vbif *vbif = NULL; + struct dpu_hw_vbif *vbif; struct dpu_hw_mdp *mdp; bool forced_on = false; u32 ot_lim; - int ret, i; + int ret; mdp = dpu_kms->hw_mdp; - for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { - if (dpu_kms->hw_vbif[i] && - dpu_kms->hw_vbif[i]->idx == params->vbif_idx) - vbif = dpu_kms->hw_vbif[i]; - } - + vbif = dpu_get_vbif(dpu_kms, params->vbif_idx); if (!vbif || !mdp) { DPU_DEBUG("invalid arguments vbif %d mdp %d\n", vbif != NULL, mdp != NULL); @@ -204,7 +207,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, struct dpu_vbif_set_qos_params *params) { - struct dpu_hw_vbif *vbif = NULL; + struct dpu_hw_vbif *vbif; struct dpu_hw_mdp *mdp; bool forced_on = false; const struct dpu_vbif_qos_tbl *qos_tbl; @@ -216,13 +219,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, } mdp = dpu_kms->hw_mdp; - for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { - if (dpu_kms->hw_vbif[i] && - dpu_kms->hw_vbif[i]->idx == params->vbif_idx) { - vbif = dpu_kms->hw_vbif[i]; - break; - } - } + vbif = dpu_get_vbif(dpu_kms, params->vbif_idx); if (!vbif || !vbif->cap) { DPU_ERROR("invalid vbif %d\n", params->vbif_idx); diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index 913de5938782..b4d0bfc83d70 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -221,6 +221,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, encoder = mdp4_lcdc_encoder_init(dev, panel_node); if (IS_ERR(encoder)) { DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n"); + of_node_put(panel_node); return PTR_ERR(encoder); } @@ -230,6 +231,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, connector = mdp4_lvds_connector_init(dev, panel_node, encoder); if (IS_ERR(connector)) { DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n"); + of_node_put(panel_node); return PTR_ERR(connector); } diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c index 7288041dd86a..7444b75c4215 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c @@ -56,8 +56,9 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector) return ret; } -static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +mdp4_lvds_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { struct mdp4_lvds_connector *mdp4_lvds_connector = 
to_mdp4_lvds_connector(connector); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index a8fa084dfa49..ff4f207cbdea 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -608,9 +608,15 @@ int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc, if (ret) return ret; - mdp5_mixer_release(new_crtc_state->state, old_mixer); + ret = mdp5_mixer_release(new_crtc_state->state, old_mixer); + if (ret) + return ret; + if (old_r_mixer) { - mdp5_mixer_release(new_crtc_state->state, old_r_mixer); + ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer); + if (ret) + return ret; + if (!need_right_mixer) pipeline->r_mixer = NULL; } @@ -977,8 +983,10 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &mdp5_crtc->cursor.iova); - if (ret) + if (ret) { + drm_gem_object_put(cursor_bo); return -EINVAL; + } pm_runtime_get_sync(&pdev->dev); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index e193865ce9a2..9baaaef706ab 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -598,9 +598,9 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) pdev = mdp5_kms->pdev; irq = irq_of_parse_and_map(pdev->dev.of_node, 0); - if (irq < 0) { - ret = irq; - DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret); + if (!irq) { + ret = -EINVAL; + DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n"); goto fail; } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c index 954db683ae44..2536def2a000 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c @@ -116,21 +116,28 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc, return 0; } -void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer) +int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer) { struct mdp5_global_state *global_state = mdp5_get_global_state(s); - struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer; + struct mdp5_hw_mixer_state *new_state; if (!mixer) - return; + return 0; + + if (IS_ERR(global_state)) + return PTR_ERR(global_state); + + new_state = &global_state->hwmixer; if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx])) - return; + return -EINVAL; DBG("%s: release from crtc %s", mixer->name, new_state->hwmixer_to_crtc[mixer->idx]->name); new_state->hwmixer_to_crtc[mixer->idx] = NULL; + + return 0; } void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h index 43c9ba43ce18..545ee223b9d7 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h @@ -30,7 +30,7 @@ void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm); int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc, uint32_t caps, struct mdp5_hw_mixer **mixer, struct mdp5_hw_mixer **r_mixer); -void mdp5_mixer_release(struct drm_atomic_state *s, - struct mdp5_hw_mixer *mixer); +int mdp5_mixer_release(struct drm_atomic_state *s, + struct mdp5_hw_mixer *mixer); #endif /* __MDP5_LM_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c index ba6695963aa6..e4b8a789835a 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c +++ 
b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c @@ -119,18 +119,24 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane, return 0; } -void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe) +int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe) { struct msm_drm_private *priv = s->dev->dev_private; struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); - struct mdp5_global_state *state = mdp5_get_global_state(s); - struct mdp5_hw_pipe_state *new_state = &state->hwpipe; + struct mdp5_global_state *state; + struct mdp5_hw_pipe_state *new_state; if (!hwpipe) - return; + return 0; + + state = mdp5_get_global_state(s); + if (IS_ERR(state)) + return PTR_ERR(state); + + new_state = &state->hwpipe; if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx])) - return; + return -EINVAL; DBG("%s: release from plane %s", hwpipe->name, new_state->hwpipe_to_plane[hwpipe->idx]->name); @@ -141,6 +147,8 @@ void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe) } new_state->hwpipe_to_plane[hwpipe->idx] = NULL; + + return 0; } void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h index 9b26d0761bd4..cca67938cab2 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h @@ -37,7 +37,7 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane, uint32_t caps, uint32_t blkcfg, struct mdp5_hw_pipe **hwpipe, struct mdp5_hw_pipe **r_hwpipe); -void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe); +int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe); struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, uint32_t reg_offset, uint32_t caps); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 83423092de2f..0dc23c86747e 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -179,7 +179,10 @@ static void mdp5_plane_reset(struct drm_plane *plane) drm_framebuffer_put(plane->state->fb); kfree(to_mdp5_plane_state(plane->state)); + plane->state = NULL; mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); + if (!mdp5_state) + return; /* assign default blend parameters */ mdp5_state->alpha = 255; @@ -390,12 +393,24 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, mdp5_state->r_hwpipe = NULL; - mdp5_pipe_release(state->state, old_hwpipe); - mdp5_pipe_release(state->state, old_right_hwpipe); + ret = mdp5_pipe_release(state->state, old_hwpipe); + if (ret) + return ret; + + ret = mdp5_pipe_release(state->state, old_right_hwpipe); + if (ret) + return ret; + } } else { - mdp5_pipe_release(state->state, mdp5_state->hwpipe); - mdp5_pipe_release(state->state, mdp5_state->r_hwpipe); + ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe); + if (ret) + return ret; + + ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe); + if (ret) + return ret; + mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL; } diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index aeca8b2ac5c6..613348b022fe 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -416,7 +416,7 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, if (rate == link_rate_hbr3) pixel_div = 6; - else if (rate == 1620000 || rate == 270000) + 
else if (rate == 162000 || rate == 270000) pixel_div = 2; else if (rate == link_rate_hbr2) pixel_div = 4; @@ -572,7 +572,7 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog) dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); } -u32 dp_catalog_hpd_get_state_status(struct dp_catalog *dp_catalog) +u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog) { struct dp_catalog_private *catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h index 6d257dbebf29..176a9020a520 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.h +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -97,7 +97,7 @@ void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable); void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, u32 intr_mask, bool en); void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog); -u32 dp_catalog_hpd_get_state_status(struct dp_catalog *dp_catalog); +u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog); u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog); void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog); int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level, diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index c83a1650437d..9fac55c24214 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1205,7 +1205,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, if (ret) return ret; - dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN); + dp_ctrl_train_pattern_set(ctrl, pattern); for (tries = 0; tries <= maximum_retries; tries++) { drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd); @@ -1460,6 +1460,30 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) return ret; } +static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl) +{ + struct dp_io *dp_io; + struct phy *phy; + int ret; + + dp_io = &ctrl->parser->io; + phy = dp_io->phy; + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + + dp_catalog_ctrl_reset(ctrl->catalog); + + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false); + if (ret) { + DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret); + } + + phy_power_off(phy); + phy_exit(phy); + + return 0; +} + static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl) { int ret = 0; @@ -1640,8 +1664,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) if (rc) return rc; - while (--link_train_max_retries && - !atomic_read(&ctrl->dp_ctrl.aborted)) { + while (--link_train_max_retries) { rc = dp_ctrl_reinitialize_mainlink(ctrl); if (rc) { DRM_ERROR("Failed to reinitialize mainlink. 
rc=%d\n", @@ -1656,6 +1679,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) break; } else if (training_step == DP_TRAINING_1) { /* link train_1 failed */ + if (!dp_catalog_link_is_connected(ctrl->catalog)) { + break; + } + rc = dp_ctrl_link_rate_down_shift(ctrl); if (rc < 0) { /* already in RBR = 1.6G */ if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) { @@ -1675,6 +1702,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) } } else if (training_step == DP_TRAINING_2) { /* link train_2 failed, lower lane rate */ + if (!dp_catalog_link_is_connected(ctrl->catalog)) { + break; + } + rc = dp_ctrl_link_lane_down_shift(ctrl); if (rc < 0) { /* end with failure */ @@ -1695,6 +1726,11 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) */ if (rc == 0) /* link train successfully */ dp_ctrl_push_idle(dp_ctrl); + else { + /* link training failed */ + dp_ctrl_deinitialize_mainlink(ctrl); + rc = -ECONNRESET; + } return rc; } diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 6cd6934c8c9f..5a152d505dfb 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -45,7 +45,7 @@ enum { ST_CONNECT_PENDING, ST_CONNECTED, ST_DISCONNECT_PENDING, - ST_SUSPEND_PENDING, + ST_DISPLAY_OFF, ST_SUSPENDED, }; @@ -102,6 +102,8 @@ struct dp_display_private { struct dp_display_mode dp_mode; struct msm_dp dp_display; + bool encoder_mode_set; + /* wait for audio signaling */ struct completion audio_comp; @@ -111,6 +113,7 @@ struct dp_display_private { u32 hpd_state; u32 event_pndx; u32 event_gndx; + struct task_struct *ev_tsk; struct dp_event event_list[DP_EVENT_Q_MAX]; spinlock_t event_lock; @@ -194,6 +197,8 @@ void dp_display_signal_audio_complete(struct msm_dp *dp_display) complete_all(&dp->audio_comp); } +static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv); + static int dp_display_bind(struct device *dev, struct device *master, void *data) { @@ -234,9 +239,18 @@ static int dp_display_bind(struct device *dev, struct device *master, } rc = dp_register_audio_driver(dev, dp->audio); - if (rc) + if (rc) { DRM_ERROR("Audio registration Dp failed\n"); + goto end; + } + rc = dp_hpd_event_thread_start(dp); + if (rc) { + DRM_ERROR("Event thread create failed\n"); + goto end; + } + + return 0; end: return rc; } @@ -255,6 +269,12 @@ static void dp_display_unbind(struct device *dev, struct device *master, return; } + /* disable all HPD interrupts */ + if (dp->core_initialized) + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); + + kthread_stop(dp->ev_tsk); + dp_power_client_deinit(dp->power); dp_aux_unregister(dp->aux); priv->dp = NULL; @@ -288,13 +308,24 @@ static void dp_display_send_hpd_event(struct msm_dp *dp_display) drm_helper_hpd_irq_event(connector->dev); } -static int dp_display_send_hpd_notification(struct dp_display_private *dp, - bool hpd) + +static void dp_display_set_encoder_mode(struct dp_display_private *dp) { - static bool encoder_mode_set; struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private; struct msm_kms *kms = priv->kms; + if (!dp->encoder_mode_set && dp->dp_display.encoder && + kms->funcs->set_encoder_mode) { + kms->funcs->set_encoder_mode(kms, + dp->dp_display.encoder, false); + + dp->encoder_mode_set = true; + } +} + +static int dp_display_send_hpd_notification(struct dp_display_private *dp, + bool hpd) +{ if ((hpd && dp->dp_display.is_connected) || (!hpd && !dp->dp_display.is_connected)) { DRM_DEBUG_DP("HPD already %s\n", (hpd ? 
"on" : "off")); @@ -307,15 +338,6 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp, dp->dp_display.is_connected = hpd; - if (dp->dp_display.is_connected && dp->dp_display.encoder - && !encoder_mode_set - && kms->funcs->set_encoder_mode) { - kms->funcs->set_encoder_mode(kms, - dp->dp_display.encoder, false); - DRM_DEBUG_DP("set_encoder_mode() Completed\n"); - encoder_mode_set = true; - } - dp_display_send_hpd_event(&dp->dp_display); return 0; @@ -351,7 +373,6 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp) dp_add_event(dp, EV_USER_NOTIFICATION, true, 0); - end: return rc; } @@ -368,6 +389,8 @@ static void dp_display_host_init(struct dp_display_private *dp) if (dp->usbpd->orientation == ORIENTATION_CC2) flip = true; + dp_display_set_encoder_mode(dp); + dp_power_init(dp->power, flip); dp_ctrl_host_init(dp->ctrl, flip); dp_aux_init(dp->aux); @@ -451,25 +474,42 @@ static void dp_display_handle_video_request(struct dp_display_private *dp) } } +static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp) +{ + int rc = 0; + + if (dp_display_is_sink_count_zero(dp)) { + DRM_DEBUG_DP("sink count is zero, nothing to do\n"); + if (dp->hpd_state != ST_DISCONNECTED) { + dp->hpd_state = ST_DISCONNECT_PENDING; + dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); + } + } else { + if (dp->hpd_state == ST_DISCONNECTED) { + dp->hpd_state = ST_CONNECT_PENDING; + rc = dp_display_process_hpd_high(dp); + if (rc) + dp->hpd_state = ST_DISCONNECTED; + } + } + + return rc; +} + static int dp_display_handle_irq_hpd(struct dp_display_private *dp) { - u32 sink_request; + u32 sink_request = dp->link->sink_request; - sink_request = dp->link->sink_request; - - if (sink_request & DS_PORT_STATUS_CHANGED) { - dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); - if (dp_display_is_sink_count_zero(dp)) { - DRM_DEBUG_DP("sink count is zero, nothing to do\n"); - return 0; + if (dp->hpd_state == ST_DISCONNECTED) { + if (sink_request & DP_LINK_STATUS_UPDATED) { + DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n"); + return -EINVAL; } - - return dp_display_process_hpd_high(dp); } dp_ctrl_handle_sink_request(dp->ctrl); - if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) + if (sink_request & DP_TEST_LINK_VIDEO_PATTERN) dp_display_handle_video_request(dp); return 0; @@ -478,7 +518,9 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp) static int dp_display_usbpd_attention_cb(struct device *dev) { int rc = 0; + u32 sink_request; struct dp_display_private *dp; + struct dp_usbpd *hpd; if (!dev) { DRM_ERROR("invalid dev\n"); @@ -492,10 +534,17 @@ static int dp_display_usbpd_attention_cb(struct device *dev) return -ENODEV; } + hpd = dp->usbpd; + /* check for any test request issued by sink */ rc = dp_link_process_request(dp->link); - if (!rc) - dp_display_handle_irq_hpd(dp); + if (!rc) { + sink_request = dp->link->sink_request; + if (sink_request & DS_PORT_STATUS_CHANGED) + rc = dp_display_handle_port_ststus_changed(dp); + else + rc = dp_display_handle_irq_hpd(dp); + } return rc; } @@ -513,7 +562,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) mutex_lock(&dp->event_mutex); state = dp->hpd_state; - if (state == ST_SUSPEND_PENDING) { + if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) { mutex_unlock(&dp->event_mutex); return 0; } @@ -535,13 +584,18 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) hpd->hpd_high = 1; ret = dp_display_usbpd_configure_cb(&dp->pdev->dev); - if (ret) { /* failed 
*/ + if (ret) { /* link train failed */ hpd->hpd_high = 0; dp->hpd_state = ST_DISCONNECTED; - } - /* start sanity checking */ - dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout); + if (ret == -ECONNRESET) { /* cable unplugged */ + dp->core_initialized = false; + } + + } else { + /* start sentinel checking in case of missing uevent */ + dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout); + } mutex_unlock(&dp->event_mutex); @@ -594,11 +648,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) mutex_lock(&dp->event_mutex); state = dp->hpd_state; - if (state == ST_SUSPEND_PENDING) { - mutex_unlock(&dp->event_mutex); - return 0; - } - if (state == ST_DISCONNECT_PENDING || state == ST_DISCONNECTED) { mutex_unlock(&dp->event_mutex); return 0; @@ -625,7 +674,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) */ dp_display_usbpd_disconnect_cb(&dp->pdev->dev); - /* start sanity checking */ + /* start sentinel checking in case of missing uevent */ dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND); /* signal the disconnect event early to ensure proper teardown */ @@ -659,17 +708,21 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) { u32 state; + int ret; mutex_lock(&dp->event_mutex); /* irq_hpd can happen at either connected or disconnected state */ state = dp->hpd_state; - if (state == ST_SUSPEND_PENDING) { + if (state == ST_DISPLAY_OFF) { mutex_unlock(&dp->event_mutex); return 0; } - dp_display_usbpd_attention_cb(&dp->pdev->dev); + ret = dp_display_usbpd_attention_cb(&dp->pdev->dev); + if (ret == -ECONNRESET) { /* cable unplugged */ + dp->core_initialized = false; + } mutex_unlock(&dp->event_mutex); @@ -814,6 +867,11 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data) dp_display = g_dp_display; + if (dp_display->power_on) { + DRM_DEBUG_DP("Link already setup, return\n"); + return 0; + } + rc = dp_ctrl_on_stream(dp->ctrl); if (!rc) dp_display->power_on = true; @@ -846,6 +904,9 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data) dp_display = g_dp_display; + if (!dp_display->power_on) + return 0; + /* wait only if audio was enabled */ if (dp_display->audio_enabled) { /* signal the disconnect event */ @@ -984,12 +1045,17 @@ static int hpd_event_thread(void *data) while (1) { if (timeout_mode) { wait_event_timeout(dp_priv->event_q, - (dp_priv->event_pndx == dp_priv->event_gndx), - EVENT_TIMEOUT); + (dp_priv->event_pndx == dp_priv->event_gndx) || + kthread_should_stop(), EVENT_TIMEOUT); } else { wait_event_interruptible(dp_priv->event_q, - (dp_priv->event_pndx != dp_priv->event_gndx)); + (dp_priv->event_pndx != dp_priv->event_gndx) || + kthread_should_stop()); } + + if (kthread_should_stop()) + break; + spin_lock_irqsave(&dp_priv->event_lock, flag); todo = &dp_priv->event_list[dp_priv->event_gndx]; if (todo->delay) { @@ -1062,12 +1128,17 @@ static int hpd_event_thread(void *data) return 0; } -static void dp_hpd_event_setup(struct dp_display_private *dp_priv) +static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv) { - init_waitqueue_head(&dp_priv->event_q); - spin_lock_init(&dp_priv->event_lock); + /* set event q to empty */ + dp_priv->event_gndx = 0; + dp_priv->event_pndx = 0; - kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler"); + dp_priv->ev_tsk = kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler"); + if (IS_ERR(dp_priv->ev_tsk)) + return 
PTR_ERR(dp_priv->ev_tsk); + + return 0; } static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) @@ -1091,7 +1162,7 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) } if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) { - /* delete connect pending event first */ + /* stop sentinel connect pending checking */ dp_del_event(dp, EV_CONNECT_PENDING_TIMEOUT); dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); } @@ -1125,13 +1196,12 @@ int dp_display_request_irq(struct msm_dp *dp_display) dp = container_of(dp_display, struct dp_display_private, dp_display); dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0); - if (dp->irq < 0) { - rc = dp->irq; - DRM_ERROR("failed to get irq: %d\n", rc); - return rc; + if (!dp->irq) { + DRM_ERROR("failed to get irq\n"); + return -EINVAL; } - rc = devm_request_irq(&dp->pdev->dev, dp->irq, + rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq, dp_display_irq_handler, IRQF_TRIGGER_HIGH, "dp_display_isr", dp); if (rc < 0) { @@ -1167,8 +1237,11 @@ static int dp_display_probe(struct platform_device *pdev) return -EPROBE_DEFER; } + /* setup event q */ mutex_init(&dp->event_mutex); g_dp_display = &dp->dp_display; + init_waitqueue_head(&dp->event_q); + spin_lock_init(&dp->event_lock); /* Store DP audio handle inside DP display */ g_dp_display->dp_audio = dp->audio; @@ -1220,15 +1293,12 @@ static int dp_pm_resume(struct device *dev) dp_catalog_ctrl_hpd_config(dp->catalog); - status = dp_catalog_hpd_get_state_status(dp->catalog); + status = dp_catalog_link_is_connected(dp->catalog); - if (status) { + if (status) dp->dp_display.is_connected = true; - } else { + else dp->dp_display.is_connected = false; - /* make sure next resume host_init be called */ - dp->core_initialized = false; - } mutex_unlock(&dp->event_mutex); @@ -1250,6 +1320,9 @@ static int dp_pm_suspend(struct device *dev) dp->hpd_state = ST_SUSPENDED; + /* host_init will be called at pm_resume */ + dp->core_initialized = false; + mutex_unlock(&dp->event_mutex); return 0; @@ -1308,8 +1381,6 @@ void msm_dp_irq_postinstall(struct msm_dp *dp_display) dp = container_of(dp_display, struct dp_display_private, dp_display); - dp_hpd_event_setup(dp); - dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 100); } @@ -1384,6 +1455,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) mutex_lock(&dp_display->event_mutex); + /* stop sentinel checking */ dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT); rc = dp_display_set_mode(dp, &dp_display->dp_mode); @@ -1402,7 +1474,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) state = dp_display->hpd_state; - if (state == ST_SUSPEND_PENDING) + if (state == ST_DISPLAY_OFF) dp_display_host_init(dp_display); dp_display_enable(dp_display, 0); @@ -1414,7 +1486,8 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) dp_display_unprepare(dp); } - if (state == ST_SUSPEND_PENDING) + /* manual kick off plug event to train link */ + if (state == ST_DISPLAY_OFF) dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0); /* completed connection */ @@ -1446,6 +1519,7 @@ int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder) mutex_lock(&dp_display->event_mutex); + /* stop sentinel checking */ dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT); dp_display_disable(dp_display, 0); @@ -1459,7 +1533,7 @@ int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder) /* completed disconnection */ dp_display->hpd_state = ST_DISCONNECTED; } else { - dp_display->hpd_state = ST_SUSPEND_PENDING; + 
dp_display->hpd_state = ST_DISPLAY_OFF; } mutex_unlock(&dp_display->event_mutex); diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 2768d1d306f0..4e8a19114e87 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -196,6 +196,11 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel, &panel->aux->ddc); if (!dp_panel->edid) { DRM_ERROR("panel edid read failed\n"); + /* check edid read fail is due to unplug */ + if (!dp_catalog_link_is_connected(panel->catalog)) { + rc = -ETIMEDOUT; + goto end; + } /* fail safe edid */ mutex_lock(&connector->dev->mode_config.mutex); diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index f845333593da..7377596a13f4 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -205,6 +205,12 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, return -EINVAL; priv = dev->dev_private; + + if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) { + DRM_DEV_ERROR(dev->dev, "too many bridges\n"); + return -ENOSPC; + } + msm_dsi->dev = dev; ret = msm_dsi_host_modeset_init(msm_dsi->host, dev); diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index d255bea87ca4..73f066ef6f40 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -117,7 +117,7 @@ static const char * const dsi_8996_bus_clk_names[] = { static const struct msm_dsi_config msm8996_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, .reg_cfg = { - .num = 2, + .num = 3, .regs = { {"vdda", 18160, 1 }, /* 1.25 V */ {"vcca", 17000, 32 }, /* 0.925 V */ @@ -156,7 +156,7 @@ static const char * const dsi_sdm660_bus_clk_names[] = { static const struct msm_dsi_config sdm660_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, .reg_cfg = { - .num = 2, + .num = 1, .regs = { {"vdda", 12560, 4 }, /* 1.2 V */ }, diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 64454a63bbac..51e8318cc8ff 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1371,10 +1371,10 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host, dsi_get_bpp(msm_host->format) / 8; len = dsi_cmd_dma_add(msm_host, msg); - if (!len) { + if (len < 0) { pr_err("%s: failed to add cmd type = 0x%x\n", __func__, msg->type); - return -EINVAL; + return len; } /* for video mode, do not send cmds more than @@ -1393,10 +1393,14 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host, } ret = dsi_cmd_dma_tx(msm_host, len); - if (ret < len) { - pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n", - __func__, msg->type, (*(u8 *)(msg->tx_buf)), len); - return -ECOMM; + if (ret < 0) { + pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d, ret=%d\n", + __func__, msg->type, (*(u8 *)(msg->tx_buf)), len, ret); + return ret; + } else if (ret < len) { + pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, ret=%d len=%d\n", + __func__, msg->type, (*(u8 *)(msg->tx_buf)), ret, len); + return -EIO; } return len; @@ -2139,9 +2143,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, } ret = dsi_cmds2buf_tx(msm_host, msg); - if (ret < msg->tx_len) { + if (ret < 0) { pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret); return ret; + } else if (ret < msg->tx_len) { + pr_err("%s: Read cmd Tx failed, too short: %d\n", __func__, ret); + return -ECOMM; } /* diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 1d28dfba2c9b..fb421ca56b3d 100644 --- 
a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -644,7 +644,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) return connector; fail: - connector->funcs->destroy(msm_dsi->connector); + connector->funcs->destroy(connector); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index e07986ab52c2..2e0be85ec394 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -345,7 +345,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, } else { timing->shared_timings.clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false); - timing->shared_timings.clk_pre_inc_by_2 = 0; + timing->shared_timings.clk_pre_inc_by_2 = 0; } timing->ta_go = 3; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 94f948ef279d..bd65dc9b8892 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -142,6 +142,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev) /* HDCP needs physical address of hdmi register */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, config->mmio_name); + if (!res) { + ret = -EINVAL; + goto fail; + } hdmi->mmio_phy_addr = res->start; hdmi->qfprom_mmio = msm_ioremap(pdev, @@ -289,6 +293,11 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, struct platform_device *pdev = hdmi->pdev; int ret; + if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) { + DRM_DEV_ERROR(dev->dev, "too many bridges\n"); + return -ENOSPC; + } + hdmi->dev = dev; hdmi->encoder = encoder; @@ -311,14 +320,14 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, } hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); - if (hdmi->irq < 0) { - ret = hdmi->irq; - DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret); + if (!hdmi->irq) { + ret = -EINVAL; + DRM_DEV_ERROR(dev->dev, "failed to get irq\n"); goto fail; } - ret = devm_request_irq(&pdev->dev, hdmi->irq, - msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + ret = devm_request_irq(dev->dev, hdmi->irq, + msm_hdmi_irq, IRQF_TRIGGER_HIGH, "hdmi_isr", hdmi); if (ret < 0) { DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n", diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index e37e5afc680a..087efcb1f34c 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 819567e40565..9c05bf6c4551 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -849,6 +849,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) get_pid_task(aspace->pid, PIDTYPE_PID); if (task) { comm = kstrdup(task->comm, GFP_KERNEL); + put_task_struct(task); } else { comm = NULL; } diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 515ef80816a0..8c64ce7288f1 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -17,7 +17,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) int npages = obj->size >> PAGE_SHIFT; if (WARN_ON(!msm_obj->pages)) /* should have already pinned! 
*/ - return NULL; + return ERR_PTR(-ENOMEM); return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages); } diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 50d881794758..2dcb7d1cff7c 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, u64 addr = iova; unsigned int i; - for_each_sg(sgt->sgl, sg, sgt->nents, i) { + for_each_sgtable_sg(sgt, sg, i) { size_t size = sg->length; phys_addr_t phys = sg_phys(sg); diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index fea30e7aa9e8..084b6ae2a476 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -191,6 +191,9 @@ static int rd_open(struct inode *inode, struct file *file) file->private_data = rd; rd->open = true; + /* Reset fifo to clear any previously unread data: */ + rd->fifo.head = rd->fifo.tail = 0; + /* the parsing tools need to know gpu-id to know which * register database to load. */ diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h index 3d82b3c67dec..93f8f4f64578 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/atom.h +++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h @@ -160,14 +160,14 @@ nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc) static inline struct drm_encoder * nv50_head_atom_get_encoder(struct nv50_head_atom *atom) { - struct drm_encoder *encoder = NULL; + struct drm_encoder *encoder; /* We only ever have a single encoder */ drm_for_each_encoder_mask(encoder, atom->state.crtc->dev, atom->state.encoder_mask) - break; + return encoder; - return encoder; + return NULL; } #define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state) diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c index 66f32d965c72..5624a716e11c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crc.c +++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c @@ -411,9 +411,18 @@ void nv50_crc_atomic_check_outp(struct nv50_atom *atom) struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state); struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state); struct nv50_outp_atom *outp_atom; - struct nouveau_encoder *outp = - nv50_real_outp(nv50_head_atom_get_encoder(armh)); - struct drm_encoder *encoder = &outp->base.base; + struct nouveau_encoder *outp; + struct drm_encoder *encoder, *enc; + + enc = nv50_head_atom_get_encoder(armh); + if (!enc) + continue; + + outp = nv50_real_outp(enc); + if (!outp) + continue; + + encoder = &outp->base.base; if (!asyh->clr.crc) continue; @@ -464,8 +473,16 @@ void nv50_crc_atomic_set(struct nv50_head *head, struct drm_device *dev = crtc->dev; struct nv50_crc *crc = &head->crc; const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc; - struct nouveau_encoder *outp = - nv50_real_outp(nv50_head_atom_get_encoder(asyh)); + struct nouveau_encoder *outp; + struct drm_encoder *encoder; + + encoder = nv50_head_atom_get_encoder(asyh); + if (!encoder) + return; + + outp = nv50_real_outp(encoder); + if (!outp) + return; func->set_src(head, outp->or, nv50_crc_source_type(outp, asyh->crc.src), diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index c7a94c94dbf3..f2f3280c3a50 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -51,8 +51,9 @@ static bool nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE], struct 
nouveau_backlight *bl) { - const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL); - if (nb < 0 || nb >= 100) + const int nb = ida_alloc_max(&bl_ida, 99, GFP_KERNEL); + + if (nb < 0) return false; if (nb > 0) snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb); @@ -280,7 +281,7 @@ nouveau_backlight_init(struct drm_connector *connector) nv_encoder, ops, &props); if (IS_ERR(bl->dev)) { if (bl->id >= 0) - ida_simple_remove(&bl_ida, bl->id); + ida_free(&bl_ida, bl->id); ret = PTR_ERR(bl->dev); goto fail_alloc; } @@ -306,7 +307,7 @@ nouveau_backlight_fini(struct drm_connector *connector) return; if (bl->id >= 0) - ida_simple_remove(&bl_ida, bl->id); + ida_free(&bl_ida, bl->id); backlight_device_unregister(bl->dev); nv_conn->backlight = NULL; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index b4946b595d86..b57dcad8865f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -279,8 +279,10 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain, break; } - if (WARN_ON(pi < 0)) + if (WARN_ON(pi < 0)) { + kfree(nvbo); return ERR_PTR(-EINVAL); + } /* Disable compression if suitable settings couldn't be found. */ if (nvbo->comp && !vmm->page[pi].comp) { diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 4c992fd5bd68..9542fc63e796 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -500,7 +500,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector, connector->interlace_allowed = nv_encoder->caps.dp_interlace; else - connector->interlace_allowed = true; + connector->interlace_allowed = + drm->client.device.info.family < NV_DEVICE_INFO_V0_VOLTA; connector->doublescan_allowed = true; } else if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS || diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index f2ad6f49fb72..00128756dedb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -521,7 +521,7 @@ nouveau_display_hpd_work(struct work_struct *work) pm_runtime_mark_last_busy(drm->dev->dev); noop: - pm_runtime_put_sync(drm->dev->dev); + pm_runtime_put_autosuspend(dev->dev); } #ifdef CONFIG_ACPI @@ -543,7 +543,7 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val, * it's own hotplug events. 
*/ pm_runtime_put_autosuspend(drm->dev->dev); - } else if (ret == 0) { + } else if (ret == 0 || ret == -EINPROGRESS) { /* We've started resuming the GPU already, so * it will handle scheduling a full reprobe * itself diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 92987daa5e17..5e72e6cb2f84 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -679,7 +679,11 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm, goto out_free_dma; for (i = 0; i < npages; i += max) { - args.end = start + (max << PAGE_SHIFT); + if (args.start + (max << PAGE_SHIFT) > end) + args.end = end; + else + args.end = args.start + (max << PAGE_SHIFT); + ret = migrate_vma_setup(&args); if (ret) goto out_free_pfns; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 24ec5339efb4..a3c86499ff77 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -464,7 +464,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work) if (state == FBINFO_STATE_RUNNING) { nouveau_fbcon_hotplug_resume(drm->fbcon); pm_runtime_mark_last_busy(drm->dev->dev); - pm_runtime_put_sync(drm->dev->dev); + pm_runtime_put_autosuspend(drm->dev->dev); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index 5f5b87f99546..f08bda533bd9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -89,7 +89,6 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART, sg, robj); if (ret) { - nouveau_bo_ref(NULL, &nvbo); obj = ERR_PTR(ret); goto unlock; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index d0d52c1d4aee..950a3de3e116 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -123,7 +123,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev) mutex_init(&tdev->iommu.mutex); - if (iommu_present(&platform_bus_type)) { + if (device_iommu_mapped(dev)) { tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type); if (!tdev->iommu.domain) goto error; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c index 8bff14ae16b0..f0368d9a0154 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c @@ -33,7 +33,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size) { u32 p = *addr; - if (*addr > bios->image0_size && bios->imaged_addr) { + if (*addr >= bios->image0_size && bios->imaged_addr) { *addr -= bios->image0_size; *addr += bios->imaged_addr; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c index dc184e857f85..5214fd0658b8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c @@ -135,10 +135,10 @@ nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate, list_for_each_entry_from_reverse(cstate, &pstate->list, head) { if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp)) - break; + return cstate; } - return cstate; + return NULL; } static struct nvkm_cstate * @@ -169,6 +169,8 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei) if (!list_empty(&pstate->list)) 
{ cstate = nvkm_cstate_get(clk, pstate, cstatei); cstate = nvkm_cstate_find_best(clk, pstate, cstate); + if (!cstate) + return -EINVAL; } else { cstate = &pstate->base; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c index 7938722b4da1..d82529becfdc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c @@ -216,6 +216,7 @@ gm20b_pmu = { .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, .initmsg = gm20b_pmu_initmsg, + .reset = gf100_pmu_reset, }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index 3dfb3e8522f6..9f32982216b6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -23,7 +23,7 @@ */ #include "priv.h" -static void +void gp102_pmu_reset(struct nvkm_pmu *pmu) { struct nvkm_device *device = pmu->subdev.device; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c index 7f5f9d544836..0bd4b32ad863 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -83,6 +83,7 @@ gp10b_pmu = { .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, .initmsg = gm20b_pmu_initmsg, + .reset = gp102_pmu_reset, }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index b945ec320cd2..80c4cb861d40 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -41,6 +41,7 @@ int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32); bool gf100_pmu_enabled(struct nvkm_pmu *); void gf100_pmu_reset(struct nvkm_pmu *); +void gp102_pmu_reset(struct nvkm_pmu *pmu); void gk110_pmu_pgob(struct nvkm_pmu *, bool); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 6ccbc29c4ce4..d5b3123ed081 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1173,6 +1173,7 @@ static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports) default: break; } + of_node_put(port); } } @@ -1205,11 +1206,13 @@ static int dss_init_ports(struct dss_device *dss) default: break; } + of_node_put(port); } return 0; error: + of_node_put(port); __dss_uninit_ports(dss, i); return r; } diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c index bbdd086be7f5..4b92c6341490 100644 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c @@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts, ret = i2c_smbus_write_byte_data(ts->i2c, reg, val); if (ret) - dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret); + dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret); } static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val) @@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel) return 0; } -static int rpi_touchscreen_enable(struct drm_panel *panel) +static int rpi_touchscreen_prepare(struct drm_panel *panel) { struct rpi_touchscreen *ts = panel_to_ts(panel); int i; @@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel) rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01); msleep(100); + 
return 0; +} + +static int rpi_touchscreen_enable(struct drm_panel *panel) +{ + struct rpi_touchscreen *ts = panel_to_ts(panel); + /* Turn on the backlight. */ rpi_touchscreen_i2c_write(ts, REG_PWM, 255); @@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel, static const struct drm_panel_funcs rpi_touchscreen_funcs = { .disable = rpi_touchscreen_disable, .unprepare = rpi_touchscreen_noop, - .prepare = rpi_touchscreen_noop, + .prepare = rpi_touchscreen_prepare, .enable = rpi_touchscreen_enable, .get_modes = rpi_touchscreen_get_modes, }; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 4e298108a62e..5f4ce178c290 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -964,7 +964,7 @@ static const struct drm_display_mode ampire_am_1280800n3tzqw_t00h_mode = { static const struct panel_desc ampire_am_1280800n3tzqw_t00h = { .modes = &ampire_am_1280800n3tzqw_t00h_mode, .num_modes = 1, - .bpc = 6, + .bpc = 8, .size = { .width = 217, .height = 136, @@ -2432,6 +2432,7 @@ static const struct panel_desc innolux_g070y2_l01 = { .unprepare = 800, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, .connector_type = DRM_MODE_CONNECTOR_LVDS, }; @@ -2488,7 +2489,7 @@ static const struct panel_desc innolux_g121i1_l01 = { .enable = 200, .disable = 20, }, - .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, .connector_type = DRM_MODE_CONNECTOR_LVDS, }; @@ -2942,6 +2943,7 @@ static const struct display_timing logictechno_lt161010_2nh_timing = { static const struct panel_desc logictechno_lt161010_2nh = { .timings = &logictechno_lt161010_2nh_timing, .num_timings = 1, + .bpc = 6, .size = { .width = 154, .height = 86, @@ -2971,6 +2973,7 @@ static const struct display_timing logictechno_lt170410_2whc_timing = { static const struct panel_desc logictechno_lt170410_2whc = { .timings = &logictechno_lt170410_2whc_timing, .num_timings = 1, + .bpc = 8, .size = { .width = 217, .height = 136, diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index a70261809cdd..1dfc457bbefc 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -427,8 +427,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, if (args->retained) { if (args->madv == PANFROST_MADV_DONTNEED) - list_add_tail(&bo->base.madv_list, - &pfdev->shrinker_list); + list_move_tail(&bo->base.madv_list, - &pfdev->shrinker_list); else if (args->madv == PANFROST_MADV_WILLNEED) list_del_init(&bo->base.madv_list); } diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 7a854beef257..8f7af65a69e4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -484,7 +484,7 @@ err_map: err_pages: drm_gem_shmem_put_pages(&bo->base); err_bo: - drm_gem_object_put(&bo->base.base); + panfrost_gem_mapping_put(bomapping); return ret; } diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c index bdd883f4f0da..963a5d5e6987 100644 --- a/drivers/gpu/drm/pl111/pl111_versatile.c +++ b/drivers/gpu/drm/pl111/pl111_versatile.c @@ -402,6 +402,7 @@ static int pl111_vexpress_clcd_init(struct device *dev, struct device_node *np, if (of_device_is_compatible(child, "arm,pl111")) { has_coretile_clcd = true; ct_clcd = child; + of_node_put(child); break; } if
(of_device_is_compatible(child, "arm,hdlcd")) { diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore index 9c1a94153983..d8777383a64a 100644 --- a/drivers/gpu/drm/radeon/.gitignore +++ b/drivers/gpu/drm/radeon/.gitignore @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT mkregtable *_reg_safe.h diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index 6f60f4840cc5..52819e7f1fca 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: MIT config DRM_RADEON_USERPTR bool "Always enable userptr support" depends on DRM_RADEON diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 11c97edde54d..3d502f1bbfcb 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: MIT # # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 59cdadcece15..a5218747742b 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -2740,10 +2740,10 @@ static int ni_set_mc_special_registers(struct radeon_device *rdev, table->mc_reg_table_entry[k].mc_data[j] |= 0x100; } j++; - if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE) - return -EINVAL; break; case MC_SEQ_RESERVE_M >> 2: + if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; temp_reg = RREG32(MC_PMG_CMD_MRS1); table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2; table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2; @@ -2752,8 +2752,6 @@ static int ni_set_mc_special_registers(struct radeon_device *rdev, (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); j++; - if (j > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE) - return -EINVAL; break; default: break; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index e30834434442..ef111d460be2 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -473,6 +473,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode native_mode->vdisplay != 0 && native_mode->clock != 0) { mode = drm_mode_duplicate(dev, native_mode); + if (!mode) + return NULL; mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; drm_mode_set_name(mode); @@ -487,6 +489,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode * simpler. 
*/ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); + if (!mode) + return NULL; mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name); } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 266e3cbbd09b..8287410f471f 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1623,6 +1623,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, if (r) { /* delay GPU reset to resume */ radeon_fence_driver_force_completion(rdev, i); + } else { + /* finish executing delayed work */ + flush_delayed_work(&rdev->fence_drv[i].lockup_work); } } diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index 3d489d9609f2..2c2d1aea7460 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -658,8 +658,10 @@ static int rockchip_dp_probe(struct platform_device *pdev) if (dp->data->split_mode && device_property_read_bool(dev, "split-mode")) { struct rockchip_dp_device *secondary = rockchip_dp_find_by_id(dev->driver, !dp->id); - if (!secondary) - return -EPROBE_DEFER; + if (!secondary) { + ret = -EPROBE_DEFER; + goto err_dp_remove; + } dp->plat_data.right = secondary->adp; dp->plat_data.split_mode = true; @@ -670,7 +672,15 @@ static int rockchip_dp_probe(struct platform_device *pdev) device_property_read_u32(dev, "min-refresh-rate", &dp->min_refresh_rate); device_property_read_u32(dev, "max-refresh-rate", &dp->max_refresh_rate); - return component_add(dev, &rockchip_dp_component_ops); + ret = component_add(dev, &rockchip_dp_component_ops); + if (ret) + goto err_dp_remove; + + return 0; + +err_dp_remove: + analogix_dp_remove(dp->adp); + return ret; } static int rockchip_dp_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index 4c0a795d5e22..25aed790a9f0 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -1480,5 +1480,11 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = { .of_match_table = dw_mipi_dsi_rockchip_dt_ids, .pm = &dw_mipi_dsi_rockchip_pm_ops, .name = "dw-mipi-dsi-rockchip", + /* + * For dual-DSI display, one DSI pokes at the other DSI's + * drvdata in dw_mipi_dsi_rockchip_find_second(). This is not + * safe for asynchronous probe. 
+ */ + .probe_type = PROBE_FORCE_SYNCHRONOUS, }, }; diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 62488ac14923..089c00a8e7d4 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -527,8 +527,8 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc) struct drm_device *ddev = crtc->dev; struct drm_connector_list_iter iter; struct drm_connector *connector = NULL; - struct drm_encoder *encoder = NULL; - struct drm_bridge *bridge = NULL; + struct drm_encoder *encoder = NULL, *en_iter; + struct drm_bridge *bridge = NULL, *br_iter; struct drm_display_mode *mode = &crtc->state->adjusted_mode; struct videomode vm; u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h; @@ -538,15 +538,19 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc) int ret; /* get encoder from crtc */ - drm_for_each_encoder(encoder, ddev) - if (encoder->crtc == crtc) + drm_for_each_encoder(en_iter, ddev) + if (en_iter->crtc == crtc) { + encoder = en_iter; break; + } if (encoder) { /* get bridge from encoder */ - list_for_each_entry(bridge, &encoder->bridge_chain, chain_node) - if (bridge->encoder == encoder) + list_for_each_entry(br_iter, &encoder->bridge_chain, chain_node) + if (br_iter->encoder == encoder) { + bridge = br_iter; break; + } /* Get the connector from encoder */ drm_connector_list_iter_begin(ddev, &iter); diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 29861fc81b35..c5912fd53772 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -71,7 +71,6 @@ static int sun4i_drv_bind(struct device *dev) goto free_drm; } - dev_set_drvdata(dev, drm); drm->dev_private = drv; INIT_LIST_HEAD(&drv->frontend_list); INIT_LIST_HEAD(&drv->engine_list); @@ -112,6 +111,8 @@ static int sun4i_drv_bind(struct device *dev) drm_fbdev_generic_setup(drm, 32); + dev_set_drvdata(dev, drm); + return 0; finish_poll: @@ -128,6 +129,7 @@ static void sun4i_drv_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); + dev_set_drvdata(dev, NULL); drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 4f5efcace68e..51edb4244af7 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -531,7 +531,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, struct drm_display_mode *mode) { struct mipi_dsi_device *device = dsi->device; - unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8; + int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8; u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0; u32 basic_ctl = 0; size_t bytes; @@ -555,7 +555,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, * (4 bytes). 
Its minimal size is therefore 10 bytes */ #define HSA_PACKET_OVERHEAD 10 - hsa = max((unsigned int)HSA_PACKET_OVERHEAD, + hsa = max(HSA_PACKET_OVERHEAD, (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD); /* @@ -564,7 +564,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, * therefore 6 bytes */ #define HBP_PACKET_OVERHEAD 6 - hbp = max((unsigned int)HBP_PACKET_OVERHEAD, + hbp = max(HBP_PACKET_OVERHEAD, (mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD); /* @@ -574,7 +574,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, * 16 bytes */ #define HFP_PACKET_OVERHEAD 16 - hfp = max((unsigned int)HFP_PACKET_OVERHEAD, + hfp = max(HFP_PACKET_OVERHEAD, (mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD); /* @@ -583,7 +583,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, * bytes). Its minimal size is therefore 10 bytes. */ #define HBLK_PACKET_OVERHEAD 10 - hblk = max((unsigned int)HBLK_PACKET_OVERHEAD, + hblk = max(HBLK_PACKET_OVERHEAD, (mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp - HBLK_PACKET_OVERHEAD); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 2c6ebc328b24..318692ad9680 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1042,6 +1042,10 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev) struct host1x *host1x = dev_get_drvdata(dev->dev.parent); struct iommu_domain *domain; + /* Our IOMMU usage policy doesn't currently play well with GART */ + if (of_machine_is_compatible("nvidia,tegra20")) + return false; + /* * If the Tegra DRM clients are backed by an IOMMU, push buffers are * likely to be allocated beyond the 32-bit boundary if sufficient diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index b177525588c1..8ece0dd0b469 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c @@ -60,11 +60,13 @@ struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev, int tilcdc_add_component_encoder(struct drm_device *ddev) { struct tilcdc_drm_private *priv = ddev->dev_private; - struct drm_encoder *encoder; + struct drm_encoder *encoder = NULL, *iter; - list_for_each_entry(encoder, &ddev->mode_config.encoder_list, head) - if (encoder->possible_crtcs & (1 << priv->crtc->index)) + list_for_each_entry(iter, &ddev->mode_config.encoder_list, head) + if (iter->possible_crtcs & (1 << priv->crtc->index)) { + encoder = iter; break; + } if (!encoder) { dev_err(ddev->dev, "%s: No suitable encoder found\n", __func__); diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index c0bc2a18edde..9d0c127bdb0c 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ b/drivers/gpu/drm/tiny/st7735r.c @@ -175,6 +175,7 @@ MODULE_DEVICE_TABLE(of, st7735r_of_match); static const struct spi_device_id st7735r_id[] = { { "jd-t18003-t01", (uintptr_t)&jd_t18003_t01_cfg }, + { "rh128128t", (uintptr_t)&rh128128t_cfg }, { }, }; MODULE_DEVICE_TABLE(spi, st7735r_id); diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index edcfd8c120c4..209b5ceba3e0 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -400,9 +400,6 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe, udl_handle_damage(fb, 0, 0, fb->width, fb->height); - if (!crtc_state->mode_changed) - return; - /* enable display */ udl_crtc_write_mode_to_hw(crtc); } diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c 
b/drivers/gpu/drm/vc4/vc4_crtc.c index f4ccca922e44..79724fddfb4b 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -319,7 +319,8 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc) u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1; bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 || vc4_encoder->type == VC4_ENCODER_TYPE_DSI1); - u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24; + bool is_dsi1 = vc4_encoder->type == VC4_ENCODER_TYPE_DSI1; + u32 format = is_dsi1 ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24; u8 ppc = pv_data->pixels_per_clock; bool debug_dump_regs = false; @@ -345,7 +346,8 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc) PV_HORZB_HACTIVE)); CRTC_WRITE(PV_VERTA, - VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end, + VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end + + interlace, PV_VERTA_VBP) | VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start, PV_VERTA_VSYNC)); @@ -357,7 +359,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc) if (interlace) { CRTC_WRITE(PV_VERTA_EVEN, VC4_SET_FIELD(mode->crtc_vtotal - - mode->crtc_vsync_end - 1, + mode->crtc_vsync_end, PV_VERTA_VBP) | VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start, @@ -377,7 +379,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc) PV_VCONTROL_CONTINUOUS | (is_dsi ? PV_VCONTROL_DSI : 0) | PV_VCONTROL_INTERLACE | - VC4_SET_FIELD(mode->htotal * pixel_rep / 2, + VC4_SET_FIELD(mode->htotal * pixel_rep / (2 * ppc), PV_VCONTROL_ODD_DELAY)); CRTC_WRITE(PV_VSYNCD_EVEN, 0); } else { diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 839610f8092a..888aec1bbeee 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -246,6 +246,15 @@ static void vc4_match_add_drivers(struct device *dev, } } +static const struct of_device_id vc4_dma_range_matches[] = { + { .compatible = "brcm,bcm2711-hvs" }, + { .compatible = "brcm,bcm2835-hvs" }, + { .compatible = "brcm,bcm2835-v3d" }, + { .compatible = "brcm,cygnus-v3d" }, + { .compatible = "brcm,vc4-v3d" }, + {} +}; + static int vc4_drm_bind(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -263,6 +272,16 @@ static int vc4_drm_bind(struct device *dev) vc4_drm_driver.driver_features &= ~DRIVER_RENDER; of_node_put(node); + node = of_find_matching_node_and_match(NULL, vc4_dma_range_matches, + NULL); + if (node) { + ret = of_dma_configure(dev, node, true); + of_node_put(node); + + if (ret) + return ret; + } + vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base); if (IS_ERR(vc4)) return PTR_ERR(vc4); @@ -385,7 +404,12 @@ static int __init vc4_drm_register(void) if (ret) return ret; - return platform_driver_register(&vc4_platform_driver); + ret = platform_driver_register(&vc4_platform_driver); + if (ret) + platform_unregister_drivers(component_drivers, + ARRAY_SIZE(component_drivers)); + + return ret; } static void __exit vc4_drm_unregister(void) diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 9809c3a856c6..921463625d82 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -77,7 +77,6 @@ struct vc4_dev { struct vc4_hvs *hvs; struct vc4_v3d *v3d; struct vc4_dpi *dpi; - struct vc4_dsi *dsi1; struct vc4_vec *vec; struct vc4_txp *txp; diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index eaf276978ee7..0bda40c2d787 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ 
b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -181,8 +181,50 @@ #define DSI0_TXPKT_PIX_FIFO 0x20 /* AKA PIX_FIFO */ -#define DSI0_INT_STAT 0x24 -#define DSI0_INT_EN 0x28 +#define DSI0_INT_STAT 0x24 +#define DSI0_INT_EN 0x28 +# define DSI0_INT_FIFO_ERR BIT(25) +# define DSI0_INT_CMDC_DONE_MASK VC4_MASK(24, 23) +# define DSI0_INT_CMDC_DONE_SHIFT 23 +# define DSI0_INT_CMDC_DONE_NO_REPEAT 1 +# define DSI0_INT_CMDC_DONE_REPEAT 3 +# define DSI0_INT_PHY_DIR_RTF BIT(22) +# define DSI0_INT_PHY_D1_ULPS BIT(21) +# define DSI0_INT_PHY_D1_STOP BIT(20) +# define DSI0_INT_PHY_RXLPDT BIT(19) +# define DSI0_INT_PHY_RXTRIG BIT(18) +# define DSI0_INT_PHY_D0_ULPS BIT(17) +# define DSI0_INT_PHY_D0_LPDT BIT(16) +# define DSI0_INT_PHY_D0_FTR BIT(15) +# define DSI0_INT_PHY_D0_STOP BIT(14) +/* Signaled when the clock lane enters the given state. */ +# define DSI0_INT_PHY_CLK_ULPS BIT(13) +# define DSI0_INT_PHY_CLK_HS BIT(12) +# define DSI0_INT_PHY_CLK_FTR BIT(11) +/* Signaled on timeouts */ +# define DSI0_INT_PR_TO BIT(10) +# define DSI0_INT_TA_TO BIT(9) +# define DSI0_INT_LPRX_TO BIT(8) +# define DSI0_INT_HSTX_TO BIT(7) +/* Contention on a line when trying to drive the line low */ +# define DSI0_INT_ERR_CONT_LP1 BIT(6) +# define DSI0_INT_ERR_CONT_LP0 BIT(5) +/* Control error: incorrect line state sequence on data lane 0. */ +# define DSI0_INT_ERR_CONTROL BIT(4) +# define DSI0_INT_ERR_SYNC_ESC BIT(3) +# define DSI0_INT_RX2_PKT BIT(2) +# define DSI0_INT_RX1_PKT BIT(1) +# define DSI0_INT_CMD_PKT BIT(0) + +#define DSI0_INTERRUPTS_ALWAYS_ENABLED (DSI0_INT_ERR_SYNC_ESC | \ + DSI0_INT_ERR_CONTROL | \ + DSI0_INT_ERR_CONT_LP0 | \ + DSI0_INT_ERR_CONT_LP1 | \ + DSI0_INT_HSTX_TO | \ + DSI0_INT_LPRX_TO | \ + DSI0_INT_TA_TO | \ + DSI0_INT_PR_TO) + # define DSI1_INT_PHY_D3_ULPS BIT(30) # define DSI1_INT_PHY_D3_STOP BIT(29) # define DSI1_INT_PHY_D2_ULPS BIT(28) @@ -493,6 +535,18 @@ */ #define DSI1_ID 0x8c +struct vc4_dsi_variant { + /* Whether we're on bcm2835's DSI0 or DSI1. */ + unsigned int port; + + bool broken_axi_workaround; + + const char *debugfs_name; + const struct debugfs_reg32 *regs; + size_t nregs; + +}; + /* General DSI hardware state. */ struct vc4_dsi { struct platform_device *pdev; @@ -509,8 +563,7 @@ struct vc4_dsi { u32 *reg_dma_mem; dma_addr_t reg_paddr; - /* Whether we're on bcm2835's DSI0 or DSI1. */ - int port; + const struct vc4_dsi_variant *variant; /* DSI channel for the panel we're connected to. */ u32 channel; @@ -586,10 +639,10 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val) #define DSI_READ(offset) readl(dsi->regs + (offset)) #define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val) #define DSI_PORT_READ(offset) \ - DSI_READ(dsi->port ? DSI1_##offset : DSI0_##offset) + DSI_READ(dsi->variant->port ? DSI1_##offset : DSI0_##offset) #define DSI_PORT_WRITE(offset, val) \ - DSI_WRITE(dsi->port ? DSI1_##offset : DSI0_##offset, val) -#define DSI_PORT_BIT(bit) (dsi->port ? DSI1_##bit : DSI0_##bit) + DSI_WRITE(dsi->variant->port ? DSI1_##offset : DSI0_##offset, val) +#define DSI_PORT_BIT(bit) (dsi->variant->port ? 
DSI1_##bit : DSI0_##bit) /* VC4 DSI encoder KMS struct */ struct vc4_dsi_encoder { @@ -750,6 +803,9 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder) list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) { if (iter->funcs->disable) iter->funcs->disable(iter); + + if (iter == dsi->bridge) + break; } vc4_dsi_ulps(dsi, true); @@ -794,11 +850,9 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder, /* Find what divider gets us a faster clock than the requested * pixel clock. */ - for (divider = 1; divider < 8; divider++) { - if (parent_rate / divider < pll_clock) { - divider--; + for (divider = 1; divider < 255; divider++) { + if (parent_rate / (divider + 1) < pll_clock) break; - } } /* Now that we've picked a PLL divider, calculate back to its @@ -835,9 +889,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) unsigned long phy_clock; int ret; - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret) { - DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port); + DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port); return; } @@ -871,7 +925,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) DSI_PORT_WRITE(STAT, DSI_PORT_READ(STAT)); /* Set AFE CTR00/CTR1 to release powerdown of analog. */ - if (dsi->port == 0) { + if (dsi->variant->port == 0) { u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) | VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ)); @@ -883,6 +937,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) DSI_PORT_WRITE(PHY_AFEC0, afec0); + /* AFEC reset hold time */ + mdelay(1); + DSI_PORT_WRITE(PHY_AFEC1, VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_DLANE1) | VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_DLANE0) | @@ -1017,7 +1074,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) DSI_PORT_BIT(PHYC_CLANE_ENABLE) | ((dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? 0 : DSI_PORT_BIT(PHYC_HS_CLK_CONTINUOUS)) | - (dsi->port == 0 ? + (dsi->variant->port == 0 ? VC4_SET_FIELD(lpx - 1, DSI0_PHYC_ESC_CLK_LPDT) : VC4_SET_FIELD(lpx - 1, DSI1_PHYC_ESC_CLK_LPDT))); @@ -1043,18 +1100,15 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) DSI_DISP1_ENABLE); /* Ungate the block. */ - if (dsi->port == 0) + if (dsi->variant->port == 0) DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI0_CTRL_CTRL0); else DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN); /* Bring AFE out of reset. */ - if (dsi->port == 0) { - } else { - DSI_PORT_WRITE(PHY_AFEC0, - DSI_PORT_READ(PHY_AFEC0) & - ~DSI1_PHY_AFEC0_RESET); - } + DSI_PORT_WRITE(PHY_AFEC0, + DSI_PORT_READ(PHY_AFEC0) & + ~DSI_PORT_BIT(PHY_AFEC0_RESET)); vc4_dsi_ulps(dsi, false); @@ -1173,13 +1227,28 @@ static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host, /* Enable the appropriate interrupt for the transfer completion. 
*/ dsi->xfer_result = 0; reinit_completion(&dsi->xfer_completion); - DSI_PORT_WRITE(INT_STAT, DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF); - if (msg->rx_len) { - DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED | - DSI1_INT_PHY_DIR_RTF)); + if (dsi->variant->port == 0) { + DSI_PORT_WRITE(INT_STAT, + DSI0_INT_CMDC_DONE_MASK | DSI1_INT_PHY_DIR_RTF); + if (msg->rx_len) { + DSI_PORT_WRITE(INT_EN, (DSI0_INTERRUPTS_ALWAYS_ENABLED | + DSI0_INT_PHY_DIR_RTF)); + } else { + DSI_PORT_WRITE(INT_EN, + (DSI0_INTERRUPTS_ALWAYS_ENABLED | + VC4_SET_FIELD(DSI0_INT_CMDC_DONE_NO_REPEAT, + DSI0_INT_CMDC_DONE))); + } } else { - DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED | - DSI1_INT_TXPKT1_DONE)); + DSI_PORT_WRITE(INT_STAT, + DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF); + if (msg->rx_len) { + DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED | + DSI1_INT_PHY_DIR_RTF)); + } else { + DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED | + DSI1_INT_TXPKT1_DONE)); + } } /* Send the packet. */ @@ -1196,7 +1265,7 @@ static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host, ret = dsi->xfer_result; } - DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED); + DSI_PORT_WRITE(INT_EN, DSI_PORT_BIT(INTERRUPTS_ALWAYS_ENABLED)); if (ret) goto reset_fifo_and_return; @@ -1242,7 +1311,7 @@ reset_fifo_and_return: DSI_PORT_BIT(CTRL_RESET_FIFOS)); DSI_PORT_WRITE(TXPKT1C, 0); - DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED); + DSI_PORT_WRITE(INT_EN, DSI_PORT_BIT(INTERRUPTS_ALWAYS_ENABLED)); return ret; } @@ -1305,8 +1374,16 @@ static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = { .mode_fixup = vc4_dsi_encoder_mode_fixup, }; +static const struct vc4_dsi_variant bcm2835_dsi1_variant = { + .port = 1, + .broken_axi_workaround = true, + .debugfs_name = "dsi1_regs", + .regs = dsi1_regs, + .nregs = ARRAY_SIZE(dsi1_regs), +}; + static const struct of_device_id vc4_dsi_dt_match[] = { - { .compatible = "brcm,bcm2835-dsi1", (void *)(uintptr_t)1 }, + { .compatible = "brcm,bcm2835-dsi1", &bcm2835_dsi1_variant }, {} }; @@ -1317,7 +1394,7 @@ static void dsi_handle_error(struct vc4_dsi *dsi, if (!(stat & bit)) return; - DRM_ERROR("DSI%d: %s error\n", dsi->port, type); + DRM_ERROR("DSI%d: %s error\n", dsi->variant->port, type); *ret = IRQ_HANDLED; } @@ -1351,26 +1428,28 @@ static irqreturn_t vc4_dsi_irq_handler(int irq, void *data) DSI_PORT_WRITE(INT_STAT, stat); dsi_handle_error(dsi, &ret, stat, - DSI1_INT_ERR_SYNC_ESC, "LPDT sync"); + DSI_PORT_BIT(INT_ERR_SYNC_ESC), "LPDT sync"); dsi_handle_error(dsi, &ret, stat, - DSI1_INT_ERR_CONTROL, "data lane 0 sequence"); + DSI_PORT_BIT(INT_ERR_CONTROL), "data lane 0 sequence"); dsi_handle_error(dsi, &ret, stat, - DSI1_INT_ERR_CONT_LP0, "LP0 contention"); + DSI_PORT_BIT(INT_ERR_CONT_LP0), "LP0 contention"); dsi_handle_error(dsi, &ret, stat, - DSI1_INT_ERR_CONT_LP1, "LP1 contention"); + DSI_PORT_BIT(INT_ERR_CONT_LP1), "LP1 contention"); dsi_handle_error(dsi, &ret, stat, - DSI1_INT_HSTX_TO, "HSTX timeout"); + DSI_PORT_BIT(INT_HSTX_TO), "HSTX timeout"); dsi_handle_error(dsi, &ret, stat, - DSI1_INT_LPRX_TO, "LPRX timeout"); + DSI_PORT_BIT(INT_LPRX_TO), "LPRX timeout"); dsi_handle_error(dsi, &ret, stat, - DSI1_INT_TA_TO, "turnaround timeout"); + DSI_PORT_BIT(INT_TA_TO), "turnaround timeout"); dsi_handle_error(dsi, &ret, stat, - DSI1_INT_PR_TO, "peripheral reset timeout"); + DSI_PORT_BIT(INT_PR_TO), "peripheral reset timeout"); - if (stat & (DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF)) { + if (stat & ((dsi->variant->port ? 
DSI1_INT_TXPKT1_DONE : + DSI0_INT_CMDC_DONE_MASK) | + DSI_PORT_BIT(INT_PHY_DIR_RTF))) { complete(&dsi->xfer_completion); ret = IRQ_HANDLED; - } else if (stat & DSI1_INT_HSTX_TO) { + } else if (stat & DSI_PORT_BIT(INT_HSTX_TO)) { complete(&dsi->xfer_completion); dsi->xfer_result = -ETIMEDOUT; ret = IRQ_HANDLED; @@ -1390,12 +1469,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi) struct device *dev = &dsi->pdev->dev; const char *parent_name = __clk_get_name(dsi->pll_phy_clock); static const struct { - const char *dsi0_name, *dsi1_name; + const char *name; int div; } phy_clocks[] = { - { "dsi0_byte", "dsi1_byte", 8 }, - { "dsi0_ddr2", "dsi1_ddr2", 4 }, - { "dsi0_ddr", "dsi1_ddr", 2 }, + { "byte", 8 }, + { "ddr2", 4 }, + { "ddr", 2 }, }; int i; @@ -1411,8 +1490,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi) for (i = 0; i < ARRAY_SIZE(phy_clocks); i++) { struct clk_fixed_factor *fix = &dsi->phy_clocks[i]; struct clk_init_data init; + char clk_name[16]; int ret; + snprintf(clk_name, sizeof(clk_name), + "dsi%u_%s", dsi->variant->port, phy_clocks[i].name); + /* We just use core fixed factor clock ops for the PHY * clocks. The clocks are actually gated by the * PHY_AFEC0_DDRCLK_EN bits, which we should be @@ -1429,10 +1512,7 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi) memset(&init, 0, sizeof(init)); init.parent_names = &parent_name; init.num_parents = 1; - if (dsi->port == 1) - init.name = phy_clocks[i].dsi1_name; - else - init.name = phy_clocks[i].dsi0_name; + init.name = clk_name; init.ops = &clk_fixed_factor_ops; ret = devm_clk_hw_register(dev, &fix->hw); @@ -1451,7 +1531,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_dsi *dsi = dev_get_drvdata(dev); struct vc4_dsi_encoder *vc4_dsi_encoder; struct drm_panel *panel; @@ -1463,7 +1542,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) if (!match) return -ENODEV; - dsi->port = (uintptr_t)match->data; + dsi->variant = match->data; vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder), GFP_KERNEL); @@ -1471,7 +1550,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) return -ENOMEM; INIT_LIST_HEAD(&dsi->bridge_chain); - vc4_dsi_encoder->base.type = VC4_ENCODER_TYPE_DSI1; + vc4_dsi_encoder->base.type = dsi->variant->port ? + VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0; vc4_dsi_encoder->dsi = dsi; dsi->encoder = &vc4_dsi_encoder->base.base; @@ -1480,13 +1560,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(dsi->regs); dsi->regset.base = dsi->regs; - if (dsi->port == 0) { - dsi->regset.regs = dsi0_regs; - dsi->regset.nregs = ARRAY_SIZE(dsi0_regs); - } else { - dsi->regset.regs = dsi1_regs; - dsi->regset.nregs = ARRAY_SIZE(dsi1_regs); - } + dsi->regset.regs = dsi->variant->regs; + dsi->regset.nregs = dsi->variant->nregs; if (DSI_PORT_READ(ID) != DSI_ID_VALUE) { dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n", @@ -1498,7 +1573,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) * from the ARM. It does handle writes from the DMA engine, * so set up a channel for talking to it. 
*/ - if (dsi->port == 1) { + if (dsi->variant->broken_axi_workaround) { dsi->reg_dma_mem = dma_alloc_coherent(dev, 4, &dsi->reg_dma_paddr, GFP_KERNEL); @@ -1604,9 +1679,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - if (dsi->port == 1) - vc4->dsi1 = dsi; - drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI); drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs); @@ -1622,10 +1694,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) */ list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain); - if (dsi->port == 0) - vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset); - else - vc4_debugfs_add_regset32(drm, "dsi1_regs", &dsi->regset); + vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset); pm_runtime_enable(dev); @@ -1635,8 +1704,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) static void vc4_dsi_unbind(struct device *dev, struct device *master, void *data) { - struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_dsi *dsi = dev_get_drvdata(dev); if (dsi->bridge) @@ -1648,9 +1715,6 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master, */ list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain); drm_encoder_cleanup(dsi->encoder); - - if (dsi->port == 1) - vc4->dsi1 = NULL; } static const struct component_ops vc4_dsi_ops = { diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index a308f2d05d17..08175c3dd374 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -83,6 +83,8 @@ #define CEC_CLOCK_FREQ 40000 #define VC4_HSM_MID_CLOCK 149985000 +#define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000) + static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *)m->private; @@ -209,7 +211,9 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) static void vc4_hdmi_connector_reset(struct drm_connector *connector) { drm_atomic_helper_connector_reset(connector); - drm_atomic_helper_connector_tv_reset(connector); + + if (connector->state) + drm_atomic_helper_connector_tv_reset(connector); } static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { @@ -518,12 +522,12 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, VC4_HDMI_VERTA_VFP) | VC4_SET_FIELD(mode->crtc_vdisplay, VC4_HDMI_VERTA_VAL)); u32 vertb = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) | - VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end, + VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end + + interlaced, VC4_HDMI_VERTB_VBP)); u32 vertb_even = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) | VC4_SET_FIELD(mode->crtc_vtotal - - mode->crtc_vsync_end - - interlaced, + mode->crtc_vsync_end, VC4_HDMI_VERTB_VBP)); HDMI_WRITE(HDMI_HORZA, @@ -561,13 +565,13 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay, VC5_HDMI_VERTA_VFP) | VC4_SET_FIELD(mode->crtc_vdisplay, VC5_HDMI_VERTA_VAL)); - u32 vertb = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) | + u32 vertb = (VC4_SET_FIELD(mode->htotal >> (2 - pixel_rep), + VC5_HDMI_VERTB_VSPO) | VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end, VC4_HDMI_VERTB_VBP)); u32 vertb_even = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) | VC4_SET_FIELD(mode->crtc_vtotal - - mode->crtc_vsync_end - - interlaced, + mode->crtc_vsync_end - interlaced, 
VC4_HDMI_VERTB_VBP)); HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021); @@ -1031,22 +1035,12 @@ static int vc4_hdmi_audio_hw_params(struct snd_pcm_substream *substream, audio_packet_config |= VC4_SET_FIELD(channel_mask, VC4_HDMI_AUDIO_PACKET_CEA_MASK); - /* Set the MAI threshold. This logic mimics the firmware's. */ - if (vc4_hdmi->audio.samplerate > 96000) { - HDMI_WRITE(HDMI_MAI_THR, - VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW)); - } else if (vc4_hdmi->audio.samplerate > 48000) { - HDMI_WRITE(HDMI_MAI_THR, - VC4_SET_FIELD(0x14, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW)); - } else { - HDMI_WRITE(HDMI_MAI_THR, - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) | - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) | - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQLOW)); - } + /* Set the MAI threshold */ + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICHIGH) | + VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICLOW) | + VC4_SET_FIELD(0x06, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_DREQLOW)); HDMI_WRITE(HDMI_MAI_CONFIG, VC4_HDMI_MAI_CONFIG_BIT_REVERSE | @@ -1231,12 +1225,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) struct snd_soc_card *card = &vc4_hdmi->audio.card; struct device *dev = &vc4_hdmi->pdev->dev; const __be32 *addr; - int index; + int index, len; int ret; - if (!of_find_property(dev->of_node, "dmas", NULL)) { + if (!of_find_property(dev->of_node, "dmas", &len) || !len) { dev_warn(dev, - "'dmas' DT property is missing, no HDMI audio\n"); + "'dmas' DT property is missing or empty, no HDMI audio\n"); return 0; } @@ -1947,7 +1941,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = { .encoder_type = VC4_ENCODER_TYPE_HDMI0, .debugfs_name = "hdmi0_regs", .card_name = "vc4-hdmi-0", - .max_pixel_clock = 297000000, + .max_pixel_clock = HDMI_14_MAX_TMDS_CLK, .registers = vc5_hdmi_hdmi0_fields, .num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields), .phy_lane_mapping = { @@ -1973,7 +1967,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = { .encoder_type = VC4_ENCODER_TYPE_HDMI1, .debugfs_name = "hdmi1_regs", .card_name = "vc4-hdmi-1", - .max_pixel_clock = 297000000, + .max_pixel_clock = HDMI_14_MAX_TMDS_CLK, .registers = vc5_hdmi_hdmi1_fields, .num_registers = ARRAY_SIZE(vc5_hdmi_hdmi1_fields), .phy_lane_mapping = { diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index ad691571d759..95fa6fc052a7 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -564,6 +564,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) struct vc4_hvs *hvs = NULL; int ret; u32 dispctrl; + u32 reg; hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL); if (!hvs) @@ -635,6 +636,26 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) vc4->hvs = hvs; + reg = HVS_READ(SCALER_DISPECTRL); + reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK; + HVS_WRITE(SCALER_DISPECTRL, + reg | VC4_SET_FIELD(0, SCALER_DISPECTRL_DSP2_MUX)); + + reg = HVS_READ(SCALER_DISPCTRL); + reg &= ~SCALER_DISPCTRL_DSP3_MUX_MASK; + HVS_WRITE(SCALER_DISPCTRL, + reg | VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX)); + + reg = HVS_READ(SCALER_DISPEOLN); + reg &= ~SCALER_DISPEOLN_DSP4_MUX_MASK; + HVS_WRITE(SCALER_DISPEOLN, + reg | VC4_SET_FIELD(3, SCALER_DISPEOLN_DSP4_MUX)); + + reg = HVS_READ(SCALER_DISPDITHER); + reg &= ~SCALER_DISPDITHER_DSP5_MUX_MASK; + 
HVS_WRITE(SCALER_DISPDITHER, + reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX)); + dispctrl = HVS_READ(SCALER_DISPCTRL); dispctrl |= SCALER_DISPCTRL_ENABLE; @@ -642,10 +663,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) SCALER_DISPCTRL_DISPEIRQ(1) | SCALER_DISPCTRL_DISPEIRQ(2); - /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise - * be unused. - */ - dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK; dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ | SCALER_DISPCTRL_SLVWREIRQ | SCALER_DISPCTRL_SLVRDEIRQ | @@ -659,7 +676,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) SCALER_DISPCTRL_DSPEISLUR(1) | SCALER_DISPCTRL_DSPEISLUR(2) | SCALER_DISPCTRL_SCLEIRQ); - dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); HVS_WRITE(SCALER_DISPCTRL, dispctrl); diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index af4b8944a603..4df222a83049 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -303,16 +303,16 @@ static int vc4_plane_margins_adj(struct drm_plane_state *pstate) adjhdisplay, crtc_state->mode.hdisplay); vc4_pstate->crtc_x += left; - if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - left) - vc4_pstate->crtc_x = crtc_state->mode.hdisplay - left; + if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - right) + vc4_pstate->crtc_x = crtc_state->mode.hdisplay - right; adjvdisplay = crtc_state->mode.vdisplay - (top + bottom); vc4_pstate->crtc_y = DIV_ROUND_CLOSEST(vc4_pstate->crtc_y * adjvdisplay, crtc_state->mode.vdisplay); vc4_pstate->crtc_y += top; - if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - top) - vc4_pstate->crtc_y = crtc_state->mode.vdisplay - top; + if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - bottom) + vc4_pstate->crtc_y = crtc_state->mode.vdisplay - bottom; vc4_pstate->crtc_w = DIV_ROUND_CLOSEST(vc4_pstate->crtc_w * adjhdisplay, @@ -332,7 +332,6 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); struct drm_framebuffer *fb = state->fb; struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0); - u32 subpixel_src_mask = (1 << 16) - 1; int num_planes = fb->format->num_planes; struct drm_crtc_state *crtc_state; u32 h_subsample = fb->format->hsub; @@ -354,18 +353,15 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) for (i = 0; i < num_planes; i++) vc4_state->offsets[i] = bo->paddr + fb->offsets[i]; - /* We don't support subpixel source positioning for scaling. 
*/ - if ((state->src.x1 & subpixel_src_mask) || - (state->src.x2 & subpixel_src_mask) || - (state->src.y1 & subpixel_src_mask) || - (state->src.y2 & subpixel_src_mask)) { - return -EINVAL; - } - - vc4_state->src_x = state->src.x1 >> 16; - vc4_state->src_y = state->src.y1 >> 16; - vc4_state->src_w[0] = (state->src.x2 - state->src.x1) >> 16; - vc4_state->src_h[0] = (state->src.y2 - state->src.y1) >> 16; + /* + * We don't support subpixel source positioning for scaling, + * but fractional coordinates can be generated by clipping + * so just round for now + */ + vc4_state->src_x = DIV_ROUND_CLOSEST(state->src.x1, 1 << 16); + vc4_state->src_y = DIV_ROUND_CLOSEST(state->src.y1, 1 << 16); + vc4_state->src_w[0] = DIV_ROUND_CLOSEST(state->src.x2, 1 << 16) - vc4_state->src_x; + vc4_state->src_h[0] = DIV_ROUND_CLOSEST(state->src.y2, 1 << 16) - vc4_state->src_y; vc4_state->crtc_x = state->dst.x1; vc4_state->crtc_y = state->dst.y1; diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index d13502ae973d..f8fa09dfea5d 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -295,12 +295,18 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn, if (WARN_ON(i == ARRAY_SIZE(drm_fmts))) return; - ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI | + ctrl = TXP_GO | TXP_EI | VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) | VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT); if (fb->format->has_alpha) ctrl |= TXP_ALPHA_ENABLE; + else + /* + * If TXP_ALPHA_ENABLE isn't set and TXP_ALPHA_INVERT is, the + * hardware will force the output padding to be 0xff. + */ + ctrl |= TXP_ALPHA_INVERT; gem = drm_fb_cma_get_gem_obj(fb, 0); TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]); diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c index bd5b8eb58b18..c6bd168a5898 100644 --- a/drivers/gpu/drm/vc4/vc4_vec.c +++ b/drivers/gpu/drm/vc4/vc4_vec.c @@ -257,7 +257,7 @@ static void vc4_vec_ntsc_j_mode_set(struct vc4_vec *vec) static const struct drm_display_mode ntsc_mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 13500, 720, 720 + 14, 720 + 14 + 64, 720 + 14 + 64 + 60, 0, - 480, 480 + 3, 480 + 3 + 3, 480 + 3 + 3 + 16, 0, + 480, 480 + 7, 480 + 7 + 6, 525, 0, DRM_MODE_FLAG_INTERLACE) }; @@ -279,7 +279,7 @@ static void vc4_vec_pal_m_mode_set(struct vc4_vec *vec) static const struct drm_display_mode pal_mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 13500, 720, 720 + 20, 720 + 20 + 64, 720 + 20 + 64 + 60, 0, - 576, 576 + 2, 576 + 2 + 3, 576 + 2 + 3 + 20, 0, + 576, 576 + 4, 576 + 4 + 6, 625, 0, DRM_MODE_FLAG_INTERLACE) }; diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 3f7d5797dac8..6516f605c771 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -177,6 +177,8 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector) DRM_DEBUG("add mode: %dx%d\n", width, height); mode = drm_cvt_mode(connector->dev, width, height, 60, false, false, false); + if (!mode) + return count; mode->type |= DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); count++; diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index c8da7adc6b30..33b8ebab178a 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -470,8 +470,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, spin_unlock(&vgdev->display_info_lock); /* not in cache - need to talk to hw */ - 
virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver, - &cache_ent); + ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver, + &cache_ent); + if (ret) + return ret; virtio_gpu_notify(vgdev); copy_exit: diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index bc15db7e0f14..9028ba669e49 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -259,14 +259,14 @@ static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane, } static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_plane_state *state) { struct virtio_gpu_framebuffer *vgfb; - if (!plane->state->fb) + if (!state->fb) return; - vgfb = to_virtio_gpu_framebuffer(plane->state->fb); + vgfb = to_virtio_gpu_framebuffer(state->fb); if (vgfb->fence) { dma_fence_put(&vgfb->fence->f); vgfb->fence = NULL; diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 5e40fa0f5e8f..e98a29d243c0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -601,7 +601,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); - if (use_dma_api) + if (virtio_gpu_is_shmem(bo) && use_dma_api) dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, shmem->pages, DMA_TO_DEVICE); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index c59806d40e15..97d9d2557447 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -498,7 +498,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par, static int vmw_fb_kms_framebuffer(struct fb_info *info) { - struct drm_mode_fb_cmd2 mode_cmd; + struct drm_mode_fb_cmd2 mode_cmd = {0}; struct vmw_fb_par *par = info->par; struct fb_var_screeninfo *var = &info->var; struct drm_framebuffer *cur_fb; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 4bf0f5ec4fc2..2b6590344468 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -949,6 +949,10 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv) struct drm_device *dev = dev_priv->dev; int i, ret; + /* Screen objects won't work if GMR's aren't available */ + if (!dev_priv->has_gmr) + return -ENOSYS; + if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) { DRM_INFO("Not using screen objects," " missing cap SCREEN_OBJECT_2\n"); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 8659558b518d..9f674a8d5009 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -198,6 +198,10 @@ static void host1x_setup_sid_table(struct host1x *host) static bool host1x_wants_iommu(struct host1x *host1x) { + /* Our IOMMU usage policy doesn't currently play well with GART */ + if (of_machine_is_compatible("nvidia,tegra20")) + return false; + /* * If we support addressing a maximum of 32 bits of physical memory * and if the host1x firewall is enabled, there's no need to enable diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c index b4a31d506fcc..74eca68891ad 100644 --- a/drivers/gpu/ipu-v3/ipu-di.c +++ b/drivers/gpu/ipu-v3/ipu-di.c @@ -451,8 +451,9 @@ static void ipu_di_config_clock(struct ipu_di *di, error = rate / (sig->mode.pixelclock / 1000); - dev_dbg(di->ipu->dev, " IPU clock can give %lu with 
divider %u, error %d.%u%%\n", - rate, div, (signed)(error - 1000) / 10, error % 10); + dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n", + rate, div, error < 1000 ? '-' : '+', + abs(error - 1000) / 10, abs(error - 1000) % 10); /* Allow a 1% error */ if (error < 1010 && error >= 990) { diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index 6b665931147d..ef73fef1b3e3 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -830,6 +830,8 @@ static const struct hid_device_id alps_id[] = { USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) }, + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, + USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY) }, { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) }, { } diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c index 74ad8bf98bfd..e8c5e3ac9fff 100644 --- a/drivers/hid/hid-bigbenff.c +++ b/drivers/hid/hid-bigbenff.c @@ -347,6 +347,12 @@ static int bigben_probe(struct hid_device *hid, bigben->report = list_entry(report_list->next, struct hid_report, list); + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + error = -ENODEV; + goto error_hw_stop; + } + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); set_bit(FF_RUMBLE, hidinput->input->ffbit); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 5550c943f985..eaaf732f0630 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1310,6 +1310,9 @@ static s32 snto32(__u32 value, unsigned n) if (!value || !n) return 0; + if (n > 32) + n = 32; + switch (n) { case 8: return ((__s8)value); case 16: return ((__s16)value); diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 477baa30889c..172f20e88c6c 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -788,6 +788,11 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, data->word = le16_to_cpup((__le16 *)buf); break; case I2C_SMBUS_I2C_BLOCK_DATA: + if (read_length > I2C_SMBUS_BLOCK_MAX) { + ret = -EINVAL; + goto power_normal; + } + memcpy(data->block + 1, buf, read_length); break; case I2C_SMBUS_BLOCK_DATA: diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c index 0e8f424025fe..838673303f77 100644 --- a/drivers/hid/hid-elan.c +++ b/drivers/hid/hid-elan.c @@ -188,7 +188,6 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER); if (ret) { hid_err(hdev, "Failed to init elan MT slots: %d\n", ret); - input_free_device(input); return ret; } @@ -200,7 +199,6 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) hid_err(hdev, "Failed to register elan input device: %d\n", ret); input_mt_destroy_slots(input); - input_free_device(input); return ret; } diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index 978ee2aab2d4..b7704dd6809d 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -498,7 +498,7 @@ static int mousevsc_probe(struct hv_device *device, ret = hid_add_device(hid_dev); if (ret) - goto probe_err1; + goto probe_err2; ret = hid_parse(hid_dev); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 476cf05dc097..2b7f6ea26761 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -257,6 +257,7 @@ #define USB_DEVICE_ID_CH_AXIS_295 0x001c #define 
USB_VENDOR_ID_CHERRY 0x046a +#define USB_DEVICE_ID_CHERRY_MOUSE_000C 0x000c #define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027 @@ -389,7 +390,9 @@ #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401 #define USB_DEVICE_ID_HP_X2 0x074d #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755 +#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706 +#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A #define USB_VENDOR_ID_ELECOM 0x056e #define USB_DEVICE_ID_ELECOM_BM084 0x0061 @@ -743,6 +746,7 @@ #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5 +#define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe #define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019 @@ -824,6 +828,7 @@ #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 #define USB_DEVICE_ID_MADCATZ_RAT5 0x1705 #define USB_DEVICE_ID_MADCATZ_RAT9 0x1709 +#define USB_DEVICE_ID_MADCATZ_MMO7 0x1713 #define USB_VENDOR_ID_MCC 0x09db #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 @@ -870,6 +875,7 @@ #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd #define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0 +#define USB_DEVICE_ID_MS_MOUSE_0783 0x0783 #define USB_VENDOR_ID_MOJO 0x8282 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 @@ -1084,6 +1090,7 @@ #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0 #define USB_DEVICE_ID_SONY_PS5_CONTROLLER 0x0ce6 +#define USB_DEVICE_ID_SONY_PS5_CONTROLLER_2 0x0df2 #define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002 @@ -1148,7 +1155,9 @@ #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 +#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5 +#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017 0x73f6 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7 #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 @@ -1302,6 +1311,7 @@ #define USB_VENDOR_ID_PRIMAX 0x0461 #define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22 +#define USB_DEVICE_ID_PRIMAX_MOUSE_4E2A 0x4e2a #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 #define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72 #define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F 0x4d0f diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index a17d1dda9570..75a4d8d6bb0f 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -324,6 +324,10 @@ static const struct hid_device_id hid_battery_quirks[] = { HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN), HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15), + HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN), + HID_BATTERY_QUIRK_IGNORE }, {} }; diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c index 742c052b0110..b8cce9c196d8 100644 --- a/drivers/hid/hid-ite.c +++ b/drivers/hid/hid-ite.c @@ -18,10 +18,21 @@ static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); if (quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) 
{ + /* For Acer Aspire Switch 10 SW5-012 keyboard-dock */ if (*rsize == 188 && rdesc[162] == 0x81 && rdesc[163] == 0x02) { - hid_info(hdev, "Fixing up ITE keyboard report descriptor\n"); + hid_info(hdev, "Fixing up Acer Sw5-012 ITE keyboard report descriptor\n"); rdesc[163] = HID_MAIN_ITEM_RELATIVE; } + /* For Acer One S1002/S1003 keyboard-dock */ + if (*rsize == 188 && rdesc[185] == 0x81 && rdesc[186] == 0x02) { + hid_info(hdev, "Fixing up Acer S1002/S1003 ITE keyboard report descriptor\n"); + rdesc[186] = HID_MAIN_ITEM_RELATIVE; + } + /* For Acer Aspire Switch 10E (SW3-016) keyboard-dock */ + if (*rsize == 210 && rdesc[184] == 0x81 && rdesc[185] == 0x02) { + hid_info(hdev, "Fixing up Acer Aspire Switch 10E (SW3-016) ITE keyboard report descriptor\n"); + rdesc[185] = HID_MAIN_ITEM_RELATIVE; + } } return rdesc; @@ -103,7 +114,18 @@ static const struct hid_device_id ite_devices[] = { /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_SYNAPTICS, - USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) }, + USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002), + .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT }, + /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */ + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_SYNAPTICS, + USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003), + .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT }, + /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */ + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_SYNAPTICS, + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017), + .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT }, { } }; MODULE_DEVICE_TABLE(hid, ite_devices); diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c index c2c66ceca132..7d82f8d426bb 100644 --- a/drivers/hid/hid-led.c +++ b/drivers/hid/hid-led.c @@ -366,7 +366,7 @@ static const struct hidled_config hidled_configs[] = { .type = DREAM_CHEEKY, .name = "Dream Cheeky Webmail Notifier", .short_name = "dream_cheeky", - .max_brightness = 31, + .max_brightness = 63, .num_leds = 1, .report_size = 9, .report_type = RAW_REQUEST, diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c index 5e6a0cef2a06..e3fcf1353fb3 100644 --- a/drivers/hid/hid-lg4ff.c +++ b/drivers/hid/hid-lg4ff.c @@ -872,6 +872,12 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att return -ENOMEM; i = strlen(lbuf); + + if (i == 0) { + kfree(lbuf); + return -EINVAL; + } + if (lbuf[i-1] == '\n') { if (i == 1) { kfree(lbuf); diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index fc4c07459753..28158d2f2352 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -387,7 +387,7 @@ static int magicmouse_raw_event(struct hid_device *hdev, magicmouse_raw_event(hdev, report, data + 2, data[1]); magicmouse_raw_event(hdev, report, data + 2 + data[1], size - 2 - data[1]); - break; + return 0; default: return 0; } diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c index 4211b9839209..de52e9f7bb8c 100644 --- a/drivers/hid/hid-mcp2221.c +++ b/drivers/hid/hid-mcp2221.c @@ -385,6 +385,9 @@ static int mcp_smbus_write(struct mcp2221 *mcp, u16 addr, data_len = 7; break; default: + if (len > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + memcpy(&mcp->txbuf[5], buf, len); data_len = len + 5; } diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index e5a3704b9fe8..a78ce16d4782 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1155,7 +1155,7 @@ static 
void mt_touch_report(struct hid_device *hid, int contact_count = -1; /* sticky fingers release in progress, abort */ - if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) + if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) return; scantime = *app->scantime; @@ -1236,7 +1236,7 @@ static void mt_touch_report(struct hid_device *hid, del_timer(&td->release_timer); } - clear_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); + clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); } static int mt_touch_input_configured(struct hid_device *hdev, @@ -1671,11 +1671,11 @@ static void mt_expired_timeout(struct timer_list *t) * An input report came in just before we release the sticky fingers, * it will take care of the sticky fingers. */ - if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) + if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags)) return; if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags)) mt_release_contacts(hdev); - clear_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); + clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); } static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) @@ -1990,6 +1990,12 @@ static const struct hid_device_id mt_devices[] = { USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) }, + /* Lenovo X12 TAB Gen 1 */ + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_LENOVO, + USB_DEVICE_ID_LENOVO_X12_TAB) }, + /* MosArt panels */ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, MT_USB_DEVICE(USB_VENDOR_ID_ASUS, @@ -2129,6 +2135,9 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_GOOGLE, HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) }, + { .driver_data = MT_CLS_GOOGLE, + HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE, + USB_DEVICE_ID_GOOGLE_WHISKERS) }, /* Generic MT device */ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) }, diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c index ab7c82c2e886..0b58763bfd30 100644 --- a/drivers/hid/hid-playstation.c +++ b/drivers/hid/hid-playstation.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include #include @@ -38,11 +40,13 @@ struct ps_device { uint8_t battery_capacity; int battery_status; + const char *input_dev_name; /* Name of primary input device. */ uint8_t mac_address[6]; /* Note: stored in little endian order. */ uint32_t hw_version; uint32_t fw_version; int (*parse_report)(struct ps_device *dev, struct hid_report *report, u8 *data, int size); + void (*remove)(struct ps_device *dev); }; /* Calibration data for playstation motion sensors. */ @@ -53,6 +57,13 @@ struct ps_calibration_data { int sens_denom; }; +struct ps_led_info { + const char *name; + const char *color; + enum led_brightness (*brightness_get)(struct led_classdev *cdev); + int (*brightness_set)(struct led_classdev *cdev, enum led_brightness); +}; + /* Seed values for DualShock4 / DualSense CRC32 for different report types. */ #define PS_INPUT_CRC32_SEED 0xA1 #define PS_OUTPUT_CRC32_SEED 0xA2 @@ -97,6 +108,9 @@ struct ps_calibration_data { #define DS_STATUS_CHARGING GENMASK(7, 4) #define DS_STATUS_CHARGING_SHIFT 4 +/* Feature version from DualSense Firmware Info report. */ +#define DS_FEATURE_VERSION(major, minor) ((major & 0xff) << 8 | (minor & 0xff)) + /* * Status of a DualSense touch point contact. 
* Contact IDs, with highest bit set are 'inactive' @@ -115,6 +129,7 @@ struct ps_calibration_data { #define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3) #define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4) #define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1) +#define DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2 BIT(2) #define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4) #define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1) @@ -132,6 +147,9 @@ struct dualsense { struct input_dev *sensors; struct input_dev *touchpad; + /* Update version is used as a feature/capability version. */ + uint16_t update_version; + /* Calibration data for accelerometer and gyroscope. */ struct ps_calibration_data accel_calib_data[3]; struct ps_calibration_data gyro_calib_data[3]; @@ -142,11 +160,13 @@ struct dualsense { uint32_t sensor_timestamp_us; /* Compatible rumble state */ + bool use_vibration_v2; bool update_rumble; uint8_t motor_left; uint8_t motor_right; /* RGB lightbar */ + struct led_classdev_mc lightbar; bool update_lightbar; uint8_t lightbar_red; uint8_t lightbar_green; @@ -163,6 +183,7 @@ struct dualsense { struct led_classdev player_leds[5]; struct work_struct output_worker; + bool output_worker_initialized; void *output_report_dmabuf; uint8_t output_seq; /* Sequence number for output report. */ }; @@ -288,6 +309,9 @@ static const struct {int x; int y; } ps_gamepad_hat_mapping[] = { {0, 0}, }; +static inline void dualsense_schedule_work(struct dualsense *ds); +static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue); + /* * Add a new ps_device to ps_devices if it doesn't exist. * Return error on duplicate device, which can happen if the same @@ -525,6 +549,71 @@ static int ps_get_report(struct hid_device *hdev, uint8_t report_id, uint8_t *bu return 0; } +static int ps_led_register(struct ps_device *ps_dev, struct led_classdev *led, + const struct ps_led_info *led_info) +{ + int ret; + + led->name = devm_kasprintf(&ps_dev->hdev->dev, GFP_KERNEL, + "%s:%s:%s", ps_dev->input_dev_name, led_info->color, led_info->name); + + if (!led->name) + return -ENOMEM; + + led->brightness = 0; + led->max_brightness = 1; + led->flags = LED_CORE_SUSPENDRESUME; + led->brightness_get = led_info->brightness_get; + led->brightness_set_blocking = led_info->brightness_set; + + ret = devm_led_classdev_register(&ps_dev->hdev->dev, led); + if (ret) { + hid_err(ps_dev->hdev, "Failed to register LED %s: %d\n", led_info->name, ret); + return ret; + } + + return 0; +} + +/* Register a DualSense/DualShock4 RGB lightbar represented by a multicolor LED. 
*/ +static int ps_lightbar_register(struct ps_device *ps_dev, struct led_classdev_mc *lightbar_mc_dev, + int (*brightness_set)(struct led_classdev *, enum led_brightness)) +{ + struct hid_device *hdev = ps_dev->hdev; + struct mc_subled *mc_led_info; + struct led_classdev *led_cdev; + int ret; + + mc_led_info = devm_kmalloc_array(&hdev->dev, 3, sizeof(*mc_led_info), + GFP_KERNEL | __GFP_ZERO); + if (!mc_led_info) + return -ENOMEM; + + mc_led_info[0].color_index = LED_COLOR_ID_RED; + mc_led_info[1].color_index = LED_COLOR_ID_GREEN; + mc_led_info[2].color_index = LED_COLOR_ID_BLUE; + + lightbar_mc_dev->subled_info = mc_led_info; + lightbar_mc_dev->num_colors = 3; + + led_cdev = &lightbar_mc_dev->led_cdev; + led_cdev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s:rgb:indicator", + ps_dev->input_dev_name); + if (!led_cdev->name) + return -ENOMEM; + led_cdev->brightness = 255; + led_cdev->max_brightness = 255; + led_cdev->brightness_set_blocking = brightness_set; + + ret = devm_led_classdev_multicolor_register(&hdev->dev, lightbar_mc_dev); + if (ret < 0) { + hid_err(hdev, "Cannot register multicolor LED device\n"); + return ret; + } + + return 0; +} + static struct input_dev *ps_sensors_create(struct hid_device *hdev, int accel_range, int accel_res, int gyro_range, int gyro_res) { @@ -614,15 +703,12 @@ static ssize_t hardware_version_show(struct device *dev, static DEVICE_ATTR_RO(hardware_version); -static struct attribute *ps_device_attributes[] = { +static struct attribute *ps_device_attrs[] = { &dev_attr_firmware_version.attr, &dev_attr_hardware_version.attr, NULL }; - -static const struct attribute_group ps_device_attribute_group = { - .attrs = ps_device_attributes, -}; +ATTRIBUTE_GROUPS(ps_device); static int dualsense_get_calibration_data(struct dualsense *ds) { @@ -714,6 +800,7 @@ err_free: return ret; } + static int dualsense_get_firmware_info(struct dualsense *ds) { uint8_t *buf; @@ -733,6 +820,15 @@ static int dualsense_get_firmware_info(struct dualsense *ds) ds->base.hw_version = get_unaligned_le32(&buf[24]); ds->base.fw_version = get_unaligned_le32(&buf[28]); + /* Update version is some kind of feature version. It is distinct from + * the firmware version as there can be many different variations of a + * controller over time with the same physical shell, but with different + * PCBs and other internal changes. The update version (internal name) is + * used as a means to detect what features are available and change behavior. + * Note: the version is different between DualSense and DualSense Edge. 
+ */ + ds->update_version = get_unaligned_le16(&buf[44]); + err_free: kfree(buf); return ret; @@ -761,6 +857,53 @@ err_free: return ret; } +static int dualsense_lightbar_set_brightness(struct led_classdev *cdev, + enum led_brightness brightness) +{ + struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev); + struct dualsense *ds = container_of(mc_cdev, struct dualsense, lightbar); + uint8_t red, green, blue; + + led_mc_calc_color_components(mc_cdev, brightness); + red = mc_cdev->subled_info[0].brightness; + green = mc_cdev->subled_info[1].brightness; + blue = mc_cdev->subled_info[2].brightness; + + dualsense_set_lightbar(ds, red, green, blue); + return 0; +} + +static enum led_brightness dualsense_player_led_get_brightness(struct led_classdev *led) +{ + struct hid_device *hdev = to_hid_device(led->dev->parent); + struct dualsense *ds = hid_get_drvdata(hdev); + + return !!(ds->player_leds_state & BIT(led - ds->player_leds)); +} + +static int dualsense_player_led_set_brightness(struct led_classdev *led, enum led_brightness value) +{ + struct hid_device *hdev = to_hid_device(led->dev->parent); + struct dualsense *ds = hid_get_drvdata(hdev); + unsigned long flags; + unsigned int led_index; + + spin_lock_irqsave(&ds->base.lock, flags); + + led_index = led - ds->player_leds; + if (value == LED_OFF) + ds->player_leds_state &= ~BIT(led_index); + else + ds->player_leds_state |= BIT(led_index); + + ds->update_player_leds = true; + spin_unlock_irqrestore(&ds->base.lock, flags); + + dualsense_schedule_work(ds); + + return 0; +} + static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_output_report *rp, void *buf) { @@ -800,6 +943,16 @@ static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_ } } +static inline void dualsense_schedule_work(struct dualsense *ds) +{ + unsigned long flags; + + spin_lock_irqsave(&ds->base.lock, flags); + if (ds->output_worker_initialized) + schedule_work(&ds->output_worker); + spin_unlock_irqrestore(&ds->base.lock, flags); +} + /* * Helper function to send DualSense output reports. Applies a CRC at the end of a report * for Bluetooth reports. @@ -838,7 +991,10 @@ static void dualsense_output_worker(struct work_struct *work) if (ds->update_rumble) { /* Select classic rumble style haptics and enable it. */ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT; - common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION; + if (ds->use_vibration_v2) + common->valid_flag2 |= DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2; + else + common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION; common->motor_left = ds->motor_left; common->motor_right = ds->motor_right; ds->update_rumble = false; @@ -960,7 +1116,7 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r spin_unlock_irqrestore(&ps_dev->lock, flags); /* Schedule updating of microphone state at hardware level. 
*/ - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); } ds->last_btn_mic_state = btn_mic_state; @@ -1075,10 +1231,22 @@ static int dualsense_play_effect(struct input_dev *dev, void *data, struct ff_ef ds->motor_right = effect->u.rumble.weak_magnitude / 256; spin_unlock_irqrestore(&ds->base.lock, flags); - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); return 0; } +static void dualsense_remove(struct ps_device *ps_dev) +{ + struct dualsense *ds = container_of(ps_dev, struct dualsense, base); + unsigned long flags; + + spin_lock_irqsave(&ds->base.lock, flags); + ds->output_worker_initialized = false; + spin_unlock_irqrestore(&ds->base.lock, flags); + + cancel_work_sync(&ds->output_worker); +} + static int dualsense_reset_leds(struct dualsense *ds) { struct dualsense_output_report report; @@ -1106,12 +1274,16 @@ static int dualsense_reset_leds(struct dualsense *ds) static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue) { + unsigned long flags; + + spin_lock_irqsave(&ds->base.lock, flags); ds->update_lightbar = true; ds->lightbar_red = red; ds->lightbar_green = green; ds->lightbar_blue = blue; + spin_unlock_irqrestore(&ds->base.lock, flags); - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); } static void dualsense_set_player_leds(struct dualsense *ds) @@ -1134,7 +1306,7 @@ static void dualsense_set_player_leds(struct dualsense *ds) ds->update_player_leds = true; ds->player_leds_state = player_ids[player_id]; - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); } static struct ps_device *dualsense_create(struct hid_device *hdev) @@ -1142,7 +1314,20 @@ static struct ps_device *dualsense_create(struct hid_device *hdev) struct dualsense *ds; struct ps_device *ps_dev; uint8_t max_output_report_size; - int ret; + int i, ret; + + static const struct ps_led_info player_leds_info[] = { + { LED_FUNCTION_PLAYER1, "white", dualsense_player_led_get_brightness, + dualsense_player_led_set_brightness }, + { LED_FUNCTION_PLAYER2, "white", dualsense_player_led_get_brightness, + dualsense_player_led_set_brightness }, + { LED_FUNCTION_PLAYER3, "white", dualsense_player_led_get_brightness, + dualsense_player_led_set_brightness }, + { LED_FUNCTION_PLAYER4, "white", dualsense_player_led_get_brightness, + dualsense_player_led_set_brightness }, + { LED_FUNCTION_PLAYER5, "white", dualsense_player_led_get_brightness, + dualsense_player_led_set_brightness } + }; ds = devm_kzalloc(&hdev->dev, sizeof(*ds), GFP_KERNEL); if (!ds) @@ -1160,7 +1345,9 @@ static struct ps_device *dualsense_create(struct hid_device *hdev) ps_dev->battery_capacity = 100; /* initial value until parse_report. */ ps_dev->battery_status = POWER_SUPPLY_STATUS_UNKNOWN; ps_dev->parse_report = dualsense_parse_report; + ps_dev->remove = dualsense_remove; INIT_WORK(&ds->output_worker, dualsense_output_worker); + ds->output_worker_initialized = true; hid_set_drvdata(hdev, ds); max_output_report_size = sizeof(struct dualsense_output_report_bt); @@ -1181,6 +1368,21 @@ static struct ps_device *dualsense_create(struct hid_device *hdev) return ERR_PTR(ret); } + /* Original DualSense firmware simulated classic controller rumble through + * its new haptics hardware. It felt different from classic rumble users + * were used to. Since then new firmwares were introduced to change behavior + * and make this new 'v2' behavior default on PlayStation and other platforms. 
+ * The original DualSense requires a new enough firmware as bundled with PS5 + * software released in 2021. DualSense Edge supports it out of the box. + * Both devices also support the old mode, but it is not really used. + */ + if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) { + /* Feature version 2.21 introduced a new vibration method. */ + ds->use_vibration_v2 = ds->update_version >= DS_FEATURE_VERSION(2, 21); + } else if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) { + ds->use_vibration_v2 = true; + } + ret = ps_devices_list_add(ps_dev); if (ret) return ERR_PTR(ret); @@ -1196,6 +1398,8 @@ static struct ps_device *dualsense_create(struct hid_device *hdev) ret = PTR_ERR(ds->gamepad); goto err; } + /* Use gamepad input device name as primary device name for e.g. LEDs */ + ps_dev->input_dev_name = dev_name(&ds->gamepad->dev); ds->sensors = ps_sensors_create(hdev, DS_ACC_RANGE, DS_ACC_RES_PER_G, DS_GYRO_RANGE, DS_GYRO_RES_PER_DEG_S); @@ -1223,8 +1427,21 @@ static struct ps_device *dualsense_create(struct hid_device *hdev) if (ret) goto err; + ret = ps_lightbar_register(ps_dev, &ds->lightbar, dualsense_lightbar_set_brightness); + if (ret) + goto err; + + /* Set default lightbar color. */ dualsense_set_lightbar(ds, 0, 0, 128); /* blue */ + for (i = 0; i < ARRAY_SIZE(player_leds_info); i++) { + const struct ps_led_info *led_info = &player_leds_info[i]; + + ret = ps_led_register(ps_dev, &ds->player_leds[i], led_info); + if (ret < 0) + goto err; + } + ret = ps_device_set_player_id(ps_dev); if (ret) { hid_err(hdev, "Failed to assign player id for DualSense: %d\n", ret); @@ -1282,7 +1499,8 @@ static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id) goto err_stop; } - if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) { + if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER || + hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) { dev = dualsense_create(hdev); if (IS_ERR(dev)) { hid_err(hdev, "Failed to create dualsense.\n"); @@ -1291,12 +1509,6 @@ static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id) } } - ret = devm_device_add_group(&hdev->dev, &ps_device_attribute_group); - if (ret) { - hid_err(hdev, "Failed to register sysfs nodes.\n"); - goto err_close; - } - return ret; err_close: @@ -1313,6 +1525,9 @@ static void ps_remove(struct hid_device *hdev) ps_devices_list_remove(dev); ps_device_release_player_id(dev); + if (dev->remove) + dev->remove(dev); + hid_hw_close(hdev); hid_hw_stop(hdev); } @@ -1320,6 +1535,8 @@ static void ps_remove(struct hid_device *hdev) static const struct hid_device_id ps_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) }, { } }; MODULE_DEVICE_TABLE(hid, ps_devices); @@ -1330,6 +1547,9 @@ static struct hid_driver ps_driver = { .probe = ps_probe, .remove = ps_remove, .raw_event = ps_raw_event, + .driver = { + .dev_groups = ps_device_groups, + }, }; static int __init ps_init(void) diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 2ab71d717bb0..1efde40e5136 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -54,6 +54,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET }, { 
HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS }, @@ -122,6 +123,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_MOUSE_0783), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS }, @@ -146,6 +148,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4E2A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D65), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL }, @@ -609,6 +612,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7) }, #endif #if IS_ENABLED(CONFIG_HID_SAMSUNG) { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c index 26373b82fe81..6da80e442fdd 100644 --- a/drivers/hid/hid-roccat.c +++ b/drivers/hid/hid-roccat.c @@ -257,6 +257,8 @@ int roccat_report_event(int minor, u8 const *data) if (!new_value) return -ENOMEM; + mutex_lock(&device->cbuf_lock); + report = &device->cbuf[device->cbuf_end]; /* passing NULL is safe */ @@ -276,6 +278,8 @@ int roccat_report_event(int minor, u8 const *data) reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE; } + mutex_unlock(&device->cbuf_lock); + wake_up_interruptible(&device->wait); return 0; } diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c index c7bf14c01960..b84e975977c4 100644 --- a/drivers/hid/hid-saitek.c +++ b/drivers/hid/hid-saitek.c @@ -187,6 +187,8 @@ static const struct hid_device_id saitek_devices[] = { .driver_data = SAITEK_RELEASE_MODE_RAT7 }, { 
HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7), .driver_data = SAITEK_RELEASE_MODE_MMO7 }, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7), + .driver_data = SAITEK_RELEASE_MODE_MMO7 }, { } }; diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index a3b151b29bd7..fc616db4231b 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam, int ret; r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0]; + if (!r) { + hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n"); + return -EINVAL; + } + if (hid_report_len(r) < 64) return -EINVAL; @@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam, int ret; r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0]; + if (!r) { + hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to send\n"); + return -EINVAL; + } + if (hid_report_len(r) < 64) return -EINVAL; diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c index 4edb24195704..e4811d37ca77 100644 --- a/drivers/hid/hid-uclogic-core.c +++ b/drivers/hid/hid-uclogic-core.c @@ -172,6 +172,7 @@ static int uclogic_probe(struct hid_device *hdev, * than the pen, so use QUIRK_MULTI_INPUT for all tablets. */ hdev->quirks |= HID_QUIRK_MULTI_INPUT; + hdev->quirks |= HID_QUIRK_HIDINPUT_FORCE; /* Allocate and assign driver data */ drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL); diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 2eee5e31c2b7..fade7fcf6a14 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -346,10 +346,13 @@ static int hidraw_release(struct inode * inode, struct file * file) unsigned int minor = iminor(inode); struct hidraw_list *list = file->private_data; unsigned long flags; + int i; mutex_lock(&minors_lock); spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags); + for (i = list->tail; i < list->head; i++) + kfree(list->buffer[i].value); list_del(&list->node); spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags); kfree(list); diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h index 5ffd0da3cf1f..65af0ebef79f 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid.h +++ b/drivers/hid/intel-ish-hid/ishtp-hid.h @@ -110,7 +110,7 @@ struct report_list { * @multi_packet_cnt: Count of fragmented packet count * * This structure is used to store completion flags and per client data like - * like report description, number of HID devices etc. + * report description, number of HID devices etc. */ struct ishtp_cl_data { /* completion flags */ diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c index 1cc157126fce..c0d69303e3b0 100644 --- a/drivers/hid/intel-ish-hid/ishtp/client.c +++ b/drivers/hid/intel-ish-hid/ishtp/client.c @@ -626,13 +626,14 @@ static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb) } /** - * ipc_tx_callback() - IPC tx callback function + * ipc_tx_send() - IPC tx send function * @prm: Pointer to client device instance * - * Send message over IPC either first time or on callback on previous message - * completion + * Send a message over IPC. The message will be split into fragments + * if it is bigger than the IPC FIFO size, and all + * fragments will be sent one by one. 
*/ -static void ipc_tx_callback(void *prm) +static void ipc_tx_send(void *prm) { struct ishtp_cl *cl = prm; struct ishtp_cl_tx_ring *cl_msg; @@ -677,32 +678,41 @@ static void ipc_tx_callback(void *prm) list); rem = cl_msg->send_buf.size - cl->tx_offs; - ishtp_hdr.host_addr = cl->host_client_id; - ishtp_hdr.fw_addr = cl->fw_client_id; - ishtp_hdr.reserved = 0; - pmsg = cl_msg->send_buf.data + cl->tx_offs; + while (rem > 0) { + ishtp_hdr.host_addr = cl->host_client_id; + ishtp_hdr.fw_addr = cl->fw_client_id; + ishtp_hdr.reserved = 0; + pmsg = cl_msg->send_buf.data + cl->tx_offs; - if (rem <= dev->mtu) { - ishtp_hdr.length = rem; - ishtp_hdr.msg_complete = 1; - cl->sending = 0; - list_del_init(&cl_msg->list); /* Must be before write */ - spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); - /* Submit to IPC queue with no callback */ - ishtp_write_message(dev, &ishtp_hdr, pmsg); - spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); - list_add_tail(&cl_msg->list, &cl->tx_free_list.list); - ++cl->tx_ring_free_size; - spin_unlock_irqrestore(&cl->tx_free_list_spinlock, - tx_free_flags); - } else { - /* Send IPC fragment */ - spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); - cl->tx_offs += dev->mtu; - ishtp_hdr.length = dev->mtu; - ishtp_hdr.msg_complete = 0; - ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl); + if (rem <= dev->mtu) { + /* Last fragment or only one packet */ + ishtp_hdr.length = rem; + ishtp_hdr.msg_complete = 1; + /* Submit to IPC queue with no callback */ + ishtp_write_message(dev, &ishtp_hdr, pmsg); + cl->tx_offs = 0; + cl->sending = 0; + + break; + } else { + /* Send IPC fragment */ + ishtp_hdr.length = dev->mtu; + ishtp_hdr.msg_complete = 0; + /* All fragments submitted to IPC queue with no callback */ + ishtp_write_message(dev, &ishtp_hdr, pmsg); + cl->tx_offs += dev->mtu; + rem = cl_msg->send_buf.size - cl->tx_offs; + } } + + list_del_init(&cl_msg->list); + spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); + + spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); + list_add_tail(&cl_msg->list, &cl->tx_free_list.list); + ++cl->tx_ring_free_size; + spin_unlock_irqrestore(&cl->tx_free_list_spinlock, + tx_free_flags); } /** @@ -720,7 +730,7 @@ static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev, return; cl->tx_offs = 0; - ipc_tx_callback(cl); + ipc_tx_send(cl); ++cl->send_msg_cnt_ipc; } diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 329bb1a46f90..4dbf69078387 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2124,7 +2124,7 @@ static int wacom_register_inputs(struct wacom *wacom) error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac); if (error) { - /* no pad in use on this interface */ + /* no pad events using this interface */ input_free_device(pad_input_dev); wacom_wac->pad_input = NULL; pad_input_dev = NULL; } diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index d90bfa8b7313..d8d127fcc82a 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -638,9 +638,26 @@ static int wacom_intuos_id_mangle(int tool_id) return (tool_id & ~0xFFF) << 4 | (tool_id & 0xFFF); } +static bool wacom_is_art_pen(int tool_id) +{ + bool is_art_pen = false; + + switch (tool_id) { + case 0x885: /* Intuos3 Marker Pen */ + case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */ + case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */ + is_art_pen = true; + break; + } + return is_art_pen; +} + static int wacom_intuos_get_tool_type(int tool_id) { - int tool_type; + int tool_type = 
BTN_TOOL_PEN; + + if (wacom_is_art_pen(tool_id)) + return tool_type; switch (tool_id) { case 0x812: /* Inking pen */ @@ -655,12 +672,9 @@ static int wacom_intuos_get_tool_type(int tool_id) case 0x852: case 0x823: /* Intuos3 Grip Pen */ case 0x813: /* Intuos3 Classic Pen */ - case 0x885: /* Intuos3 Marker Pen */ case 0x802: /* Intuos4/5 13HD/24HD General Pen */ - case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */ case 0x8e2: /* IntuosHT2 pen */ case 0x022: - case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */ case 0x10842: /* MobileStudio Pro Pro Pen slim */ case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */ case 0x16802: /* Cintiq 13HD Pro Pen */ @@ -718,10 +732,6 @@ static int wacom_intuos_get_tool_type(int tool_id) case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */ tool_type = BTN_TOOL_AIRBRUSH; break; - - default: /* Unknown tool */ - tool_type = BTN_TOOL_PEN; - break; } return tool_type; } @@ -2006,7 +2016,6 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev, wacom_wac->has_mute_touch_switch = true; usage->type = EV_SW; usage->code = SW_MUTE_DEVICE; - features->device_type |= WACOM_DEVICETYPE_PAD; break; case WACOM_HID_WD_TOUCHSTRIP: wacom_map_usage(input, usage, field, EV_ABS, ABS_RX, 0); @@ -2086,6 +2095,30 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field wacom_wac->hid_data.inrange_state |= value; } + /* Process touch switch state first since it is reported through the touch interface, + * which is independent of the pad interface. In the case when there are no other pad + * events, the pad interface will not even be created. + */ + if ((equivalent_usage == WACOM_HID_WD_MUTE_DEVICE) || + (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)) { + if (wacom_wac->shared->touch_input) { + bool *is_touch_on = &wacom_wac->shared->is_touch_on; + + if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value) + *is_touch_on = !(*is_touch_on); + else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF) + *is_touch_on = value; + + input_report_switch(wacom_wac->shared->touch_input, + SW_MUTE_DEVICE, !(*is_touch_on)); + input_sync(wacom_wac->shared->touch_input); + } + return; + } + + if (!input) + return; + switch (equivalent_usage) { case WACOM_HID_WD_TOUCHRING: /* @@ -2121,22 +2154,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field input_event(input, usage->type, usage->code, 0); break; - case WACOM_HID_WD_MUTE_DEVICE: - case WACOM_HID_WD_TOUCHONOFF: - if (wacom_wac->shared->touch_input) { - bool *is_touch_on = &wacom_wac->shared->is_touch_on; - - if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value) - *is_touch_on = !(*is_touch_on); - else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF) - *is_touch_on = value; - - input_report_switch(wacom_wac->shared->touch_input, - SW_MUTE_DEVICE, !(*is_touch_on)); - input_sync(wacom_wac->shared->touch_input); - } - break; - case WACOM_HID_WD_MODE_CHANGE: if (wacom_wac->is_direct_mode != value) { wacom_wac->is_direct_mode = value; @@ -2312,6 +2329,9 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field } return; case HID_DG_TWIST: + /* don't modify the value if the pen doesn't support the feature */ + if (!wacom_is_art_pen(wacom_wac->id[0])) return; + /* * Userspace expects pen twist to have its zero point when * the buttons/finger is on the tablet's left. 
HID values @@ -2763,7 +2783,7 @@ void wacom_wac_event(struct hid_device *hdev, struct hid_field *field, /* usage tests must precede field tests */ if (WACOM_BATTERY_USAGE(usage)) wacom_wac_battery_event(hdev, field, usage, value); - else if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input) + else if (WACOM_PAD_FIELD(field)) wacom_wac_pad_event(hdev, field, usage, value); else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input) wacom_wac_pen_event(hdev, field, usage, value); diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c index 44a3f5660c10..eb9820158318 100644 --- a/drivers/hsi/controllers/omap_ssi_core.c +++ b/drivers/hsi/controllers/omap_ssi_core.c @@ -524,6 +524,7 @@ static int ssi_probe(struct platform_device *pd) if (!childpdev) { err = -ENODEV; dev_err(&pd->dev, "failed to create ssi controller port\n"); + of_node_put(child); goto out3; } } diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c index a0cb5be246e1..b9495b720f1b 100644 --- a/drivers/hsi/controllers/omap_ssi_port.c +++ b/drivers/hsi/controllers/omap_ssi_port.c @@ -230,10 +230,10 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch) if (msg->ttype == HSI_MSG_READ) { err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, DMA_FROM_DEVICE); - if (err < 0) { + if (!err) { dev_dbg(&ssi->device, "DMA map SG failed !\n"); pm_runtime_put_autosuspend(omap_port->pdev); - return err; + return -EIO; } csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT | SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT | @@ -247,10 +247,10 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch) } else { err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, DMA_TO_DEVICE); - if (err < 0) { + if (!err) { dev_dbg(&ssi->device, "DMA map SG failed !\n"); pm_runtime_put_autosuspend(omap_port->pdev); - return err; + return -EIO; } csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT | SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT | diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig index 210e532ac277..79e5356a737a 100644 --- a/drivers/hv/Kconfig +++ b/drivers/hv/Kconfig @@ -17,7 +17,6 @@ config HYPERV_TIMER config HYPERV_UTILS tristate "Microsoft Hyper-V Utilities driver" depends on HYPERV && CONNECTOR && NLS - depends on PTP_1588_CLOCK_OPTIONAL help Select this option to enable the Hyper-V Utilities. diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 6476bfe193af..5b902adb0d1b 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -350,7 +350,7 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel) * execute: * * (a) In the "normal (i.e., not resuming from hibernation)" path, - * the full barrier in smp_store_mb() guarantees that the store + * the full barrier in virt_store_mb() guarantees that the store * is propagated to all CPUs before the add_channel_work work * is queued. In turn, add_channel_work is queued before the * channel's ring buffer is allocated/initialized and the @@ -362,14 +362,14 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel) * recv_int_page before retrieving the channel pointer from the * array of channels. * - * (b) In the "resuming from hibernation" path, the smp_store_mb() + * (b) In the "resuming from hibernation" path, the virt_store_mb() * guarantees that the store is propagated to all CPUs before * the VMBus connection is marked as ready for the resume event * (cf. check_ready_for_resume_event()). 
The interrupt handler * of the VMBus driver and vmbus_chan_sched() can not run before * vmbus_bus_resume() has completed execution (cf. resume_noirq). */ - smp_store_mb( + virt_store_mb( vmbus_connection.channels[channel->offermsg.child_relid], channel); } @@ -501,13 +501,17 @@ static void vmbus_add_channel_work(struct work_struct *work) * Add the new device to the bus. This will kick off device-driver * binding which eventually invokes the device driver's AddDevice() * method. + * + * If vmbus_device_register() fails, the 'device_obj' is freed in + * vmbus_device_release() as called by device_unregister() in the + * error path of vmbus_device_register(). In the outside error + * path, there's no need to free it. */ ret = vmbus_device_register(newchannel->device_obj); if (ret != 0) { pr_err("unable to add child device object (relid %d)\n", newchannel->offermsg.child_relid); - kfree(newchannel->device_obj); goto err_deq_chan; } @@ -606,6 +610,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) */ if (newchannel->offermsg.offer.sub_channel_index == 0) { mutex_unlock(&vmbus_connection.channel_mutex); + cpus_read_unlock(); /* * Don't call free_channel(), because newchannel->kobj * is not initialized yet. diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 356e22159e83..769851b6e74c 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -378,7 +378,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel, static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi) { u32 priv_read_loc = rbi->priv_read_index; - u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index); + u32 write_loc; + + /* + * The Hyper-V host writes the packet data, then uses + * store_release() to update the write_index. Use load_acquire() + * here to prevent loads of the packet data from being re-ordered + * before the read of the write_index and potentially getting + * stale data. 
+ */ + write_loc = virt_load_acquire(&rbi->ring_buffer->write_index); if (write_loc >= priv_read_loc) return write_loc - priv_read_loc; diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 362da2a83b47..e99400f3ae1d 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1351,7 +1351,7 @@ static void vmbus_isr(void) tasklet_schedule(&hv_cpu->msg_dpc); } - add_interrupt_randomness(hv_get_vector(), 0); + add_interrupt_randomness(hv_get_vector()); } /* @@ -2020,6 +2020,7 @@ int vmbus_device_register(struct hv_device *child_device_obj) ret = device_register(&child_device_obj->device); if (ret) { pr_err("Unable to register child device\n"); + put_device(&child_device_obj->device); return ret; } @@ -2251,7 +2252,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, bool fb_overlap_ok) { struct resource *iter, *shadow; - resource_size_t range_min, range_max, start; + resource_size_t range_min, range_max, start, end; const char *dev_n = dev_name(&device_obj->device); int retval; @@ -2286,6 +2287,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, range_max = iter->end; start = (range_min + align - 1) & ~(align - 1); for (; start + size - 1 <= range_max; start += align) { + end = start + size - 1; + + /* Skip the whole fb_mmio region if not fb_overlap_ok */ + if (!fb_overlap_ok && fb_mmio && + (((start >= fb_mmio->start) && (start <= fb_mmio->end)) || + ((end >= fb_mmio->start) && (end <= fb_mmio->end)))) + continue; + shadow = __request_region(iter, start, size, NULL, IORESOURCE_BUSY); if (!shadow) @@ -2673,10 +2682,15 @@ static void __exit vmbus_exit(void) if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { kmsg_dump_unregister(&hv_kmsg_dumper); unregister_die_notifier(&hyperv_die_block); - atomic_notifier_chain_unregister(&panic_notifier_list, - &hyperv_panic_block); } + /* + * The panic notifier is always registered, hence we should + * also unconditionally unregister it here as well. + */ + atomic_notifier_chain_unregister(&panic_notifier_list, + &hyperv_panic_block); + free_page((unsigned long)hv_panic_page); unregister_sysctl_table(hv_ctl_table_hdr); hv_ctl_table_hdr = NULL; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 0c2b032ee617..f741c7492ee4 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -922,7 +922,7 @@ config SENSORS_LTC4261 config SENSORS_LTQ_CPUTEMP bool "Lantiq cpu temperature sensor driver" - depends on LANTIQ + depends on SOC_XWAY help If you say yes here you get support for the temperature sensor inside your CPU. 
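[Editor's aside - not part of the patch stream. The hv_pkt_iter_avail() change above pairs the host's store-release of write_index with a guest-side virt_load_acquire(). Below is a minimal user-space sketch of the same single-producer/single-consumer publication protocol, using C11 atomics instead of the kernel's virt_* helpers; all names and sizes here are illustrative, not taken from the driver.]

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 256u

struct ring {
	uint8_t data[RING_SIZE];
	_Atomic uint32_t write_index;	/* advanced by the producer only */
	uint32_t priv_read_index;	/* private to the consumer */
};

/* Producer: write the payload first, then publish it with a release
 * store, so a consumer that observes the new index also observes the
 * payload. (No full-ring check - kept minimal on purpose.) */
static void ring_produce(struct ring *r, uint8_t byte)
{
	uint32_t w = atomic_load_explicit(&r->write_index,
					  memory_order_relaxed);

	r->data[w % RING_SIZE] = byte;
	atomic_store_explicit(&r->write_index, w + 1,
			      memory_order_release);
}

/* Consumer: the acquire load keeps the payload reads below from being
 * satisfied with data fetched before the index read - the stale-data
 * hazard the hv_ringbuffer_read() comment describes. */
static int ring_consume(struct ring *r, uint8_t *out)
{
	uint32_t w = atomic_load_explicit(&r->write_index,
					  memory_order_acquire);

	if (r->priv_read_index == w)
		return 0;	/* ring is empty */

	*out = r->data[r->priv_read_index % RING_SIZE];
	r->priv_read_index++;
	return 1;
}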
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 740f39a54ab0..71e357956ce4 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -20,6 +20,7 @@ #include #include #include +#include /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END }; @@ -260,11 +261,10 @@ static int adt7470_update_thread(void *p) adt7470_read_temperatures(client, data); mutex_unlock(&data->lock); - set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) break; - schedule_timeout(msecs_to_jiffies(data->auto_update_interval)); + schedule_timeout_interruptible(msecs_to_jiffies(data->auto_update_interval)); } return 0; diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index bb9211215a68..42b84ebff057 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -46,9 +46,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) -#define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id) -#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) - #ifdef CONFIG_SMP #define for_each_sibling(i, cpu) \ for_each_cpu(i, topology_sibling_cpumask(cpu)) @@ -91,6 +88,8 @@ struct temp_data { struct platform_data { struct device *hwmon_dev; u16 pkg_id; + u16 cpu_map[NUM_REAL_CORES]; + struct ida ida; struct cpumask cpumask; struct temp_data *core_data[MAX_CORE_DATA]; struct device_attribute name_attr; @@ -243,10 +242,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) */ if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) { for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) { - if (host_bridge->device == tjmax_pci_table[i].device) + if (host_bridge->device == tjmax_pci_table[i].device) { + pci_dev_put(host_bridge); return tjmax_pci_table[i].tjmax; + } } } + pci_dev_put(host_bridge); for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) { if (strstr(c->x86_model_id, tjmax_table[i].id)) @@ -441,7 +443,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) MSR_IA32_THERM_STATUS; tdata->is_pkg_data = pkg_flag; tdata->cpu = cpu; - tdata->cpu_core_id = TO_CORE_ID(cpu); + tdata->cpu_core_id = topology_core_id(cpu); tdata->attr_size = MAX_CORE_ATTRS; mutex_init(&tdata->update_lock); return tdata; @@ -454,7 +456,7 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, struct platform_data *pdata = platform_get_drvdata(pdev); struct cpuinfo_x86 *c = &cpu_data(cpu); u32 eax, edx; - int err, attr_no; + int err, index, attr_no; /* * Find attr number for sysfs: @@ -462,14 +464,26 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, * The attr number is always core id + 2 * The Pkgtemp will always show up as temp1_*, if available */ - attr_no = pkg_flag ? 
PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu); + if (pkg_flag) { + attr_no = PKG_SYSFS_ATTR_NO; + } else { + index = ida_alloc(&pdata->ida, GFP_KERNEL); + if (index < 0) + return index; + pdata->cpu_map[index] = topology_core_id(cpu); + attr_no = index + BASE_SYSFS_ATTR_NO; + } - if (attr_no > MAX_CORE_DATA - 1) - return -ERANGE; + if (attr_no > MAX_CORE_DATA - 1) { + err = -ERANGE; + goto ida_free; + } tdata = init_temp_data(cpu, pkg_flag); - if (!tdata) - return -ENOMEM; + if (!tdata) { + err = -ENOMEM; + goto ida_free; + } /* Test if we can access the status register */ err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx); @@ -505,6 +519,9 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, exit_free: pdata->core_data[attr_no] = NULL; kfree(tdata); +ida_free: + if (!pkg_flag) + ida_free(&pdata->ida, index); return err; } @@ -519,11 +536,18 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx) { struct temp_data *tdata = pdata->core_data[indx]; + /* if we errored on add then this is already gone */ + if (!tdata) + return; + /* Remove the sysfs attributes */ sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group); kfree(pdata->core_data[indx]); pdata->core_data[indx] = NULL; + + if (indx >= BASE_SYSFS_ATTR_NO) + ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO); } static int coretemp_probe(struct platform_device *pdev) @@ -537,6 +561,7 @@ static int coretemp_probe(struct platform_device *pdev) return -ENOMEM; pdata->pkg_id = pdev->id; + ida_init(&pdata->ida); platform_set_drvdata(pdev, pdata); pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME, @@ -553,6 +578,7 @@ static int coretemp_remove(struct platform_device *pdev) if (pdata->core_data[i]) coretemp_remove_core(pdata, i); + ida_destroy(&pdata->ida); return 0; } @@ -647,7 +673,7 @@ static int coretemp_cpu_offline(unsigned int cpu) struct platform_device *pdev = coretemp_get_pdev(cpu); struct platform_data *pd; struct temp_data *tdata; - int indx, target; + int i, indx = -1, target; /* * Don't execute this on suspend as the device remove locks @@ -660,12 +686,19 @@ static int coretemp_cpu_offline(unsigned int cpu) if (!pdev) return 0; - /* The core id is too big, just return */ - indx = TO_ATTR_NO(cpu); - if (indx > MAX_CORE_DATA - 1) + pd = platform_get_drvdata(pdev); + + for (i = 0; i < NUM_REAL_CORES; i++) { + if (pd->cpu_map[i] == topology_core_id(cpu)) { + indx = i + BASE_SYSFS_ATTR_NO; + break; + } + } + + /* Too many cores and this core is not populated, just return */ + if (indx < 0) return 0; - pd = platform_get_drvdata(pdev); tdata = pd->core_data[indx]; cpumask_clear_cpu(cpu, &pd->cpumask); diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c index 72c760373957..00303af82a77 100644 --- a/drivers/hwmon/drivetemp.c +++ b/drivers/hwmon/drivetemp.c @@ -621,3 +621,4 @@ module_exit(drivetemp_exit); MODULE_AUTHOR("Guenter Roeck "); MODULE_DESCRIPTION("Hard drive temperature monitor"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:drivetemp"); diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 4dec793fd07d..94b35723ee7a 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -1577,8 +1577,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, temp *= 125; if (sign) temp -= 128000; - } else - temp = data->temp[nr] * 1000; + } else { + temp = ((s8)data->temp[nr]) * 1000; + } return sprintf(buf, "%d\n", temp); } diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index 
3ea4021f267c..d96e435cc42b 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c @@ -391,6 +391,9 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev, if (!fan_data) return -EINVAL; + if (state >= fan_data->num_speed) + return -EINVAL; + set_fan_speed(fan_data, state); return 0; } diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c index 1fe37418ff46..f29ce49294da 100644 --- a/drivers/hwmon/gsc-hwmon.c +++ b/drivers/hwmon/gsc-hwmon.c @@ -267,6 +267,7 @@ gsc_hwmon_get_devtree_pdata(struct device *dev) pdata->nchannels = nchannels; /* fan controller base address */ + of_node_get(dev->parent->of_node); fan = of_find_compatible_node(dev->parent->of_node, NULL, "gw,gsc-fan"); if (fan && of_property_read_u32(fan, "reg", &pdata->fan_base)) { dev_err(dev, "fan node without base\n"); diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c index 360f5aee1394..d4be03f43fb4 100644 --- a/drivers/hwmon/i5500_temp.c +++ b/drivers/hwmon/i5500_temp.c @@ -108,7 +108,7 @@ static int i5500_temp_probe(struct pci_dev *pdev, u32 tstimer; s8 tsfsc; - err = pci_enable_device(pdev); + err = pcim_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Failed to enable device\n"); return err; diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index a4ec85207782..2e6d6a5cffa1 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c @@ -550,7 +550,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle) res = platform_device_add(data->pdev); if (res) - goto ipmi_err; + goto dev_add_err; platform_set_drvdata(data->pdev, data); @@ -598,7 +598,9 @@ hwmon_reg_err: ipmi_destroy_user(data->ipmi.user); ipmi_err: platform_set_drvdata(data->pdev, NULL); - platform_device_unregister(data->pdev); + platform_device_del(data->pdev); +dev_add_err: + platform_device_put(data->pdev); dev_err: ida_simple_remove(&aem_ida, data->id); id_err: @@ -690,7 +692,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe, res = platform_device_add(data->pdev); if (res) - goto ipmi_err; + goto dev_add_err; platform_set_drvdata(data->pdev, data); @@ -738,7 +740,9 @@ hwmon_reg_err: ipmi_destroy_user(data->ipmi.user); ipmi_err: platform_set_drvdata(data->pdev, NULL); - platform_device_unregister(data->pdev); + platform_device_del(data->pdev); +dev_add_err: + platform_device_put(data->pdev); dev_err: ida_simple_remove(&aem_ida, data->id); id_err: diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c index b2ab83c9fd9a..fe90f0536d76 100644 --- a/drivers/hwmon/ibmpex.c +++ b/drivers/hwmon/ibmpex.c @@ -502,6 +502,7 @@ static void ibmpex_register_bmc(int iface, struct device *dev) return; out_register: + list_del(&data->list); hwmon_device_unregister(data->hwmon_dev); out_user: ipmi_destroy_user(data->user); diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c index ad11cbddc3a7..d3c98115042b 100644 --- a/drivers/hwmon/ina3221.c +++ b/drivers/hwmon/ina3221.c @@ -230,7 +230,7 @@ static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg, * Shunt Voltage Sum register has 14-bit value with 1-bit shift * Other Shunt Voltage registers have 12 bits with 3-bit shift */ - if (reg == INA3221_SHUNT_SUM) + if (reg == INA3221_SHUNT_SUM || reg == INA3221_CRIT_SUM) *val = sign_extend32(regval >> 1, 14); else *val = sign_extend32(regval >> 3, 12); @@ -465,7 +465,7 @@ static int ina3221_write_curr(struct device *dev, u32 attr, * SHUNT_SUM: (1 / 40uV) << 1 = 1 / 20uV * SHUNT[1-3]: (1 / 40uV) << 3 = 1 / 5uV */ - if (reg == 
INA3221_SHUNT_SUM) + if (reg == INA3221_SHUNT_SUM || reg == INA3221_CRIT_SUM) regval = DIV_ROUND_CLOSEST(voltage_uv, 20) & 0xfffe; else regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8; diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c index 5423466de697..e918490f3ff7 100644 --- a/drivers/hwmon/ltc2947-core.c +++ b/drivers/hwmon/ltc2947-core.c @@ -396,7 +396,7 @@ static int ltc2947_read_temp(struct device *dev, const u32 attr, long *val, return ret; /* in millidegrees Celsius, temp is given by: */ - *val = (__val * 204) + 550; + *val = (__val * 204) + 5500; return 0; } diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c index 046523d47c29..41e3d3b54baf 100644 --- a/drivers/hwmon/mr75203.c +++ b/drivers/hwmon/mr75203.c @@ -68,8 +68,9 @@ /* VM Individual Macro Register */ #define VM_COM_REG_SIZE 0x200 -#define VM_SDIF_DONE(n) (VM_COM_REG_SIZE + 0x34 + 0x200 * (n)) -#define VM_SDIF_DATA(n) (VM_COM_REG_SIZE + 0x40 + 0x200 * (n)) +#define VM_SDIF_DONE(vm) (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm)) +#define VM_SDIF_DATA(vm, ch) \ + (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch)) /* SDA Slave Register */ #define IP_CTRL 0x00 @@ -115,6 +116,7 @@ struct pvt_device { u32 t_num; u32 p_num; u32 v_num; + u32 c_num; u32 ip_freq; u8 *vm_idx; }; @@ -178,14 +180,15 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val) { struct pvt_device *pvt = dev_get_drvdata(dev); struct regmap *v_map = pvt->v_map; + u8 vm_idx, ch_idx; u32 n, stat; - u8 vm_idx; int ret; - if (channel >= pvt->v_num) + if (channel >= pvt->v_num * pvt->c_num) return -EINVAL; - vm_idx = pvt->vm_idx[channel]; + vm_idx = pvt->vm_idx[channel / pvt->c_num]; + ch_idx = channel % pvt->c_num; switch (attr) { case hwmon_in_input: @@ -196,13 +199,23 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val) if (ret) return ret; - ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n); + ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n); if(ret < 0) return ret; n &= SAMPLE_DATA_MSK; - /* Convert the N bitstream count into voltage */ - *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS; + /* + * Convert the N bitstream count into voltage. + * To support negative voltage calculation for 64-bit machines + * n must be cast to long, since n and *val differ both in + * signedness and in size. + * Division is used instead of right shift, because for signed + * numbers, the sign bit is used to fill the vacated bit + * positions, and if the number is negative, 1 is used. + * BIT(x) may not be used instead of (1 << x) because it's + * unsigned.
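+ *
+ * (Editor's illustration with hypothetical numbers: for PVT_CONV_BITS
+ * of 2, an intermediate value of -5 gives (-5 >> 2) == -2, rounding
+ * toward -INF, while -5 / (1 << 2) == -1, rounding toward zero, so
+ * the two forms are not interchangeable once the intermediate can be
+ * negative.)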
+ */ + *val = (PVT_N_CONST * (long)n - PVT_R_CONST) / (1 << PVT_CONV_BITS); return 0; default: @@ -385,6 +398,19 @@ static int pvt_init(struct pvt_device *pvt) if (ret) return ret; + val = (BIT(pvt->c_num) - 1) | VM_CH_INIT | + IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG; + ret = regmap_write(v_map, SDIF_W, val); + if (ret < 0) + return ret; + + ret = regmap_read_poll_timeout(v_map, SDIF_STAT, + val, !(val & SDIF_BUSY), + PVT_POLL_DELAY_US, + PVT_POLL_TIMEOUT_US); + if (ret) + return ret; + val = CFG1_VOL_MEAS_MODE | CFG1_PARALLEL_OUT | CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG; @@ -499,8 +525,8 @@ static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt static int mr75203_probe(struct platform_device *pdev) { + u32 ts_num, vm_num, pd_num, ch_num, val, index, i; const struct hwmon_channel_info **pvt_info; - u32 ts_num, vm_num, pd_num, val, index, i; struct device *dev = &pdev->dev; u32 *temp_config, *in_config; struct device *hwmon_dev; @@ -541,9 +567,11 @@ static int mr75203_probe(struct platform_device *pdev) ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT; pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT; vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT; + ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT; pvt->t_num = ts_num; pvt->p_num = pd_num; pvt->v_num = vm_num; + pvt->c_num = ch_num; val = 0; if (ts_num) val++; @@ -580,7 +608,7 @@ static int mr75203_probe(struct platform_device *pdev) } if (vm_num) { - u32 num = vm_num; + u32 total_ch; ret = pvt_get_regmap(pdev, "vm", pvt); if (ret) @@ -594,30 +622,30 @@ static int mr75203_probe(struct platform_device *pdev) ret = device_property_read_u8_array(dev, "intel,vm-map", pvt->vm_idx, vm_num); if (ret) { - num = 0; + /* + * In case intel,vm-map property is not defined, we + * assume incremental channel numbers. + */ + for (i = 0; i < vm_num; i++) + pvt->vm_idx[i] = i; } else { for (i = 0; i < vm_num; i++) if (pvt->vm_idx[i] >= vm_num || pvt->vm_idx[i] == 0xff) { - num = i; + pvt->v_num = i; + vm_num = i; break; } } - /* - * Incase intel,vm-map property is not defined, we assume - * incremental channel numbers.
- */ - for (i = num; i < vm_num; i++) - pvt->vm_idx[i] = i; - - in_config = devm_kcalloc(dev, num + 1, + total_ch = ch_num * vm_num; + in_config = devm_kcalloc(dev, total_ch + 1, sizeof(*in_config), GFP_KERNEL); if (!in_config) return -ENOMEM; - memset32(in_config, HWMON_I_INPUT, num); - in_config[num] = 0; + memset32(in_config, HWMON_I_INPUT, total_ch); + in_config[total_ch] = 0; pvt_in.config = in_config; pvt_info[index++] = &pvt_in; diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index 9dc210b55e69..48466b0a4bb0 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -730,10 +730,21 @@ static int tmp401_probe(struct i2c_client *client) return 0; } +static const struct of_device_id __maybe_unused tmp4xx_of_match[] = { + { .compatible = "ti,tmp401", }, + { .compatible = "ti,tmp411", }, + { .compatible = "ti,tmp431", }, + { .compatible = "ti,tmp432", }, + { .compatible = "ti,tmp435", }, + { }, +}; +MODULE_DEVICE_TABLE(of, tmp4xx_of_match); + static struct i2c_driver tmp401_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "tmp401", + .of_match_table = of_match_ptr(tmp4xx_of_match), }, .probe_new = tmp401_probe, .id_table = tmp401_id, diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c index 364710966665..e49914664863 100644 --- a/drivers/hwspinlock/qcom_hwspinlock.c +++ b/drivers/hwspinlock/qcom_hwspinlock.c @@ -105,7 +105,7 @@ static const struct regmap_config tcsr_mutex_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .max_register = 0x40000, + .max_register = 0x20000, .fast_io = true, }; diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index 4956a8159227..53dc2aa3a371 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -1376,7 +1376,7 @@ static int coresight_fixup_device_conns(struct coresight_device *csdev) continue; conn->child_dev = coresight_find_csdev_by_fwnode(conn->child_fwnode); - if (conn->child_dev) { + if (conn->child_dev && conn->child_dev->has_conns_grp) { ret = coresight_make_links(csdev, conn, conn->child_dev); if (ret) @@ -1421,6 +1421,7 @@ static int coresight_remove_match(struct device *dev, void *data) * platform data. */ fwnode_handle_put(conn->child_fwnode); + conn->child_fwnode = NULL; /* No need to continue */ break; } @@ -1568,6 +1569,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) int nr_refcnts = 1; atomic_t *refcnts = NULL; struct coresight_device *csdev; + bool registered = false; csdev = kzalloc(sizeof(*csdev), GFP_KERNEL); if (!csdev) { @@ -1588,7 +1590,8 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL); if (!refcnts) { ret = -ENOMEM; - goto err_free_csdev; + kfree(csdev); + goto err_out; } csdev->refcnt = refcnts; @@ -1613,6 +1616,13 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev)); dev_set_name(&csdev->dev, "%s", desc->name); + /* + * Make sure the device registration and the connection fixup + * are synchronised, so that we don't see uninitialised devices + * on the coresight bus while trying to resolve the connections. 
+ */ + mutex_lock(&coresight_mutex); + ret = device_register(&csdev->dev); if (ret) { put_device(&csdev->dev); @@ -1620,7 +1630,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) * All resources are free'd explicitly via * coresight_device_release(), triggered from put_device(). */ - goto err_out; + goto out_unlock; } if (csdev->type == CORESIGHT_DEV_TYPE_SINK || @@ -1635,11 +1645,11 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) * from put_device(), which is in turn called from * function device_unregister(). */ - goto err_out; + goto out_unlock; } } - - mutex_lock(&coresight_mutex); + /* Device is now registered */ + registered = true; ret = coresight_create_conns_sysfs_group(csdev); if (!ret) @@ -1649,16 +1659,18 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) if (!ret && cti_assoc_ops && cti_assoc_ops->add) cti_assoc_ops->add(csdev); +out_unlock: mutex_unlock(&coresight_mutex); - if (ret) { + /* Success */ + if (!ret) + return csdev; + + /* Unregister the device if needed */ + if (registered) { coresight_unregister(csdev); return ERR_PTR(ret); } - return csdev; - -err_free_csdev: - kfree(csdev); err_out: /* Cleanup the connection information */ coresight_release_platform_data(NULL, desc->pdata); diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c index 2dcf13de751f..1e98562f4287 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -379,9 +379,10 @@ static int debug_notifier_call(struct notifier_block *self, int cpu; struct debug_drvdata *drvdata; - mutex_lock(&debug_lock); + /* Bail out if we can't acquire the mutex or the functionality is off */ + if (!mutex_trylock(&debug_lock)) + return NOTIFY_DONE; - /* Bail out if the functionality is disabled */ if (!debug_enable) goto skip_dump; @@ -400,7 +401,7 @@ static int debug_notifier_call(struct notifier_block *self, skip_dump: mutex_unlock(&debug_lock); - return 0; + return NOTIFY_DONE; } static struct notifier_block debug_notifier = { diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c index 8988b2ed2ea6..dcd607a0c41a 100644 --- a/drivers/hwtracing/coresight/coresight-cti-core.c +++ b/drivers/hwtracing/coresight/coresight-cti-core.c @@ -90,11 +90,9 @@ void cti_write_all_hw_regs(struct cti_drvdata *drvdata) static int cti_enable_hw(struct cti_drvdata *drvdata) { struct cti_config *config = &drvdata->config; - struct device *dev = &drvdata->csdev->dev; unsigned long flags; int rc = 0; - pm_runtime_get_sync(dev->parent); spin_lock_irqsave(&drvdata->spinlock, flags); /* no need to do anything if enabled or unpowered*/ @@ -119,7 +117,6 @@ cti_state_unchanged: /* cannot enable due to error */ cti_err_not_enabled: spin_unlock_irqrestore(&drvdata->spinlock, flags); - pm_runtime_put(dev->parent); return rc; } @@ -153,7 +150,6 @@ cti_hp_not_enabled: static int cti_disable_hw(struct cti_drvdata *drvdata) { struct cti_config *config = &drvdata->config; - struct device *dev = &drvdata->csdev->dev; struct coresight_device *csdev = drvdata->csdev; spin_lock(&drvdata->spinlock); @@ -175,7 +171,6 @@ static int cti_disable_hw(struct cti_drvdata *drvdata) coresight_disclaim_device_unlocked(csdev); CS_LOCK(drvdata->base); spin_unlock(&drvdata->spinlock); - pm_runtime_put(dev->parent); return 0; /* not disabled this call */ diff --git a/drivers/hwtracing/intel_th/msu-sink.c 
b/drivers/hwtracing/intel_th/msu-sink.c index 2c7f5116be12..891b28ea25fe 100644 --- a/drivers/hwtracing/intel_th/msu-sink.c +++ b/drivers/hwtracing/intel_th/msu-sink.c @@ -71,6 +71,9 @@ static int msu_sink_alloc_window(void *data, struct sg_table **sgt, size_t size) block = dma_alloc_coherent(priv->dev->parent->parent, PAGE_SIZE, &sg_dma_address(sg_ptr), GFP_KERNEL); + if (!block) + return -ENOMEM; + sg_set_buf(sg_ptr, block, PAGE_SIZE); } diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index 3a77551fb4fc..24f56a7c0fcf 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -1053,6 +1053,16 @@ msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {} static inline void msc_buffer_set_wb(struct msc_window *win) {} #endif /* CONFIG_X86 */ +static struct page *msc_sg_page(struct scatterlist *sg) +{ + void *addr = sg_virt(sg); + + if (is_vmalloc_addr(addr)) + return vmalloc_to_page(addr); + + return sg_page(sg); +} + /** * msc_buffer_win_alloc() - alloc a window for a multiblock mode * @msc: MSC device @@ -1125,7 +1135,7 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win) int i; for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) { - struct page *page = sg_page(sg); + struct page *page = msc_sg_page(sg); page->mapping = NULL; dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, @@ -1387,7 +1397,7 @@ found: pgoff -= win->pgoff; for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { - struct page *page = sg_page(sg); + struct page *page = msc_sg_page(sg); size_t pgsz = PFN_DOWN(sg->length); if (pgoff < pgsz) diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 817cdb29bbd8..e25438025b9f 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -100,8 +100,10 @@ static int intel_th_pci_probe(struct pci_dev *pdev, } th = intel_th_alloc(&pdev->dev, drvdata, resource, r); - if (IS_ERR(th)) - return PTR_ERR(th); + if (IS_ERR(th)) { + err = PTR_ERR(th); + goto err_free_irq; + } th->activate = intel_th_pci_activate; th->deactivate = intel_th_pci_deactivate; @@ -109,6 +111,10 @@ static int intel_th_pci_probe(struct pci_dev *pdev, pci_set_master(pdev); return 0; + +err_free_irq: + pci_free_irq_vectors(pdev); + return err; } static void intel_th_pci_remove(struct pci_dev *pdev) @@ -278,6 +284,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Meteor Lake-P */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7e24), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Raptor Lake-S */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Raptor Lake-S CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa76f), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Alder Lake CPU */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f), diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c index 66864f9cf7ac..7960fa4b8c5b 100644 --- a/drivers/i2c/busses/i2c-at91-master.c +++ b/drivers/i2c/busses/i2c-at91-master.c @@ -657,6 +657,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) unsigned int_addr_flag = 0; struct i2c_msg *m_start = msg; bool is_read; + u8 *dma_buf = NULL; dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num); @@ -704,7 +705,17 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) 
dev->msg = m_start; dev->recv_len_abort = false; + if (dev->use_dma) { + dma_buf = i2c_get_dma_safe_msg_buf(m_start, 1); + if (!dma_buf) { + ret = -ENOMEM; + goto out; + } + dev->buf = dma_buf; + } + ret = at91_do_twi_transfer(dev); + i2c_put_dma_safe_msg_buf(dma_buf, m_start, !ret); ret = (ret < 0) ? ret : num; out: diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index c1bbc4caeb5c..50928216b3f2 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -386,9 +386,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr) */ static irqreturn_t cdns_i2c_master_isr(void *ptr) { - unsigned int isr_status, avail_bytes, updatetx; + unsigned int isr_status, avail_bytes; unsigned int bytes_to_send; - bool hold_quirk; + bool updatetx; struct cdns_i2c *id = ptr; /* Signal completion only after everything is updated */ int done_flag = 0; @@ -408,11 +408,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) * Check if transfer size register needs to be updated again for a * large data receive operation. */ - updatetx = 0; - if (id->recv_count > id->curr_recv_count) - updatetx = 1; - - hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx; + updatetx = id->recv_count > id->curr_recv_count; /* When receiving, handle data interrupt and completion interrupt */ if (id->p_recv_buf && @@ -443,7 +439,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) break; } - if (cdns_is_holdquirk(id, hold_quirk)) + if (cdns_is_holdquirk(id, updatetx)) break; } @@ -454,7 +450,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) * maintain transfer size non-zero while performing a large * receive operation. */ - if (cdns_is_holdquirk(id, hold_quirk)) { + if (cdns_is_holdquirk(id, updatetx)) { /* wait while fifo is full */ while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) != (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH)) @@ -476,22 +472,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) CDNS_I2C_XFER_SIZE_OFFSET); id->curr_recv_count = id->recv_count; } - } else if (id->recv_count && !hold_quirk && - !id->curr_recv_count) { - - /* Set the slave address in address register*/ - cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK, - CDNS_I2C_ADDR_OFFSET); - - if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) { - cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE, - CDNS_I2C_XFER_SIZE_OFFSET); - id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE; - } else { - cdns_i2c_writereg(id->recv_count, - CDNS_I2C_XFER_SIZE_OFFSET); - id->curr_recv_count = id->recv_count; - } } /* Clear hold (if not repeated start) and signal completion */ @@ -586,8 +566,13 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); ctrl_reg |= CDNS_I2C_CR_RW | CDNS_I2C_CR_CLR_FIFO; + /* + * Receive up to I2C_SMBUS_BLOCK_MAX data bytes, plus one message length + * byte, plus one checksum byte if PEC is enabled. p_msg->len will be 2 if + * PEC is enabled, otherwise 1. 
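+ * (Editor's worked example: I2C_SMBUS_BLOCK_MAX is 32, so with PEC
+ * recv_count = 32 + 2 = 34 - one length byte, up to 32 data bytes and
+ * one PEC byte - and without PEC recv_count = 32 + 1 = 33.)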
+ */ if (id->p_msg->flags & I2C_M_RECV_LEN) - id->recv_count = I2C_SMBUS_BLOCK_MAX + 1; + id->recv_count = I2C_SMBUS_BLOCK_MAX + id->p_msg->len; id->curr_recv_count = id->recv_count; @@ -724,7 +709,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap) static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, struct i2c_adapter *adap) { - unsigned long time_left; + unsigned long time_left, msg_timeout; u32 reg; id->p_msg = msg; @@ -749,8 +734,16 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, else cdns_i2c_msend(id); + /* Minimal time to execute this message */ + msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk); + /* Plus some wiggle room */ + msg_timeout += msecs_to_jiffies(500); + + if (msg_timeout < adap->timeout) + msg_timeout = adap->timeout; + /* Wait for the signal of completion */ - time_left = wait_for_completion_timeout(&id->xfer_done, adap->timeout); + time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout); if (time_left == 0) { cdns_i2c_master_reset(adap); dev_err(id->adap.dev.parent, @@ -765,6 +758,9 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, if (id->err_status & CDNS_I2C_IXR_ARB_LOST) return -EAGAIN; + if (msg->flags & I2C_M_RECV_LEN) + msg->len += min_t(unsigned int, msg->buf[0], I2C_SMBUS_BLOCK_MAX); + return 0; } @@ -1281,6 +1277,7 @@ static int cdns_i2c_probe(struct platform_device *pdev) return 0; err_clk_dis: + clk_notifier_unregister(id->clk, &id->clk_rate_change_nb); clk_disable_unprepare(id->clk); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index 3c19aada4b30..9468c6c89b3f 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -474,9 +474,6 @@ int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare) { int ret; - if (IS_ERR(dev->clk)) - return PTR_ERR(dev->clk); - if (prepare) { /* Optional interface clock */ ret = clk_prepare_enable(dev->pclk); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 0dfeb2d11603..ad91c7c0faa5 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -266,8 +266,17 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) goto exit_reset; } - dev->clk = devm_clk_get(&pdev->dev, NULL); - if (!i2c_dw_prepare_clk(dev, true)) { + dev->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(dev->clk)) { + ret = PTR_ERR(dev->clk); + goto exit_reset; + } + + ret = i2c_dw_prepare_clk(dev, true); + if (ret) + goto exit_reset; + + if (dev->clk) { u64 clk_khz; dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 5618c1ff34dc..45682d30d705 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1275,6 +1275,7 @@ static const struct { */ { "Latitude 5480", 0x29 }, { "Vostro V131", 0x1d }, + { "Vostro 5568", 0x29 }, }; static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv) diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 72af4b4d1318..b4fb4336b4e8 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -843,7 +843,8 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, int i, result; unsigned int temp; int block_data = 
msgs->flags & I2C_M_RECV_LEN; - int use_dma = i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data; + int use_dma = i2c_imx->dma && msgs->flags & I2C_M_DMA_SAFE && + msgs->len >= DMA_THRESHOLD && !block_data; dev_dbg(&i2c_imx->adapter.dev, "<%s> write slave address: addr=0x%x\n", @@ -1011,7 +1012,8 @@ static int i2c_imx_xfer_common(struct i2c_adapter *adapter, result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg, atomic); } else { if (!atomic && - i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD) + i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD && + msgs[i].flags & I2C_M_DMA_SAFE) result = i2c_imx_dma_write(i2c_imx, &msgs[i]); else result = i2c_imx_write(i2c_imx, &msgs[i], atomic); @@ -1280,9 +1282,7 @@ static int i2c_imx_remove(struct platform_device *pdev) struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev); int irq, ret; - ret = pm_runtime_resume_and_get(&pdev->dev); - if (ret < 0) - return ret; + ret = pm_runtime_get_sync(&pdev->dev); /* remove adapter */ dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n"); @@ -1291,17 +1291,21 @@ static int i2c_imx_remove(struct platform_device *pdev) if (i2c_imx->dma) i2c_imx_dma_free(i2c_imx); - /* setup chip registers to defaults */ - imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR); - imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR); - imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR); - imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR); + if (ret >= 0) { + /* setup chip registers to defaults */ + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR); + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR); + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR); + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR); + clk_disable(i2c_imx->clk); + } clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); irq = platform_get_irq(pdev, 0); if (irq >= 0) free_irq(irq, i2c_imx); - clk_disable_unprepare(i2c_imx->clk); + + clk_unprepare(i2c_imx->clk); pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index a35a27c320e7..3d2d92640651 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -82,6 +82,7 @@ #define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */ #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */ +#define ISMT_LOG_ENTRIES 3 /* number of interrupt cause log entries */ /* Hardware Descriptor Constants - Control Field */ #define ISMT_DESC_CWRL 0x01 /* Command/Write Length */ @@ -175,6 +176,8 @@ struct ismt_priv { u8 head; /* ring buffer head pointer */ struct completion cmp; /* interrupt completion */ u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */ + dma_addr_t log_dma; + u32 *log; }; static const struct pci_device_id ismt_ids[] = { @@ -409,6 +412,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, memset(desc, 0, sizeof(struct ismt_desc)); desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write); + /* Always clear the log entries */ + memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32)); + /* Initialize common control bits */ if (likely(pci_dev_msi_enabled(priv->pci_dev))) desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR; @@ -693,6 +699,8 @@ static void ismt_hw_init(struct ismt_priv *priv) /* initialize the Master Descriptor Base Address (MDBA) */ writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA); + writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL); + /* initialize the Master Control Register (MCTRL) */ writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL); @@ -780,6 +788,12 @@ static int ismt_dev_init(struct ismt_priv 
*priv) priv->head = 0; init_completion(&priv->cmp); + priv->log = dmam_alloc_coherent(&priv->pci_dev->dev, + ISMT_LOG_ENTRIES * sizeof(u32), + &priv->log_dma, GFP_KERNEL); + if (!priv->log) + return -ENOMEM; + return 0; } diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c index ab261d762dea..90c488a60693 100644 --- a/drivers/i2c/busses/i2c-mlxbf.c +++ b/drivers/i2c/busses/i2c-mlxbf.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include @@ -63,13 +64,14 @@ */ #define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000) /* Reference clock for Bluefield - 156 MHz. */ -#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000) +#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL /* Constant used to determine the PLL frequency. */ -#define MLNXBF_I2C_COREPLL_CONST 16384 +#define MLNXBF_I2C_COREPLL_CONST 16384ULL + +#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL /* PLL registers. */ -#define MLXBF_I2C_CORE_PLL_REG0 0x0 #define MLXBF_I2C_CORE_PLL_REG1 0x4 #define MLXBF_I2C_CORE_PLL_REG2 0x8 @@ -187,22 +189,15 @@ enum { #define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ /* Core PLL TYU configuration. */ -#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0) -#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0) -#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0) - -#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3 -#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16 -#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20 +#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3) +#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16) +#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20) /* Core PLL YU configuration. */ #define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0) #define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0) -#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0) +#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26) -#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0 -#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1 -#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26 /* Core PLL frequency. */ static u64 mlxbf_i2c_corepll_frequency; @@ -317,6 +312,7 @@ static u64 mlxbf_i2c_corepll_frequency; * exact. */ #define MLXBF_I2C_SMBUS_TIMEOUT (300 * 1000) /* 300ms */ +#define MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT (300 * 1000) /* 300ms */ /* Encapsulates timing parameters. */ struct mlxbf_i2c_timings { @@ -485,8 +481,6 @@ static struct mutex mlxbf_i2c_bus_lock; #define MLXBF_I2C_MASK_8 GENMASK(7, 0) #define MLXBF_I2C_MASK_16 GENMASK(15, 0) -#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000 - /* * Function to poll a set of bits at a specific address; it checks whether * the bits are equal to zero when eq_zero is set to 'true', and not equal @@ -527,6 +521,25 @@ static bool mlxbf_smbus_master_wait_for_idle(struct mlxbf_i2c_priv *priv) return false; } +/* + * wait for the lock to be released before acquiring it. + */ +static bool mlxbf_i2c_smbus_master_lock(struct mlxbf_i2c_priv *priv) +{ + if (mlxbf_smbus_poll(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_GW, + MLXBF_I2C_MASTER_LOCK_BIT, true, + MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT)) + return true; + + return false; +} + +static void mlxbf_i2c_smbus_master_unlock(struct mlxbf_i2c_priv *priv) +{ + /* Clear the gw to clear the lock */ + writel(0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_GW); +} + static bool mlxbf_i2c_smbus_transaction_success(u32 master_status, u32 cause_status) { @@ -675,7 +688,7 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave, /* Clear status bits. 
*/ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS); /* Set the cause data. */ - writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR); + writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR); /* Zero PEC byte. */ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC); /* Zero byte count. */ @@ -718,10 +731,19 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, slave = request->slave & GENMASK(6, 0); addr = slave << 1; - /* First of all, check whether the HW is idle. */ - if (WARN_ON(!mlxbf_smbus_master_wait_for_idle(priv))) + /* + * Try to acquire the smbus gw lock before any reads of the GW register since + * a read sets the lock. + */ + if (WARN_ON(!mlxbf_i2c_smbus_master_lock(priv))) return -EBUSY; + /* Check whether the HW is idle */ + if (WARN_ON(!mlxbf_smbus_master_wait_for_idle(priv))) { + ret = -EBUSY; + goto out_unlock; + } + /* Set first byte. */ data_desc[data_idx++] = addr; @@ -744,6 +766,11 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, if (flags & MLXBF_I2C_F_WRITE) { write_en = 1; write_len += operation->length; + if (data_idx + operation->length > + MLXBF_I2C_MASTER_DATA_DESC_SIZE) { + ret = -ENOBUFS; + goto out_unlock; + } memcpy(data_desc + data_idx, operation->buffer, operation->length); data_idx += operation->length; @@ -775,7 +802,7 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en, pec_en, 0); if (ret) - return ret; + goto out_unlock; } if (read_en) { @@ -802,6 +829,9 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_FSM); } +out_unlock: + mlxbf_i2c_smbus_master_unlock(priv); + return ret; } @@ -1413,24 +1443,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev, return 0; } -static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) +static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) { - u64 core_frequency, pad_frequency; + u64 core_frequency; u8 core_od, core_r; u32 corepll_val; u16 core_f; - pad_frequency = MLXBF_I2C_PLL_IN_FREQ; - corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); /* Get Core PLL configuration bits. */ - core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_F_TYU_MASK; - core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK; - core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_R_TYU_MASK; + core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val); + core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val); + core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val); /* * Compute PLL output frequency as follows: @@ -1442,31 +1467,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency * and PadFrequency, respectively.
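 * (Editor's worked example with hypothetical field values: for
 * PLL_IN_FREQ = 156250000, CORE_F = 99, CORE_R = 24 and CORE_OD = 0,
 * PLL_OUT_FREQ = 156250000 * (99 + 1) / ((24 + 1) * (0 + 1)), i.e.
 * 625 MHz.)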
*/ - core_frequency = pad_frequency * (++core_f); + core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f); core_frequency /= (++core_r) * (++core_od); return core_frequency; } -static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) +static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) { u32 corepll_reg1_val, corepll_reg2_val; - u64 corepll_frequency, pad_frequency; + u64 corepll_frequency; u8 core_od, core_r; u32 core_f; - pad_frequency = MLXBF_I2C_PLL_IN_FREQ; - corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2); /* Get Core PLL configuration bits */ - core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_F_YU_MASK; - core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_R_YU_MASK; - core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) & - MLXBF_I2C_COREPLL_CORE_OD_YU_MASK; + core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val); + core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val); + core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val); /* * Compute PLL output frequency as follows: @@ -1478,7 +1498,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency * and PadFrequency, respectively. */ - corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST; + corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST; corepll_frequency /= (++core_r) * (++core_od); return corepll_frequency; @@ -2186,14 +2206,14 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = { [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1], [2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1] }, - .calculate_freq = mlxbf_calculate_freq_from_tyu + .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu }, [MLXBF_I2C_CHIP_TYPE_2] = { .type = MLXBF_I2C_CHIP_TYPE_2, .shared_res = { [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2] }, - .calculate_freq = mlxbf_calculate_freq_from_yu + .calculate_freq = mlxbf_i2c_calculate_freq_from_yu } }; diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c index 45fe4a7fe0c0..901f0fb04fee 100644 --- a/drivers/i2c/busses/i2c-mt7621.c +++ b/drivers/i2c/busses/i2c-mt7621.c @@ -304,7 +304,8 @@ static int mtk_i2c_probe(struct platform_device *pdev) if (i2c->bus_freq == 0) { dev_warn(i2c->dev, "clock-frequency 0 not supported\n"); - return -EINVAL; + ret = -EINVAL; + goto err_disable_clk; } adap = &i2c->adap; @@ -322,10 +323,15 @@ static int mtk_i2c_probe(struct platform_device *pdev) ret = i2c_add_adapter(adap); if (ret < 0) - return ret; + goto err_disable_clk; dev_info(&pdev->dev, "clock %u kHz\n", i2c->bus_freq / 1000); + return 0; + +err_disable_clk: + clk_disable_unprepare(i2c->clk); + return ret; } diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 2ad166355ec9..c1b679737240 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -123,11 +123,11 @@ enum i2c_addr { * Since the addr regs are sprinkled all over the address space, * use this array to get the address of each register.
*/ -#define I2C_NUM_OWN_ADDR 10 +#define I2C_NUM_OWN_ADDR 2 +#define I2C_NUM_OWN_ADDR_SUPPORTED 2 + static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = { - NPCM_I2CADDR1, NPCM_I2CADDR2, NPCM_I2CADDR3, NPCM_I2CADDR4, - NPCM_I2CADDR5, NPCM_I2CADDR6, NPCM_I2CADDR7, NPCM_I2CADDR8, - NPCM_I2CADDR9, NPCM_I2CADDR10, + NPCM_I2CADDR1, NPCM_I2CADDR2, }; #endif @@ -359,14 +359,14 @@ static int npcm_i2c_get_SCL(struct i2c_adapter *_adap) { struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap); - return !!(I2CCTL3_SCL_LVL & ioread32(bus->reg + NPCM_I2CCTL3)); + return !!(I2CCTL3_SCL_LVL & ioread8(bus->reg + NPCM_I2CCTL3)); } static int npcm_i2c_get_SDA(struct i2c_adapter *_adap) { struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap); - return !!(I2CCTL3_SDA_LVL & ioread32(bus->reg + NPCM_I2CCTL3)); + return !!(I2CCTL3_SDA_LVL & ioread8(bus->reg + NPCM_I2CCTL3)); } static inline u16 npcm_i2c_get_index(struct npcm_i2c *bus) @@ -391,14 +391,10 @@ static void npcm_i2c_disable(struct npcm_i2c *bus) #if IS_ENABLED(CONFIG_I2C_SLAVE) int i; - /* select bank 0 for I2C addresses */ - npcm_i2c_select_bank(bus, I2C_BANK_0); - /* Slave addresses removal */ - for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR; i++) + for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR_SUPPORTED; i++) iowrite8(0, bus->reg + npcm_i2caddr[i]); - npcm_i2c_select_bank(bus, I2C_BANK_1); #endif /* Disable module */ i2cctl2 = ioread8(bus->reg + NPCM_I2CCTL2); @@ -563,6 +559,15 @@ static inline void npcm_i2c_nack(struct npcm_i2c *bus) iowrite8(val, bus->reg + NPCM_I2CCTL1); } +static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus) +{ + u8 val; + + /* Clear NEGACK, STASTR and BER bits */ + val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR; + iowrite8(val, bus->reg + NPCM_I2CST); +} + #if IS_ENABLED(CONFIG_I2C_SLAVE) static void npcm_i2c_slave_int_enable(struct npcm_i2c *bus, bool enable) { @@ -594,8 +599,7 @@ static int npcm_i2c_slave_enable(struct npcm_i2c *bus, enum i2c_addr addr_type, i2cctl1 &= ~NPCM_I2CCTL1_GCMEN; iowrite8(i2cctl1, bus->reg + NPCM_I2CCTL1); return 0; - } - if (addr_type == I2C_ARP_ADDR) { + } else if (addr_type == I2C_ARP_ADDR) { i2cctl3 = ioread8(bus->reg + NPCM_I2CCTL3); if (enable) i2cctl3 |= I2CCTL3_ARPMEN; @@ -604,16 +608,16 @@ static int npcm_i2c_slave_enable(struct npcm_i2c *bus, enum i2c_addr addr_type, iowrite8(i2cctl3, bus->reg + NPCM_I2CCTL3); return 0; } + if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10) + dev_err(bus->dev, "try to enable more than 2 SA not supported\n"); + if (addr_type >= I2C_ARP_ADDR) return -EFAULT; - /* select bank 0 for address 3 to 10 */ - if (addr_type > I2C_SLAVE_ADDR2) - npcm_i2c_select_bank(bus, I2C_BANK_0); + /* Set and enable the address */ iowrite8(sa_reg, bus->reg + npcm_i2caddr[addr_type]); npcm_i2c_slave_int_enable(bus, enable); - if (addr_type > I2C_SLAVE_ADDR2) - npcm_i2c_select_bank(bus, I2C_BANK_1); + return 0; } #endif @@ -642,8 +646,8 @@ static void npcm_i2c_reset(struct npcm_i2c *bus) iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST); iowrite8(0xFF, bus->reg + NPCM_I2CST); - /* Clear EOB bit */ - iowrite8(NPCM_I2CCST3_EO_BUSY, bus->reg + NPCM_I2CCST3); + /* Clear and disable EOB */ + npcm_i2c_eob_int(bus, false); /* Clear all fifo bits: */ iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS); @@ -655,6 +659,9 @@ static void npcm_i2c_reset(struct npcm_i2c *bus) } #endif + /* clear status bits for spurious interrupts */ + npcm_i2c_clear_master_status(bus); + bus->state = I2C_IDLE; } @@ -815,15 +822,6 @@ 
static void npcm_i2c_read_fifo(struct npcm_i2c *bus, u8 bytes_in_fifo) } } -static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus) -{ - u8 val; - - /* Clear NEGACK, STASTR and BER bits */ - val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR; - iowrite8(val, bus->reg + NPCM_I2CST); -} - static void npcm_i2c_master_abort(struct npcm_i2c *bus) { /* Only current master is allowed to issue a stop condition */ @@ -840,15 +838,11 @@ static u8 npcm_i2c_get_slave_addr(struct npcm_i2c *bus, enum i2c_addr addr_type) { u8 slave_add; - /* select bank 0 for address 3 to 10 */ - if (addr_type > I2C_SLAVE_ADDR2) - npcm_i2c_select_bank(bus, I2C_BANK_0); + if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10) + dev_err(bus->dev, "get slave: try to use more than 2 SA not supported\n"); slave_add = ioread8(bus->reg + npcm_i2caddr[(int)addr_type]); - if (addr_type > I2C_SLAVE_ADDR2) - npcm_i2c_select_bank(bus, I2C_BANK_1); - return slave_add; } @@ -858,12 +852,12 @@ static int npcm_i2c_remove_slave_addr(struct npcm_i2c *bus, u8 slave_add) /* Set the enable bit */ slave_add |= 0x80; - npcm_i2c_select_bank(bus, I2C_BANK_0); - for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR; i++) { + + for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR_SUPPORTED; i++) { if (ioread8(bus->reg + npcm_i2caddr[i]) == slave_add) iowrite8(0, bus->reg + npcm_i2caddr[i]); } - npcm_i2c_select_bank(bus, I2C_BANK_1); + return 0; } @@ -918,11 +912,15 @@ static int npcm_i2c_slave_get_wr_buf(struct npcm_i2c *bus) for (i = 0; i < I2C_HW_FIFO_SIZE; i++) { if (bus->slv_wr_size >= I2C_HW_FIFO_SIZE) break; - i2c_slave_event(bus->slave, I2C_SLAVE_READ_REQUESTED, &value); + if (bus->state == I2C_SLAVE_MATCH) { + i2c_slave_event(bus->slave, I2C_SLAVE_READ_REQUESTED, &value); + bus->state = I2C_OPER_STARTED; + } else { + i2c_slave_event(bus->slave, I2C_SLAVE_READ_PROCESSED, &value); + } ind = (bus->slv_wr_ind + bus->slv_wr_size) % I2C_HW_FIFO_SIZE; bus->slv_wr_buf[ind] = value; bus->slv_wr_size++; - i2c_slave_event(bus->slave, I2C_SLAVE_READ_PROCESSED, &value); } return I2C_HW_FIFO_SIZE - ret; } @@ -970,7 +968,6 @@ static void npcm_i2c_slave_xmit(struct npcm_i2c *bus, u16 nwrite, if (nwrite == 0) return; - bus->state = I2C_OPER_STARTED; bus->operation = I2C_WRITE_OPER; /* get the next buffer */ @@ -1231,7 +1228,16 @@ static irqreturn_t npcm_i2c_int_slave_handler(struct npcm_i2c *bus) ret = IRQ_HANDLED; } /* SDAST */ - return ret; + /* + * if irq is not one of the above, make sure EOB is disabled and all + * status bits are cleared. + */ + if (ret == IRQ_NONE) { + npcm_i2c_eob_int(bus, false); + npcm_i2c_clear_master_status(bus); + } + + return IRQ_HANDLED; } static int npcm_i2c_reg_slave(struct i2c_client *client) @@ -1467,6 +1473,9 @@ static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus) npcm_i2c_eob_int(bus, false); npcm_i2c_master_stop(bus); + /* Clear SDA Status bit (by reading dummy byte) */ + npcm_i2c_rd_byte(bus); + /* * The bus is released from stall only after the SW clears * NEGACK bit. Then a Stop condition is sent. 
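[Editor's aside - not part of the patch stream. The npcm_i2c_slave_get_wr_buf() change above relies on the kernel's I2C slave backend contract: I2C_SLAVE_READ_REQUESTED asks for the first byte after an address match without consuming it, while I2C_SLAVE_READ_PROCESSED means the previous byte went out on the wire and the next one is wanted. A minimal backend callback in that style - a sketch modelled on i2c-slave-eeprom; the demo_slave type and its fields are hypothetical.]

#include <linux/i2c.h>
#include <linux/kernel.h>

struct demo_slave {
	u8 buf[16];
	unsigned int ptr;
};

static int demo_slave_cb(struct i2c_client *client,
			 enum i2c_slave_event event, u8 *val)
{
	struct demo_slave *d = i2c_get_clientdata(client);

	switch (event) {
	case I2C_SLAVE_READ_REQUESTED:
		/* First byte after the address match: offer buf[ptr]
		 * but do not advance - the master may NACK instead of
		 * clocking it out. */
		*val = d->buf[d->ptr];
		break;
	case I2C_SLAVE_READ_PROCESSED:
		/* The previous byte was transmitted: advance and offer
		 * the next one. */
		d->ptr = (d->ptr + 1) % ARRAY_SIZE(d->buf);
		*val = d->buf[d->ptr];
		break;
	default:
		break;
	}
	return 0;
}

A driver would attach the callback with i2c_slave_register(client, demo_slave_cb) and detach it with i2c_slave_unregister(client).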
@@ -1474,6 +1483,8 @@ static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus) npcm_i2c_clear_master_status(bus); readx_poll_timeout_atomic(ioread8, bus->reg + NPCM_I2CCST, val, !(val & NPCM_I2CCST_BUSY), 10, 200); + /* verify no status bits are still set after bus is released */ + npcm_i2c_clear_master_status(bus); } bus->state = I2C_IDLE; @@ -1672,10 +1683,10 @@ static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap) int iter = 27; if ((npcm_i2c_get_SDA(_adap) == 1) && (npcm_i2c_get_SCL(_adap) == 1)) { - dev_dbg(bus->dev, "bus%d recovery skipped, bus not stuck", - bus->num); + dev_dbg(bus->dev, "bus%d-0x%x recovery skipped, bus not stuck", + bus->num, bus->dest_addr); npcm_i2c_reset(bus); - return status; + return 0; } npcm_i2c_int_enable(bus, false); @@ -1909,6 +1920,7 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode, bus_freq_hz < I2C_FREQ_MIN_HZ || bus_freq_hz > I2C_FREQ_MAX_HZ) return -EINVAL; + npcm_i2c_int_enable(bus, false); npcm_i2c_disable(bus); /* Configure FIFO mode : */ @@ -1937,10 +1949,17 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode, val = (val | NPCM_I2CCTL1_NMINTE) & ~NPCM_I2CCTL1_RWS; iowrite8(val, bus->reg + NPCM_I2CCTL1); - npcm_i2c_int_enable(bus, true); - npcm_i2c_reset(bus); + /* check HW is OK: SDA and SCL should be high at this point. */ + if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) { + dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num); + dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap), + npcm_i2c_get_SCL(&bus->adap)); + return -ENXIO; + } + + npcm_i2c_int_enable(bus, true); return 0; } @@ -1988,10 +2007,14 @@ static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id) #if IS_ENABLED(CONFIG_I2C_SLAVE) if (bus->slave) { bus->master_or_slave = I2C_SLAVE; - return npcm_i2c_int_slave_handler(bus); + if (npcm_i2c_int_slave_handler(bus)) + return IRQ_HANDLED; } #endif - return IRQ_NONE; + /* clear status bits for spurious interrupts */ + npcm_i2c_clear_master_status(bus); + + return IRQ_HANDLED; } static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus, @@ -2047,8 +2070,7 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, u16 nwrite, nread; u8 *write_data, *read_data; u8 slave_addr; - int timeout; - int ret = 0; + unsigned long timeout; bool read_block = false; bool read_PEC = false; u8 bus_busy; @@ -2099,13 +2121,13 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, * 9: bits per transaction (including the ack/nack) */ timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite); - timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec)); + timeout = max_t(unsigned long, bus->adap.timeout, usecs_to_jiffies(timeout_usec)); if (nwrite >= 32 * 1024 || nread >= 32 * 1024) { dev_err(bus->dev, "i2c%d buffer too big\n", bus->num); return -EINVAL; } - time_left = jiffies + msecs_to_jiffies(DEFAULT_STALL_COUNT) + 1; + time_left = jiffies + timeout + 1; do { /* * we must clear slave address immediately when the bus is not @@ -2138,12 +2160,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, bus->read_block_use = read_block; reinit_completion(&bus->cmd_complete); - if (!npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread, - write_data, read_data, read_PEC, - read_block)) - ret = -EBUSY; - if (ret != -EBUSY) { + npcm_i2c_int_enable(bus, true); + + if (npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread, + 
write_data, read_data, read_PEC, + read_block)) { time_left = wait_for_completion_timeout(&bus->cmd_complete, timeout); @@ -2157,26 +2179,31 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, } } } - ret = bus->cmd_err; /* if there was BER, check if need to recover the bus: */ if (bus->cmd_err == -EAGAIN) - ret = i2c_recover_bus(adap); + bus->cmd_err = i2c_recover_bus(adap); /* * After any type of error, check if LAST bit is still set, * due to a HW issue. * It cannot be cleared without resetting the module. */ - if (bus->cmd_err && - (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL))) + else if (bus->cmd_err && + (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL))) npcm_i2c_reset(bus); + /* after any xfer, successful or not, stall and EOB must be disabled */ + npcm_i2c_stall_after_start(bus, false); + npcm_i2c_eob_int(bus, false); + #if IS_ENABLED(CONFIG_I2C_SLAVE) /* reenable slave if it was enabled */ if (bus->slave) iowrite8((bus->slave->addr & 0x7F) | NPCM_I2CADDR_SAEN, bus->reg + NPCM_I2CADDR1); +#else + npcm_i2c_int_enable(bus, false); #endif return bus->cmd_err; } @@ -2269,7 +2296,7 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev) adap = &bus->adap; adap->owner = THIS_MODULE; adap->retries = 3; - adap->timeout = HZ; + adap->timeout = msecs_to_jiffies(35); adap->algo = &npcm_i2c_algo; adap->quirks = &npcm_i2c_quirks; adap->algo_data = bus; @@ -2335,8 +2362,16 @@ static struct platform_driver npcm_i2c_bus_driver = { static int __init npcm_i2c_init(void) { + int ret; + npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL); - platform_driver_register(&npcm_i2c_bus_driver); + + ret = platform_driver_register(&npcm_i2c_bus_driver); + if (ret) { + debugfs_remove_recursive(npcm_i2c_debugfs_dir); + return ret; + } + return 0; } module_init(npcm_i2c_init); diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c index 20f2772c0e79..2c909522f0f3 100644 --- a/drivers/i2c/busses/i2c-pasemi.c +++ b/drivers/i2c/busses/i2c-pasemi.c @@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter, TXFIFO_WR(smbus, msg->buf[msg->len-1] | (stop ? 
MTXFIFO_STOP : 0)); + + if (stop) { + err = pasemi_smb_waitready(smbus); + if (err) + goto reset_out; + } } return 0; diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 8c1b31ed0c42..aa1d3657ab4e 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -961,6 +961,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) "", &piix4_main_adapters[0]); if (retval < 0) return retval; + piix4_adapter_count = 1; } /* Check for auxiliary SMBus on some AMD chipsets */ diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index 09e599069a81..06c87c79bae7 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -638,6 +638,11 @@ static int cci_probe(struct platform_device *pdev) if (ret < 0) goto error; + pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + for (i = 0; i < cci->data->num_masters; i++) { if (!cci->master[i].cci) continue; @@ -649,14 +654,12 @@ static int cci_probe(struct platform_device *pdev) } } - pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); - pm_runtime_use_autosuspend(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - return 0; error_i2c: + pm_runtime_disable(dev); + pm_runtime_dont_use_autosuspend(dev); + for (--i ; i >= 0; i--) { if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 8722ca23f889..6a7a7a074a97 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -999,8 +999,10 @@ static int rcar_i2c_probe(struct platform_device *pdev) pm_runtime_enable(dev); pm_runtime_get_sync(dev); ret = rcar_i2c_clock_calculate(priv); - if (ret < 0) - goto out_pm_put; + if (ret < 0) { + pm_runtime_put(dev); + goto out_pm_disable; + } rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ @@ -1029,19 +1031,19 @@ static int rcar_i2c_probe(struct platform_device *pdev) ret = platform_get_irq(pdev, 0); if (ret < 0) - goto out_pm_disable; + goto out_pm_put; priv->irq = ret; ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv); if (ret < 0) { dev_err(dev, "cannot get irq %d\n", priv->irq); - goto out_pm_disable; + goto out_pm_put; } platform_set_drvdata(pdev, priv); ret = i2c_add_numbered_adapter(adap); if (ret < 0) - goto out_pm_disable; + goto out_pm_put; if (priv->flags & ID_P_HOST_NOTIFY) { priv->host_notify_client = i2c_new_slave_host_notify_device(adap); @@ -1058,7 +1060,8 @@ static int rcar_i2c_probe(struct platform_device *pdev) out_del_device: i2c_del_adapter(&priv->adap); out_pm_put: - pm_runtime_put(dev); + if (priv->flags & ID_P_PM_BLOCKED) + pm_runtime_put(dev); out_pm_disable: pm_runtime_disable(dev); return ret; diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 8b113ae32dc7..42f1db60ad6f 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -283,6 +283,7 @@ struct tegra_i2c_dev { struct dma_chan *tx_dma_chan; struct dma_chan *rx_dma_chan; unsigned int dma_buf_size; + struct device *dma_dev; dma_addr_t dma_phys; void *dma_buf; @@ -419,7 +420,7 @@ static int tegra_i2c_dma_submit(struct tegra_i2c_dev *i2c_dev, size_t len) static void tegra_i2c_release_dma(struct tegra_i2c_dev *i2c_dev) { if (i2c_dev->dma_buf) { - dma_free_coherent(i2c_dev->dev, i2c_dev->dma_buf_size, + dma_free_coherent(i2c_dev->dma_dev, 
i2c_dev->dma_buf_size, i2c_dev->dma_buf, i2c_dev->dma_phys); i2c_dev->dma_buf = NULL; } @@ -466,10 +467,13 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev) i2c_dev->tx_dma_chan = chan; + WARN_ON(i2c_dev->tx_dma_chan->device != i2c_dev->rx_dma_chan->device); + i2c_dev->dma_dev = chan->device->dev; + i2c_dev->dma_buf_size = i2c_dev->hw->quirks->max_write_len + I2C_PACKET_HEADER_SIZE; - dma_buf = dma_alloc_coherent(i2c_dev->dev, i2c_dev->dma_buf_size, + dma_buf = dma_alloc_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size, &dma_phys, GFP_KERNEL | __GFP_NOWARN); if (!dma_buf) { dev_err(i2c_dev->dev, "failed to allocate DMA buffer\n"); @@ -1255,7 +1259,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, if (i2c_dev->dma_mode) { if (i2c_dev->msg_read) { - dma_sync_single_for_device(i2c_dev->dev, + dma_sync_single_for_device(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_FROM_DEVICE); @@ -1263,7 +1267,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, if (err) return err; } else { - dma_sync_single_for_cpu(i2c_dev->dev, + dma_sync_single_for_cpu(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_TO_DEVICE); } @@ -1276,7 +1280,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, memcpy(i2c_dev->dma_buf + I2C_PACKET_HEADER_SIZE, msg->buf, msg->len); - dma_sync_single_for_device(i2c_dev->dev, + dma_sync_single_for_device(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_TO_DEVICE); @@ -1327,7 +1331,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, } if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE) { - dma_sync_single_for_cpu(i2c_dev->dev, + dma_sync_single_for_cpu(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_FROM_DEVICE); diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c index 12c90aa0900e..a77cd86fe75e 100644 --- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c +++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c @@ -213,6 +213,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev, i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info; i2c->adap.dev.parent = dev; i2c->adap.dev.of_node = pdev->dev.of_node; + i2c->adap.dev.fwnode = dev->fwnode; snprintf(i2c->adap.name, sizeof(i2c->adap.name), "Cavium ThunderX i2c adapter at %s", dev_name(dev)); i2c_set_adapdata(&i2c->adap, i2c); diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 8dabb6ffb1a4..3b564e68130b 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -935,6 +935,7 @@ static struct platform_driver xiic_i2c_driver = { module_platform_driver(xiic_i2c_driver); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("info@mocean-labs.com"); MODULE_DESCRIPTION("Xilinx I2C bus driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index c0315596d5cd..d041b7fa5c9c 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -2436,8 +2436,9 @@ void i2c_put_adapter(struct i2c_adapter *adap) if (!adap) return; - put_device(&adap->dev); module_put(adap->owner); + /* Should be last, otherwise we risk use-after-free with 'adap' */ + put_device(&adap->dev); } EXPORT_SYMBOL(i2c_put_adapter); diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c index d3acd8d66c32..33024acaac02 100644 --- a/drivers/i2c/muxes/i2c-mux-gpmux.c +++ b/drivers/i2c/muxes/i2c-mux-gpmux.c @@ -134,6 +134,7 @@ static int i2c_mux_probe(struct platform_device *pdev) return 0; err_children: + 
of_node_put(child); i2c_mux_del_adapters(muxc); err_parent: i2c_put_adapter(parent); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index d79335506ecd..b92b032fb6d1 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -47,11 +47,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include @@ -93,6 +95,12 @@ static unsigned int mwait_substates __initdata; */ #define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15) +/* + * Disable IBRS across idle (when KERNEL_IBRS), is exclusive vs IRQ_ENABLE + * above. + */ +#define CPUIDLE_FLAG_IBRS BIT(16) + /* * MWAIT takes an 8-bit "hint" in EAX "suggesting" * the C-state (top nibble) and sub-state (bottom nibble) @@ -132,6 +140,24 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev, return index; } +static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + bool smt_active = sched_smt_active(); + u64 spec_ctrl = spec_ctrl_current(); + int ret; + + if (smt_active) + wrmsrl(MSR_IA32_SPEC_CTRL, 0); + + ret = intel_idle(dev, drv, index); + + if (smt_active) + wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl); + + return ret; +} + /** * intel_idle_s2idle - Ask the processor to enter the given idle state. * @dev: cpuidle device of the target CPU. @@ -653,7 +679,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { { .name = "C6", .desc = "MWAIT 0x20", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 85, .target_residency = 200, .enter = &intel_idle, @@ -661,7 +687,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { { .name = "C7s", .desc = "MWAIT 0x33", - .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 124, .target_residency = 800, .enter = &intel_idle, @@ -669,7 +695,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { { .name = "C8", .desc = "MWAIT 0x40", - .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 200, .target_residency = 800, .enter = &intel_idle, @@ -677,7 +703,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { { .name = "C9", .desc = "MWAIT 0x50", - .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 480, .target_residency = 5000, .enter = &intel_idle, @@ -685,7 +711,7 @@ static struct cpuidle_state skl_cstates[] __initdata = { { .name = "C10", .desc = "MWAIT 0x60", - .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 890, .target_residency = 5000, .enter = &intel_idle, @@ -714,7 +740,7 @@ static struct cpuidle_state skx_cstates[] __initdata = { { .name = "C6", .desc = "MWAIT 0x20", - .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, .exit_latency = 133, .target_residency = 600, .enter = &intel_idle, @@ -1501,6 +1527,11 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) /* Structure copy. 
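The C-state tables above fold the MWAIT hint into the low byte of .flags through MWAIT2flg(), next to feature bits such as CPUIDLE_FLAG_IBRS (BIT(16)). A small decoding sketch of the nibble split the header comment describes, purely illustrative:

	static unsigned int mwait_hint_cstate(unsigned int hint)
	{
		return (hint >> 4) & 0xf;	/* top nibble: C-state */
	}

	static unsigned int mwait_hint_substate(unsigned int hint)
	{
		return hint & 0xf;		/* bottom nibble: sub-state */
	}

	/* e.g. .desc = "MWAIT 0x60" (C10): C-state nibble 6, sub-state 0 */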
*/ drv->states[drv->state_count] = cpuidle_state_table[cstate]; + if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) && + cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) { + drv->states[drv->state_count].enter = intel_idle_ibrs; + } + if ((disabled_states_mask & BIT(drv->state_count)) || ((icpu->use_acpi || force_use_acpi) && intel_idle_off_by_default(mwait_hint) && diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index da56488182d0..6aa5a72c89b2 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c @@ -1068,11 +1068,12 @@ static int bma180_probe(struct i2c_client *client, data->trig->dev.parent = dev; data->trig->ops = &bma180_trigger_ops; iio_trigger_set_drvdata(data->trig, indio_dev); - indio_dev->trig = iio_trigger_get(data->trig); ret = iio_trigger_register(data->trig); if (ret) goto err_trigger_free; + + indio_dev->trig = iio_trigger_get(data->trig); } ret = iio_triggered_buffer_setup(indio_dev, NULL, diff --git a/drivers/iio/accel/bma400.h b/drivers/iio/accel/bma400.h index 5ad10db9819f..416090c6b1e8 100644 --- a/drivers/iio/accel/bma400.h +++ b/drivers/iio/accel/bma400.h @@ -83,8 +83,27 @@ #define BMA400_ACC_ODR_MIN_WHOLE_HZ 25 #define BMA400_ACC_ODR_MIN_HZ 12 -#define BMA400_SCALE_MIN 38357 -#define BMA400_SCALE_MAX 306864 +/* + * BMA400_SCALE_MIN represents the scale, in m/s^2 per LSB, for the +-2g + * range, before conversion to micro units. + * + * For +-2g - 1 LSB = 0.976562 milli g = 0.009576 m/s^2 + * For +-4g - 1 LSB = 1.953125 milli g = 0.019153 m/s^2 + * For +-8g - 1 LSB = 3.90625 milli g = 0.038307 m/s^2 + * For +-16g - 1 LSB = 7.8125 milli g = 0.076614 m/s^2 + * + * The raw value used to select the different ranges is determined by the + * position of the first set bit in the scale value, so BMA400_SCALE_MIN + * should be odd. + * + * Scale values for +-2g, +-4g, +-8g and +-16g are populated into the + * bma400_scales array by left shifting BMA400_SCALE_MIN. + * e.g.: + * To select +-2g = 9577 << 0 = raw value to write is 0. + * To select +-8g = 9577 << 2 = raw value to write is 2. + * To select +-16g = 9577 << 3 = raw value to write is 3. + */ +#define BMA400_SCALE_MIN 9577 +#define BMA400_SCALE_MAX 76617 #define BMA400_NUM_REGULATORS 2 #define BMA400_VDD_REGULATOR 0 diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c index 7eeba80e32cb..58aa6a0e1180 100644 --- a/drivers/iio/accel/bma400_core.c +++ b/drivers/iio/accel/bma400_core.c @@ -13,14 +13,14 @@ #include #include -#include -#include #include #include #include #include #include +#include + #include "bma400.h" /* diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index e7e280282774..b12e80464706 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1496,10 +1496,14 @@ static int mma8452_reset(struct i2c_client *client) int i; int ret; - ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2, + /* + * On fxls8471, the device resets immediately after the reset bit is + * configured and does not ACK the transfer, so do not check the return + * value. The code below reads back the reset register to check whether + * the reset actually took effect.
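Returning to the BMA400 scale comment above: it gives the forward mapping (left-shift BMA400_SCALE_MIN by the range code), so the inverse, recovering the register value from a scale, is the bit position of the ratio. A sketch under that description; the driver's actual selection logic is outside this hunk:

	#include <linux/log2.h>

	static int bma400_scale_to_range_code(int scale)
	{
		/* scale / BMA400_SCALE_MIN is 1, 2, 4 or 8 for +-2g..+-16g */
		return ilog2(scale / BMA400_SCALE_MIN);
	}

	/* e.g. +-8g: 38308 / 9577 = 4, ilog2(4) = 2 -> raw value 2 */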
+ */ + i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2, MMA8452_CTRL_REG2_RST); - if (ret < 0) - return ret; for (i = 0; i < 10; i++) { usleep_range(100, 200); @@ -1542,11 +1546,13 @@ static int mma8452_probe(struct i2c_client *client, mutex_init(&data->lock); data->chip_info = device_get_match_data(&client->dev); - if (!data->chip_info && id) { - data->chip_info = &mma_chip_info_table[id->driver_data]; - } else { - dev_err(&client->dev, "unknown device model\n"); - return -ENODEV; + if (!data->chip_info) { + if (id) { + data->chip_info = &mma_chip_info_table[id->driver_data]; + } else { + dev_err(&client->dev, "unknown device model\n"); + return -ENODEV; + } } data->vdd_reg = devm_regulator_get(&client->dev, "vdd"); diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c index 5a2b0ffbb145..ecd9d8ad5928 100644 --- a/drivers/iio/accel/mxc4005.c +++ b/drivers/iio/accel/mxc4005.c @@ -461,8 +461,6 @@ static int mxc4005_probe(struct i2c_client *client, data->dready_trig->dev.parent = &client->dev; data->dready_trig->ops = &mxc4005_trigger_ops; iio_trigger_set_drvdata(data->dready_trig, indio_dev); - indio_dev->trig = data->dready_trig; - iio_trigger_get(indio_dev->trig); ret = devm_iio_trigger_register(&client->dev, data->dready_trig); if (ret) { @@ -470,6 +468,8 @@ static int mxc4005_probe(struct i2c_client *client, "failed to register trigger\n"); return ret; } + + indio_dev->trig = iio_trigger_get(data->dready_trig); } return devm_iio_device_register(&client->dev, indio_dev); diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index bd3500995037..19ab7d7251bc 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -170,7 +170,6 @@ static const struct iio_chan_spec ad7124_channel_template = { .sign = 'u', .realbits = 24, .storagebits = 32, - .shift = 8, .endianness = IIO_BE, }, }; diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c index ab204e9199e9..3e6ece05854d 100644 --- a/drivers/iio/adc/ad7292.c +++ b/drivers/iio/adc/ad7292.c @@ -289,10 +289,8 @@ static int ad7292_probe(struct spi_device *spi) ret = devm_add_action_or_reset(&spi->dev, ad7292_regulator_disable, st); - if (ret) { - regulator_disable(st->reg); + if (ret) return ret; - } ret = regulator_get_voltage(st->reg); if (ret < 0) diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c index 8c1e866f72e8..96eeda433ad6 100644 --- a/drivers/iio/adc/ad7923.c +++ b/drivers/iio/adc/ad7923.c @@ -93,6 +93,7 @@ enum ad7923_id { .sign = 'u', \ .realbits = (bits), \ .storagebits = 16, \ + .shift = 12 - (bits), \ .endianness = IIO_BE, \ }, \ } @@ -274,7 +275,8 @@ static int ad7923_read_raw(struct iio_dev *indio_dev, return ret; if (chan->address == EXTRACT(ret, 12, 4)) - *val = EXTRACT(ret, 0, 12); + *val = EXTRACT(ret, chan->scan_type.shift, + chan->scan_type.realbits); else return -EIO; diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c index 9109da2d2e15..cbe1011a2408 100644 --- a/drivers/iio/adc/adi-axi-adc.c +++ b/drivers/iio/adc/adi-axi-adc.c @@ -334,16 +334,19 @@ static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev) if (!try_module_get(cl->dev->driver->owner)) { mutex_unlock(®istered_clients_lock); + of_node_put(cln); return ERR_PTR(-ENODEV); } get_device(cl->dev); cl->info = info; mutex_unlock(®istered_clients_lock); + of_node_put(cln); return cl; } mutex_unlock(®istered_clients_lock); + of_node_put(cln); return ERR_PTR(-EPROBE_DEFER); } diff --git a/drivers/iio/adc/at91-sama5d2_adc.c 
b/drivers/iio/adc/at91-sama5d2_adc.c index 4ede7e766765..250b78ee1625 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -74,7 +74,7 @@ #define AT91_SAMA5D2_MR_ANACH BIT(23) /* Tracking Time */ #define AT91_SAMA5D2_MR_TRACKTIM(v) ((v) << 24) -#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xff +#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xf /* Transfer Time */ #define AT91_SAMA5D2_MR_TRANSFER(v) ((v) << 28) #define AT91_SAMA5D2_MR_TRANSFER_MAX 0x3 @@ -1353,10 +1353,12 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev, ret = at91_adc_read_position(st, chan->channel, &tmp_val); *val = tmp_val; + if (ret > 0) + ret = at91_adc_adjust_val_osr(st, val); mutex_unlock(&st->lock); iio_device_release_direct_mode(indio_dev); - return at91_adc_adjust_val_osr(st, val); + return ret; } if (chan->type == IIO_PRESSURE) { ret = iio_device_claim_direct_mode(indio_dev); @@ -1367,10 +1369,12 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev, ret = at91_adc_read_pressure(st, chan->channel, &tmp_val); *val = tmp_val; + if (ret > 0) + ret = at91_adc_adjust_val_osr(st, val); mutex_unlock(&st->lock); iio_device_release_direct_mode(indio_dev); - return at91_adc_adjust_val_osr(st, val); + return ret; } /* in this case we have a voltage channel */ @@ -1461,16 +1465,20 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev, /* if no change, optimize out */ if (val == st->oversampling_ratio) return 0; + mutex_lock(&st->lock); st->oversampling_ratio = val; /* update ratio */ at91_adc_config_emr(st); + mutex_unlock(&st->lock); return 0; case IIO_CHAN_INFO_SAMP_FREQ: if (val < st->soc_info.min_sample_rate || val > st->soc_info.max_sample_rate) return -EINVAL; + mutex_lock(&st->lock); at91_adc_setup_samp_freq(indio_dev, val); + mutex_unlock(&st->lock); return 0; default: return -EINVAL; @@ -1899,6 +1907,9 @@ static __maybe_unused int at91_adc_suspend(struct device *dev) struct iio_dev *indio_dev = dev_get_drvdata(dev); struct at91_adc_state *st = iio_priv(indio_dev); + if (iio_buffer_enabled(indio_dev)) + at91_adc_buffer_postdisable(indio_dev); + /* * Do a software reset of the ADC before we go to suspend.
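The TRACKTIM_MAX change at the top of this file follows from the layout the neighbouring defines already spell out: TRANSFER(v) starts at bit 28, so TRACKTIM, shifted to bit 24, only owns bits 27:24 and its maximum is the 4-bit 0xf; the old 0xff would spill into the TRANSFER field. That property can even be pinned down at compile time (sketch; BUILD_BUG_ON needs function scope):

	/* 0 with TRACKTIM_MAX = 0xf; non-zero (build break) with 0xff */
	BUILD_BUG_ON(AT91_SAMA5D2_MR_TRACKTIM(AT91_SAMA5D2_MR_TRACKTIM_MAX) &
		     AT91_SAMA5D2_MR_TRANSFER(AT91_SAMA5D2_MR_TRANSFER_MAX));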
* this will ensure that all pins are free from being muxed by the ADC @@ -1942,14 +1953,11 @@ static __maybe_unused int at91_adc_resume(struct device *dev) if (!iio_buffer_enabled(indio_dev)) return 0; - /* check if we are enabling triggered buffer or the touchscreen */ - if (at91_adc_current_chan_is_touch(indio_dev)) - return at91_adc_configure_touch(st, true); - else - return at91_adc_configure_trigger(st->trig, true); + ret = at91_adc_buffer_prepare(indio_dev); + if (ret) + goto vref_disable_resume; - /* not needed but more explicit */ - return 0; + return at91_adc_configure_trigger(st->trig, true); vref_disable_resume: regulator_disable(st->vref); diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 0a793e7cd53e..38d4a910bc52 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -616,8 +616,10 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev, trig->ops = &at91_adc_trigger_ops; ret = iio_trigger_register(trig); - if (ret) + if (ret) { + iio_trigger_free(trig); return NULL; + } return trig; } diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 5f5e8b39e4d2..84dbe9e2f0ef 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -196,6 +196,14 @@ static const struct dmi_system_id axp288_adc_ts_bias_override[] = { }, .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA, }, + { + /* Nuvision Solo 10 Draw */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TMAX"), + DMI_MATCH(DMI_PRODUCT_NAME, "TM101W610L"), + }, + .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA, + }, {} }; diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c index 1adddf5a88a9..61f373fab9a1 100644 --- a/drivers/iio/adc/ltc2497.c +++ b/drivers/iio/adc/ltc2497.c @@ -41,6 +41,19 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata, } *val = (be32_to_cpu(st->buf) >> 14) - (1 << 17); + + /* + * The part started a new conversion at the end of the above i2c + * transfer, so if the address didn't change since the last call + * everything is fine and we can return early. + * If not (which should only happen when some sort of bulk + * conversion is implemented) we have to program the new + * address. Note that this probably fails as the conversion that + * was triggered above is likely not complete yet and the two + * operations have to be done in a single transfer.
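The last sentence of the comment above is the key constraint: reprogramming the channel and collecting the result would have to share one bus transaction. With the raw i2c_msg API, that combined shape would look roughly like the sketch below (hypothetical buffers; the driver itself stays on the SMBus helpers):

	u8 addr_byte = new_address;	/* hypothetical next channel */
	u8 data[3];
	struct i2c_msg msgs[] = {
		{ .addr = client->addr, .flags = 0,        .len = 1, .buf = &addr_byte },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 3, .buf = data },
	};

	/* one START ... repeated START ... STOP, with no gap in between */
	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));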
+ */ + if (ddata->addr_prev == address) + return 0; } ret = i2c_smbus_write_byte(st->client, diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c index e573da5397bb..65278270a75c 100644 --- a/drivers/iio/adc/mcp3911.c +++ b/drivers/iio/adc/mcp3911.c @@ -38,8 +38,8 @@ #define MCP3911_CHANNEL(x) (MCP3911_REG_CHANNEL0 + x * 3) #define MCP3911_OFFCAL(x) (MCP3911_REG_OFFCAL_CH0 + x * 6) -/* Internal voltage reference in uV */ -#define MCP3911_INT_VREF_UV 1200000 +/* Internal voltage reference in mV */ +#define MCP3911_INT_VREF_MV 1200 #define MCP3911_REG_READ(reg, id) ((((reg) << 1) | ((id) << 5) | (1 << 0)) & 0xff) #define MCP3911_REG_WRITE(reg, id) ((((reg) << 1) | ((id) << 5) | (0 << 0)) & 0xff) @@ -111,6 +111,8 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev, if (ret) goto out; + *val = sign_extend32(*val, 23); + ret = IIO_VAL_INT; break; @@ -135,11 +137,18 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev, *val = ret / 1000; } else { - *val = MCP3911_INT_VREF_UV; + *val = MCP3911_INT_VREF_MV; } - *val2 = 24; - ret = IIO_VAL_FRACTIONAL_LOG2; + /* + * For 24bit Conversion + * Raw = ((Voltage)/(Vref) * 2^23 * Gain * 1.5 + * Voltage = Raw * (Vref)/(2^23 * Gain * 1.5) + */ + + /* val2 = (2^23 * 1.5) */ + *val2 = 12582912; + ret = IIO_VAL_FRACTIONAL; break; } diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c index 331a9a728217..acd9420c0416 100644 --- a/drivers/iio/adc/mp2629_adc.c +++ b/drivers/iio/adc/mp2629_adc.c @@ -56,7 +56,8 @@ static struct iio_map mp2629_adc_maps[] = { MP2629_MAP(SYSTEM_VOLT, "system-volt"), MP2629_MAP(INPUT_VOLT, "input-volt"), MP2629_MAP(BATT_CURRENT, "batt-current"), - MP2629_MAP(INPUT_CURRENT, "input-current") + MP2629_MAP(INPUT_CURRENT, "input-current"), + { } }; static int mp2629_read_raw(struct iio_dev *indio_dev, @@ -73,7 +74,7 @@ static int mp2629_read_raw(struct iio_dev *indio_dev, if (ret) return ret; - if (chan->address == MP2629_INPUT_VOLT) + if (chan->channel == MP2629_INPUT_VOLT) rval &= GENMASK(6, 0); *val = rval; return IIO_VAL_INT; diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c index aa32a1f385e2..2b463e1cf1c7 100644 --- a/drivers/iio/adc/sc27xx_adc.c +++ b/drivers/iio/adc/sc27xx_adc.c @@ -36,8 +36,8 @@ /* Bits and mask definition for SC27XX_ADC_CH_CFG register */ #define SC27XX_ADC_CHN_ID_MASK GENMASK(4, 0) -#define SC27XX_ADC_SCALE_MASK GENMASK(10, 8) -#define SC27XX_ADC_SCALE_SHIFT 8 +#define SC27XX_ADC_SCALE_MASK GENMASK(10, 9) +#define SC27XX_ADC_SCALE_SHIFT 9 /* Bits definitions for SC27XX_ADC_INT_EN registers */ #define SC27XX_ADC_IRQ_EN BIT(0) @@ -103,14 +103,14 @@ static struct sc27xx_adc_linear_graph small_scale_graph = { 100, 341, }; -static const struct sc27xx_adc_linear_graph big_scale_graph_calib = { - 4200, 856, - 3600, 733, +static const struct sc27xx_adc_linear_graph sc2731_big_scale_graph_calib = { + 4200, 850, + 3600, 728, }; -static const struct sc27xx_adc_linear_graph small_scale_graph_calib = { - 1000, 833, - 100, 80, +static const struct sc27xx_adc_linear_graph sc2731_small_scale_graph_calib = { + 1000, 838, + 100, 84, }; static int sc27xx_adc_get_calib_data(u32 calib_data, int calib_adc) @@ -130,11 +130,11 @@ static int sc27xx_adc_scale_calibration(struct sc27xx_adc_data *data, size_t len; if (big_scale) { - calib_graph = &big_scale_graph_calib; + calib_graph = &sc2731_big_scale_graph_calib; graph = &big_scale_graph; cell_name = "big_scale_calib"; } else { - calib_graph = &small_scale_graph_calib; + calib_graph = &sc2731_small_scale_graph_calib; graph 
= &small_scale_graph; cell_name = "small_scale_calib"; } diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index a83199b212a4..20fc867e3998 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c @@ -64,6 +64,7 @@ struct stm32_adc_priv; * @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet) * @has_syscfg: SYSCFG capability flags * @num_irqs: number of interrupt lines + * @num_adcs: maximum number of ADC instances in the common registers */ struct stm32_adc_priv_cfg { const struct stm32_adc_common_regs *regs; @@ -71,6 +72,7 @@ struct stm32_adc_priv_cfg { u32 max_clk_rate_hz; unsigned int has_syscfg; unsigned int num_irqs; + unsigned int num_adcs; }; /** @@ -333,7 +335,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc) * before invoking the interrupt handler (e.g. call ISR only for * IRQ-enabled ADCs). */ - for (i = 0; i < priv->cfg->num_irqs; i++) { + for (i = 0; i < priv->cfg->num_adcs; i++) { if ((status & priv->cfg->regs->eoc_msk[i] && stm32_adc_eoc_enabled(priv, i)) || (status & priv->cfg->regs->ovr_msk[i])) @@ -784,6 +786,7 @@ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = { .clk_sel = stm32f4_adc_clk_sel, .max_clk_rate_hz = 36000000, .num_irqs = 1, + .num_adcs = 3, }; static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = { @@ -792,14 +795,16 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = { .max_clk_rate_hz = 36000000, .has_syscfg = HAS_VBOOSTER, .num_irqs = 1, + .num_adcs = 2, }; static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = { .regs = &stm32h7_adc_common_regs, .clk_sel = stm32h7_adc_clk_sel, - .max_clk_rate_hz = 40000000, + .max_clk_rate_hz = 36000000, .has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD, .num_irqs = 2, + .num_adcs = 2, }; static const struct of_device_id stm32_adc_of_match[] = { diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 9939dee01743..e60ad48196ff 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1265,7 +1265,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data) struct stm32_adc *adc = iio_priv(indio_dev); const struct stm32_adc_regspec *regs = adc->cfg->regs; u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg); - u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg); /* Check ovr status right now, as ovr mask should be already disabled */ if (status & regs->isr_ovr.mask) { @@ -1280,11 +1279,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data) return IRQ_HANDLED; } - if (!(status & mask)) - dev_err_ratelimited(&indio_dev->dev, - "Unexpected IRQ: IER=0x%08x, ISR=0x%08x\n", - mask, status); - return IRQ_NONE; } @@ -1294,10 +1288,6 @@ static irqreturn_t stm32_adc_isr(int irq, void *data) struct stm32_adc *adc = iio_priv(indio_dev); const struct stm32_adc_regspec *regs = adc->cfg->regs; u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg); - u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg); - - if (!(status & mask)) - return IRQ_WAKE_THREAD; if (status & regs->isr_ovr.mask) { /* diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c index fba659bfdb40..64305d9fa560 100644 --- a/drivers/iio/adc/stmpe-adc.c +++ b/drivers/iio/adc/stmpe-adc.c @@ -61,7 +61,7 @@ struct stmpe_adc { static int stmpe_read_voltage(struct stmpe_adc *info, struct iio_chan_spec const *chan, int *val) { - long ret; + unsigned long ret; mutex_lock(&info->lock); @@ -79,7 +79,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info, ret = 
wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT); - if (ret <= 0) { + if (ret == 0) { stmpe_reg_write(info->stmpe, STMPE_REG_ADC_INT_STA, STMPE_ADC_CH(info->channel)); mutex_unlock(&info->lock); @@ -96,7 +96,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info, static int stmpe_read_temp(struct stmpe_adc *info, struct iio_chan_spec const *chan, int *val) { - long ret; + unsigned long ret; mutex_lock(&info->lock); @@ -114,7 +114,7 @@ static int stmpe_read_temp(struct stmpe_adc *info, ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT); - if (ret <= 0) { + if (ret == 0) { mutex_unlock(&info->lock); return -ETIMEDOUT; } diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index 60dd87e96f5f..384d167b4fd6 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -500,11 +500,11 @@ static int ccs811_probe(struct i2c_client *client, data->drdy_trig->dev.parent = &client->dev; data->drdy_trig->ops = &ccs811_trigger_ops; iio_trigger_set_drvdata(data->drdy_trig, indio_dev); - indio_dev->trig = data->drdy_trig; - iio_trigger_get(indio_dev->trig); ret = iio_trigger_register(data->drdy_trig); if (ret) goto err_poweroff; + + indio_dev->trig = iio_trigger_get(data->drdy_trig); } ret = iio_triggered_buffer_setup(indio_dev, NULL, diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 7a69c1be7393..56206fdbceb9 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -70,16 +70,18 @@ st_sensors_match_odr_error: int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr) { - int err; + int err = 0; struct st_sensor_odr_avl odr_out = {0, 0}; struct st_sensor_data *sdata = iio_priv(indio_dev); + mutex_lock(&sdata->odr_lock); + if (!sdata->sensor_settings->odr.mask) - return 0; + goto unlock_mutex; err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out); if (err < 0) - goto st_sensors_match_odr_error; + goto unlock_mutex; if ((sdata->sensor_settings->odr.addr == sdata->sensor_settings->pw.addr) && @@ -102,7 +104,9 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr) if (err >= 0) sdata->odr = odr_out.hz; -st_sensors_match_odr_error: +unlock_mutex: + mutex_unlock(&sdata->odr_lock); + return err; } EXPORT_SYMBOL(st_sensors_set_odr); @@ -364,6 +368,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, struct st_sensors_platform_data *of_pdata; int err = 0; + mutex_init(&sdata->odr_lock); + /* If OF/DT pdata exists, it will take precedence of anything else */ of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata); if (IS_ERR(of_pdata)) @@ -557,18 +563,24 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev, err = -EBUSY; goto out; } else { + mutex_lock(&sdata->odr_lock); err = st_sensors_set_enable(indio_dev, true); - if (err < 0) + if (err < 0) { + mutex_unlock(&sdata->odr_lock); goto out; + } msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr); err = st_sensors_read_axis_data(indio_dev, ch, val); - if (err < 0) + if (err < 0) { + mutex_unlock(&sdata->odr_lock); goto out; + } *val = *val >> ch->scan_type.shift; err = st_sensors_set_enable(indio_dev, false); + mutex_unlock(&sdata->odr_lock); } out: mutex_unlock(&indio_dev->mlock); diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c index e86886ca5ee4..b168eb14c246 100644 --- a/drivers/iio/dac/ad5446.c +++ b/drivers/iio/dac/ad5446.c @@ -178,7 +178,7 @@ static int 
ad5446_read_raw(struct iio_dev *indio_dev, switch (m) { case IIO_CHAN_INFO_RAW: - *val = st->cached_val; + *val = st->cached_val >> chan->scan_type.shift; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: *val = st->vref_mv; diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index 0405e92b9e8c..987264410278 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c @@ -523,7 +523,7 @@ static int ad5592r_alloc_channels(struct iio_dev *iio_dev) if (!ret) st->channel_modes[reg] = tmp; - fwnode_property_read_u32(child, "adi,off-state", &tmp); + ret = fwnode_property_read_u32(child, "adi,off-state", &tmp); if (!ret) st->channel_offstate[reg] = tmp; } diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c index 5b4df36fdc2a..4cc855c78121 100644 --- a/drivers/iio/dac/ad5593r.c +++ b/drivers/iio/dac/ad5593r.c @@ -13,6 +13,8 @@ #include #include +#include + #define AD5593R_MODE_CONF (0 << 4) #define AD5593R_MODE_DAC_WRITE (1 << 4) #define AD5593R_MODE_ADC_READBACK (4 << 4) @@ -20,6 +22,24 @@ #define AD5593R_MODE_GPIO_READBACK (6 << 4) #define AD5593R_MODE_REG_READBACK (7 << 4) +static int ad5593r_read_word(struct i2c_client *i2c, u8 reg, u16 *value) +{ + int ret; + u8 buf[2]; + + ret = i2c_smbus_write_byte(i2c, reg); + if (ret < 0) + return ret; + + ret = i2c_master_recv(i2c, buf, sizeof(buf)); + if (ret < 0) + return ret; + + *value = get_unaligned_be16(buf); + + return 0; +} + static int ad5593r_write_dac(struct ad5592r_state *st, unsigned chan, u16 value) { struct i2c_client *i2c = to_i2c_client(st->dev); @@ -38,13 +58,7 @@ static int ad5593r_read_adc(struct ad5592r_state *st, unsigned chan, u16 *value) if (val < 0) return (int) val; - val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_ADC_READBACK); - if (val < 0) - return (int) val; - - *value = (u16) val; - - return 0; + return ad5593r_read_word(i2c, AD5593R_MODE_ADC_READBACK, value); } static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value) @@ -58,25 +72,19 @@ static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value) static int ad5593r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value) { struct i2c_client *i2c = to_i2c_client(st->dev); - s32 val; - val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_REG_READBACK | reg); - if (val < 0) - return (int) val; - - *value = (u16) val; - - return 0; + return ad5593r_read_word(i2c, AD5593R_MODE_REG_READBACK | reg, value); } static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value) { struct i2c_client *i2c = to_i2c_client(st->dev); - s32 val; + u16 val; + int ret; - val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_GPIO_READBACK); - if (val < 0) - return (int) val; + ret = ad5593r_read_word(i2c, AD5593R_MODE_GPIO_READBACK, &val); + if (ret) + return ret; *value = (u8) val; diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c index c0b7ef900735..c24f609c2ade 100644 --- a/drivers/iio/dummy/iio_simple_dummy.c +++ b/drivers/iio/dummy/iio_simple_dummy.c @@ -575,10 +575,9 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) */ swd = kzalloc(sizeof(*swd), GFP_KERNEL); - if (!swd) { - ret = -ENOMEM; - goto error_kzalloc; - } + if (!swd) + return ERR_PTR(-ENOMEM); + /* * Allocate an IIO device. 
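A note on the ad5593r change above before the dummy-driver hunk continues: the new ad5593r_read_word() splits what i2c_smbus_read_word_swapped() did in one combined transaction into a pointer-byte write and a separate two-byte read, which places a STOP condition between the two phases, presumably what the part requires. Call sites reduce to:

	u16 adc_result;
	int ret;

	ret = ad5593r_read_word(i2c, AD5593R_MODE_ADC_READBACK, &adc_result);
	if (ret)
		return ret;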
* @@ -590,7 +589,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) indio_dev = iio_device_alloc(parent, sizeof(*st)); if (!indio_dev) { ret = -ENOMEM; - goto error_ret; + goto error_free_swd; } st = iio_priv(indio_dev); @@ -616,6 +615,10 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) * indio_dev->name = spi_get_device_id(spi)->name; */ indio_dev->name = kstrdup(name, GFP_KERNEL); + if (!indio_dev->name) { + ret = -ENOMEM; + goto error_free_device; + } /* Provide description of available channels */ indio_dev->channels = iio_dummy_channels; @@ -632,7 +635,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name) ret = iio_simple_dummy_events_register(indio_dev); if (ret < 0) - goto error_free_device; + goto error_free_name; ret = iio_simple_dummy_configure_buffer(indio_dev); if (ret < 0) @@ -649,11 +652,12 @@ error_unconfigure_buffer: iio_simple_dummy_unconfigure_buffer(indio_dev); error_unregister_events: iio_simple_dummy_events_unregister(indio_dev); +error_free_name: + kfree(indio_dev->name); error_free_device: iio_device_free(indio_dev); -error_ret: +error_free_swd: kfree(swd); -error_kzalloc: return ERR_PTR(ret); } diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index 39e1c4306c47..84c6ad4bcccb 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c @@ -872,6 +872,7 @@ static int mpu3050_power_up(struct mpu3050 *mpu3050) ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM, MPU3050_PWR_MGM_SLEEP, 0); if (ret) { + regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs); dev_err(mpu3050->dev, "error setting power mode\n"); return ret; } diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index 38734e4ce360..82d01ac36128 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -245,14 +245,14 @@ static int afe4403_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct afe4403_data *afe = iio_priv(indio_dev); - unsigned int reg = afe4403_channel_values[chan->address]; - unsigned int field = afe4403_channel_leds[chan->address]; + unsigned int reg, field; int ret; switch (chan->type) { case IIO_INTENSITY: switch (mask) { case IIO_CHAN_INFO_RAW: + reg = afe4403_channel_values[chan->address]; ret = afe4403_read(afe, reg, val); if (ret) return ret; @@ -262,6 +262,7 @@ static int afe4403_read_raw(struct iio_dev *indio_dev, case IIO_CURRENT: switch (mask) { case IIO_CHAN_INFO_RAW: + field = afe4403_channel_leds[chan->address]; ret = regmap_field_read(afe->fields[field], val); if (ret) return ret; diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index 61fe4932d81d..0eaa34da59a8 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c @@ -250,20 +250,20 @@ static int afe4404_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct afe4404_data *afe = iio_priv(indio_dev); - unsigned int value_reg = afe4404_channel_values[chan->address]; - unsigned int led_field = afe4404_channel_leds[chan->address]; - unsigned int offdac_field = afe4404_channel_offdacs[chan->address]; + unsigned int value_reg, led_field, offdac_field; int ret; switch (chan->type) { case IIO_INTENSITY: switch (mask) { case IIO_CHAN_INFO_RAW: + value_reg = afe4404_channel_values[chan->address]; ret = regmap_read(afe->regmap, value_reg, val); if (ret) return ret; return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: + offdac_field = afe4404_channel_offdacs[chan->address]; ret = 
regmap_field_read(afe->fields[offdac_field], val); if (ret) return ret; @@ -273,6 +273,7 @@ static int afe4404_read_raw(struct iio_dev *indio_dev, case IIO_CURRENT: switch (mask) { case IIO_CHAN_INFO_RAW: + led_field = afe4404_channel_leds[chan->address]; ret = regmap_field_read(afe->fields[led_field], val); if (ret) return ret; @@ -295,19 +296,20 @@ static int afe4404_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct afe4404_data *afe = iio_priv(indio_dev); - unsigned int led_field = afe4404_channel_leds[chan->address]; - unsigned int offdac_field = afe4404_channel_offdacs[chan->address]; + unsigned int led_field, offdac_field; switch (chan->type) { case IIO_INTENSITY: switch (mask) { case IIO_CHAN_INFO_OFFSET: + offdac_field = afe4404_channel_offdacs[chan->address]; return regmap_field_write(afe->fields[offdac_field], val); } break; case IIO_CURRENT: switch (mask) { case IIO_CHAN_INFO_RAW: + led_field = afe4404_channel_leds[chan->address]; return regmap_field_write(afe->fields[led_field], val); } break; diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c index 82f03a4dc47a..5fd61889f593 100644 --- a/drivers/iio/imu/bmi160/bmi160_core.c +++ b/drivers/iio/imu/bmi160/bmi160_core.c @@ -731,7 +731,7 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi) ret = regmap_write(data->regmap, BMI160_REG_CMD, BMI160_CMD_SOFTRESET); if (ret) - return ret; + goto disable_regulator; usleep_range(BMI160_SOFTRESET_USLEEP, BMI160_SOFTRESET_USLEEP + 1); @@ -742,29 +742,37 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi) if (use_spi) { ret = regmap_read(data->regmap, BMI160_REG_DUMMY, &val); if (ret) - return ret; + goto disable_regulator; } ret = regmap_read(data->regmap, BMI160_REG_CHIP_ID, &val); if (ret) { dev_err(dev, "Error reading chip id\n"); - return ret; + goto disable_regulator; } if (val != BMI160_CHIP_ID_VAL) { dev_err(dev, "Wrong chip id, got %x expected %x\n", val, BMI160_CHIP_ID_VAL); - return -ENODEV; + ret = -ENODEV; + goto disable_regulator; } ret = bmi160_set_mode(data, BMI160_ACCEL, true); if (ret) - return ret; + goto disable_regulator; ret = bmi160_set_mode(data, BMI160_GYRO, true); if (ret) - return ret; + goto disable_accel; return 0; + +disable_accel: + bmi160_set_mode(data, BMI160_ACCEL, false); + +disable_regulator: + regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies); + return ret; } static int bmi160_data_rdy_trigger_set_state(struct iio_trigger *trig, diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h index c0f5059b13b3..995a9dc06521 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h @@ -17,6 +17,7 @@ #include "inv_icm42600_buffer.h" enum inv_icm42600_chip { + INV_CHIP_INVALID, INV_CHIP_ICM42600, INV_CHIP_ICM42602, INV_CHIP_ICM42605, diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c index 8bd77185ccb7..dcbd4e928851 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c @@ -565,7 +565,7 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq, bool open_drain; int ret; - if (chip < 0 || chip >= INV_CHIP_NB) { + if (chip <= INV_CHIP_INVALID || chip >= INV_CHIP_NB) { dev_err(dev, "invalid chip = %d\n", chip); return -ENODEV; } diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c 
b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c index 85b1934cec60..53891010a91d 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c @@ -18,12 +18,15 @@ static int inv_icm42600_i2c_bus_setup(struct inv_icm42600_state *st) unsigned int mask, val; int ret; - /* setup interface registers */ - ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6, - INV_ICM42600_INTF_CONFIG6_MASK, - INV_ICM42600_INTF_CONFIG6_I3C_EN); - if (ret) - return ret; + /* + * Setup interface registers. + * This register write to REG_INTF_CONFIG6 enables a spike filter that + * affects the line and can prevent the I2C ACK from being seen by the + * controller, so the return value is not checked. + */ + regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6, + INV_ICM42600_INTF_CONFIG6_MASK, + INV_ICM42600_INTF_CONFIG6_I3C_EN); ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4, INV_ICM42600_INTF_CONFIG4_I3C_BUS_ONLY, 0); diff --git a/drivers/iio/industrialio-sw-trigger.c b/drivers/iio/industrialio-sw-trigger.c index 9ae793a70b8b..a7714d32a641 100644 --- a/drivers/iio/industrialio-sw-trigger.c +++ b/drivers/iio/industrialio-sw-trigger.c @@ -58,8 +58,12 @@ int iio_register_sw_trigger_type(struct iio_sw_trigger_type *t) t->group = configfs_register_default_group(iio_triggers_group, t->name, &iio_trigger_type_group_type); - if (IS_ERR(t->group)) + if (IS_ERR(t->group)) { + mutex_lock(&iio_trigger_types_lock); + list_del(&t->list); + mutex_unlock(&iio_trigger_types_lock); ret = PTR_ERR(t->group); + } return ret; } diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index 8c3faa797284..c32b2577dd99 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -136,9 +136,10 @@ static int __of_iio_channel_get(struct iio_channel *channel, idev = bus_find_device(&iio_bus_type, NULL, iiospec.np, iio_dev_node_match); - of_node_put(iiospec.np); - if (idev == NULL) + if (idev == NULL) { + of_node_put(iiospec.np); return -EPROBE_DEFER; + } indio_dev = dev_to_iio_dev(idev); channel->indio_dev = indio_dev; @@ -146,6 +147,7 @@ static int __of_iio_channel_get(struct iio_channel *channel, index = indio_dev->info->of_xlate(indio_dev, &iiospec); else index = __of_iio_simple_xlate(indio_dev, &iiospec); + of_node_put(iiospec.np); if (index < 0) goto err_put; channel->channel = &indio_dev->channels[index]; diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index 79637ef0eb63..2f31534d7c26 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -294,6 +294,8 @@ config RPR0521 tristate "ROHM RPR0521 ALS and proximity sensor driver" depends on I2C select REGMAP_I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say Y here if you want to build support for ROHM's RPR0521 ambient light and proximity sensor device.
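The inkern change above is a reference-lifetime fix that is easier to see whole: iiospec.np, acquired earlier in the function (outside this hunk), must stay referenced until the xlate callback has consumed it. Consolidated, the corrected flow reads:

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	if (idev == NULL) {
		of_node_put(iiospec.np);	/* failure path drops the ref */
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	of_node_put(iiospec.np);	/* the xlate above was its last user */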
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c index 9afb3fcc74e6..4a7ccf268ebf 100644 --- a/drivers/iio/light/apds9960.c +++ b/drivers/iio/light/apds9960.c @@ -53,9 +53,6 @@ #define APDS9960_REG_CONTROL_PGAIN_MASK_SHIFT 2 #define APDS9960_REG_CONFIG_2 0x90 -#define APDS9960_REG_CONFIG_2_GGAIN_MASK 0x60 -#define APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT 5 - #define APDS9960_REG_ID 0x92 #define APDS9960_REG_STATUS 0x93 @@ -76,6 +73,9 @@ #define APDS9960_REG_GCONF_1_GFIFO_THRES_MASK_SHIFT 6 #define APDS9960_REG_GCONF_2 0xa3 +#define APDS9960_REG_GCONF_2_GGAIN_MASK 0x60 +#define APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT 5 + #define APDS9960_REG_GOFFSET_U 0xa4 #define APDS9960_REG_GOFFSET_D 0xa5 #define APDS9960_REG_GPULSE 0xa6 @@ -395,9 +395,9 @@ static int apds9960_set_pxs_gain(struct apds9960_data *data, int val) } ret = regmap_update_bits(data->regmap, - APDS9960_REG_CONFIG_2, - APDS9960_REG_CONFIG_2_GGAIN_MASK, - idx << APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT); + APDS9960_REG_GCONF_2, + APDS9960_REG_GCONF_2_GGAIN_MASK, + idx << APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT); if (!ret) data->pxs_gain = idx; mutex_unlock(&data->lock); diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c index 2f8b494f3e08..74e75477660a 100644 --- a/drivers/iio/light/isl29028.c +++ b/drivers/iio/light/isl29028.c @@ -627,7 +627,7 @@ static int isl29028_probe(struct i2c_client *client, ISL29028_POWER_OFF_DELAY_MS); pm_runtime_use_autosuspend(&client->dev); - ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev); + ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "%s(): iio registration failed with error %d\n", diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c index 40b7dd266b31..e39d512145a6 100644 --- a/drivers/iio/light/tsl2583.c +++ b/drivers/iio/light/tsl2583.c @@ -856,7 +856,7 @@ static int tsl2583_probe(struct i2c_client *clientp, TSL2583_POWER_OFF_DELAY_MS); pm_runtime_use_autosuspend(&clientp->dev); - ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev); + ret = iio_device_register(indio_dev); if (ret) { dev_err(&clientp->dev, "%s: iio registration failed\n", __func__); diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index d988b6ac3659..3774e5975f77 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -389,6 +389,7 @@ static int ak8975_power_on(const struct ak8975_data *data) if (ret) { dev_warn(&data->client->dev, "Failed to enable specified Vid supply\n"); + regulator_disable(data->vdd); return ret; } diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c index 0730380ceb69..cf8b92fae1b3 100644 --- a/drivers/iio/pressure/dps310.c +++ b/drivers/iio/pressure/dps310.c @@ -89,6 +89,7 @@ struct dps310_data { s32 c00, c10, c20, c30, c01, c11, c21; s32 pressure_raw; s32 temp_raw; + bool timeout_recovery_failed; }; static const struct iio_chan_spec dps310_channels[] = { @@ -159,6 +160,102 @@ static int dps310_get_coefs(struct dps310_data *data) return 0; } +/* + * Some versions of the chip will read temperatures in the ~60C range when + * it's actually ~20C. This is the manufacturer recommended workaround + * to correct the issue. The registers used below are undocumented. 
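One remark on the isl29028/tsl2583 hunks above before the DPS310 workaround body: dropping devm_iio_device_register() for plain iio_device_register() means the drivers must now unregister explicitly, so the IIO device goes away before runtime PM is torn down. A plausible remove() pairing, assuming the iio_dev is stored as client data as these drivers commonly do (the remove() side is outside this diff):

	static int isl29028_remove(struct i2c_client *client)
	{
		struct iio_dev *indio_dev = i2c_get_clientdata(client);

		iio_device_unregister(indio_dev);
		/* runtime-PM teardown and chip power-off follow here */
		return 0;
	}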
+ */ +static int dps310_temp_workaround(struct dps310_data *data) +{ + int rc; + int reg; + + rc = regmap_read(data->regmap, 0x32, ®); + if (rc) + return rc; + + /* + * If bit 1 is set then the device is okay, and the workaround does not + * need to be applied + */ + if (reg & BIT(1)) + return 0; + + rc = regmap_write(data->regmap, 0x0e, 0xA5); + if (rc) + return rc; + + rc = regmap_write(data->regmap, 0x0f, 0x96); + if (rc) + return rc; + + rc = regmap_write(data->regmap, 0x62, 0x02); + if (rc) + return rc; + + rc = regmap_write(data->regmap, 0x0e, 0x00); + if (rc) + return rc; + + return regmap_write(data->regmap, 0x0f, 0x00); +} + +static int dps310_startup(struct dps310_data *data) +{ + int rc; + int ready; + + /* + * Set up pressure sensor in single sample, one measurement per second + * mode + */ + rc = regmap_write(data->regmap, DPS310_PRS_CFG, 0); + if (rc) + return rc; + + /* + * Set up external (MEMS) temperature sensor in single sample, one + * measurement per second mode + */ + rc = regmap_write(data->regmap, DPS310_TMP_CFG, DPS310_TMP_EXT); + if (rc) + return rc; + + /* Temp and pressure shifts are disabled when PRC <= 8 */ + rc = regmap_write_bits(data->regmap, DPS310_CFG_REG, + DPS310_PRS_SHIFT_EN | DPS310_TMP_SHIFT_EN, 0); + if (rc) + return rc; + + /* MEAS_CFG doesn't update correctly unless first written with 0 */ + rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG, + DPS310_MEAS_CTRL_BITS, 0); + if (rc) + return rc; + + /* Turn on temperature and pressure measurement in the background */ + rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG, + DPS310_MEAS_CTRL_BITS, DPS310_PRS_EN | + DPS310_TEMP_EN | DPS310_BACKGROUND); + if (rc) + return rc; + + /* + * Calibration coefficients required for reporting temperature. + * They are available 40ms after the device has started + */ + rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, + ready & DPS310_COEF_RDY, 10000, 40000); + if (rc) + return rc; + + rc = dps310_get_coefs(data); + if (rc) + return rc; + + return dps310_temp_workaround(data); +} + static int dps310_get_pres_precision(struct dps310_data *data) { int rc; @@ -297,11 +394,69 @@ static int dps310_get_temp_k(struct dps310_data *data) return scale_factors[ilog2(rc)]; } +static int dps310_reset_wait(struct dps310_data *data) +{ + int rc; + + rc = regmap_write(data->regmap, DPS310_RESET, DPS310_RESET_MAGIC); + if (rc) + return rc; + + /* Wait for device chip access: 2.5ms in specification */ + usleep_range(2500, 12000); + return 0; +} + +static int dps310_reset_reinit(struct dps310_data *data) +{ + int rc; + + rc = dps310_reset_wait(data); + if (rc) + return rc; + + return dps310_startup(data); +} + +static int dps310_ready_status(struct dps310_data *data, int ready_bit, int timeout) +{ + int sleep = DPS310_POLL_SLEEP_US(timeout); + int ready; + + return regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, ready & ready_bit, + sleep, timeout); +} + +static int dps310_ready(struct dps310_data *data, int ready_bit, int timeout) +{ + int rc; + + rc = dps310_ready_status(data, ready_bit, timeout); + if (rc) { + if (rc == -ETIMEDOUT && !data->timeout_recovery_failed) { + /* Reset and reinitialize the chip. */ + if (dps310_reset_reinit(data)) { + data->timeout_recovery_failed = true; + } else { + /* Try again to get sensor ready status. 
*/ + if (dps310_ready_status(data, ready_bit, timeout)) + data->timeout_recovery_failed = true; + else + return 0; + } + } + + return rc; + } + + data->timeout_recovery_failed = false; + return 0; +} + static int dps310_read_pres_raw(struct dps310_data *data) { int rc; int rate; - int ready; int timeout; s32 raw; u8 val[3]; @@ -313,9 +468,7 @@ static int dps310_read_pres_raw(struct dps310_data *data) timeout = DPS310_POLL_TIMEOUT_US(rate); /* Poll for sensor readiness; base the timeout upon the sample rate. */ - rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, - ready & DPS310_PRS_RDY, - DPS310_POLL_SLEEP_US(timeout), timeout); + rc = dps310_ready(data, DPS310_PRS_RDY, timeout); if (rc) goto done; @@ -352,7 +505,6 @@ static int dps310_read_temp_raw(struct dps310_data *data) { int rc; int rate; - int ready; int timeout; if (mutex_lock_interruptible(&data->lock)) @@ -362,10 +514,8 @@ static int dps310_read_temp_raw(struct dps310_data *data) timeout = DPS310_POLL_TIMEOUT_US(rate); /* Poll for sensor readiness; base the timeout upon the sample rate. */ - rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, - ready & DPS310_TMP_RDY, - DPS310_POLL_SLEEP_US(timeout), timeout); - if (rc < 0) + rc = dps310_ready(data, DPS310_TMP_RDY, timeout); + if (rc) goto done; rc = dps310_read_temp_ready(data); @@ -660,7 +810,7 @@ static void dps310_reset(void *action_data) { struct dps310_data *data = action_data; - regmap_write(data->regmap, DPS310_RESET, DPS310_RESET_MAGIC); + dps310_reset_wait(data); } static const struct regmap_config dps310_regmap_config = { @@ -677,52 +827,12 @@ static const struct iio_info dps310_info = { .write_raw = dps310_write_raw, }; -/* - * Some verions of chip will read temperatures in the ~60C range when - * its actually ~20C. This is the manufacturer recommended workaround - * to correct the issue. The registers used below are undocumented. 
- */ -static int dps310_temp_workaround(struct dps310_data *data) -{ - int rc; - int reg; - - rc = regmap_read(data->regmap, 0x32, ®); - if (rc < 0) - return rc; - - /* - * If bit 1 is set then the device is okay, and the workaround does not - * need to be applied - */ - if (reg & BIT(1)) - return 0; - - rc = regmap_write(data->regmap, 0x0e, 0xA5); - if (rc < 0) - return rc; - - rc = regmap_write(data->regmap, 0x0f, 0x96); - if (rc < 0) - return rc; - - rc = regmap_write(data->regmap, 0x62, 0x02); - if (rc < 0) - return rc; - - rc = regmap_write(data->regmap, 0x0e, 0x00); - if (rc < 0) - return rc; - - return regmap_write(data->regmap, 0x0f, 0x00); -} - static int dps310_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct dps310_data *data; struct iio_dev *iio; - int rc, ready; + int rc; iio = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!iio) @@ -747,54 +857,8 @@ static int dps310_probe(struct i2c_client *client, if (rc) return rc; - /* - * Set up pressure sensor in single sample, one measurement per second - * mode - */ - rc = regmap_write(data->regmap, DPS310_PRS_CFG, 0); - - /* - * Set up external (MEMS) temperature sensor in single sample, one - * measurement per second mode - */ - rc = regmap_write(data->regmap, DPS310_TMP_CFG, DPS310_TMP_EXT); - if (rc < 0) - return rc; - - /* Temp and pressure shifts are disabled when PRC <= 8 */ - rc = regmap_write_bits(data->regmap, DPS310_CFG_REG, - DPS310_PRS_SHIFT_EN | DPS310_TMP_SHIFT_EN, 0); - if (rc < 0) - return rc; - - /* MEAS_CFG doesn't update correctly unless first written with 0 */ - rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG, - DPS310_MEAS_CTRL_BITS, 0); - if (rc < 0) - return rc; - - /* Turn on temperature and pressure measurement in the background */ - rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG, - DPS310_MEAS_CTRL_BITS, DPS310_PRS_EN | - DPS310_TEMP_EN | DPS310_BACKGROUND); - if (rc < 0) - return rc; - - /* - * Calibration coefficients required for reporting temperature. - * They are available 40ms after the device has started - */ - rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, - ready & DPS310_COEF_RDY, 10000, 40000); - if (rc < 0) - return rc; - - rc = dps310_get_coefs(data); - if (rc < 0) - return rc; - - rc = dps310_temp_workaround(data); - if (rc < 0) + rc = dps310_startup(data); + if (rc) return rc; rc = devm_iio_device_register(&client->dev, iio); diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h index bc06271fa38b..5e2d2d4d87b5 100644 --- a/drivers/iio/pressure/ms5611.h +++ b/drivers/iio/pressure/ms5611.h @@ -25,13 +25,6 @@ enum { MS5607, }; -struct ms5611_chip_info { - u16 prom[MS5611_PROM_WORDS_NB]; - - int (*temp_and_pressure_compensate)(struct ms5611_chip_info *chip_info, - s32 *temp, s32 *pressure); -}; - /* * OverSampling Rate descriptor. 
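For the compensation helpers a few hunks below, it helps to know that the math works in hundredths of units (the usual MS5611 datasheet convention): the 2000 baseline in t = 2000 + ((prom[6] * dt) >> 23) is 20.00 degC, and the t < 2000 branch is the low-temperature second-order correction. A worked instance with made-up values:

	/* hypothetical PROM word and delta: prom[6] = 24576, dt = 2366 */
	/* 24576 * 2366 = 58146816;  58146816 >> 23 = 6 */
	/* t = 2000 + 6 = 2006  ->  20.06 degC */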
* Warning: cmd MUST be kept aligned on a word boundary (see @@ -50,12 +43,15 @@ struct ms5611_state { const struct ms5611_osr *pressure_osr; const struct ms5611_osr *temp_osr; - int (*reset)(struct device *dev); - int (*read_prom_word)(struct device *dev, int index, u16 *word); - int (*read_adc_temp_and_pressure)(struct device *dev, + u16 prom[MS5611_PROM_WORDS_NB]; + + int (*reset)(struct ms5611_state *st); + int (*read_prom_word)(struct ms5611_state *st, int index, u16 *word); + int (*read_adc_temp_and_pressure)(struct ms5611_state *st, s32 *temp, s32 *pressure); - struct ms5611_chip_info *chip_info; + int (*compensate_temp_and_pressure)(struct ms5611_state *st, s32 *temp, + s32 *pressure); struct regulator *vdd; }; diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c index 214b0d25f598..874a73b3ea9d 100644 --- a/drivers/iio/pressure/ms5611_core.c +++ b/drivers/iio/pressure/ms5611_core.c @@ -85,8 +85,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev) struct ms5611_state *st = iio_priv(indio_dev); for (i = 0; i < MS5611_PROM_WORDS_NB; i++) { - ret = st->read_prom_word(&indio_dev->dev, - i, &st->chip_info->prom[i]); + ret = st->read_prom_word(st, i, &st->prom[i]); if (ret < 0) { dev_err(&indio_dev->dev, "failed to read prom at %d\n", i); @@ -94,7 +93,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev) } } - if (!ms5611_prom_is_valid(st->chip_info->prom, MS5611_PROM_WORDS_NB)) { + if (!ms5611_prom_is_valid(st->prom, MS5611_PROM_WORDS_NB)) { dev_err(&indio_dev->dev, "PROM integrity check failed\n"); return -ENODEV; } @@ -108,28 +107,27 @@ static int ms5611_read_temp_and_pressure(struct iio_dev *indio_dev, int ret; struct ms5611_state *st = iio_priv(indio_dev); - ret = st->read_adc_temp_and_pressure(&indio_dev->dev, temp, pressure); + ret = st->read_adc_temp_and_pressure(st, temp, pressure); if (ret < 0) { dev_err(&indio_dev->dev, "failed to read temperature and pressure\n"); return ret; } - return st->chip_info->temp_and_pressure_compensate(st->chip_info, - temp, pressure); + return st->compensate_temp_and_pressure(st, temp, pressure); } -static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info, +static int ms5611_temp_and_pressure_compensate(struct ms5611_state *st, s32 *temp, s32 *pressure) { s32 t = *temp, p = *pressure; s64 off, sens, dt; - dt = t - (chip_info->prom[5] << 8); - off = ((s64)chip_info->prom[2] << 16) + ((chip_info->prom[4] * dt) >> 7); - sens = ((s64)chip_info->prom[1] << 15) + ((chip_info->prom[3] * dt) >> 8); + dt = t - (st->prom[5] << 8); + off = ((s64)st->prom[2] << 16) + ((st->prom[4] * dt) >> 7); + sens = ((s64)st->prom[1] << 15) + ((st->prom[3] * dt) >> 8); - t = 2000 + ((chip_info->prom[6] * dt) >> 23); + t = 2000 + ((st->prom[6] * dt) >> 23); if (t < 2000) { s64 off2, sens2, t2; @@ -155,17 +153,17 @@ static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_inf return 0; } -static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info, +static int ms5607_temp_and_pressure_compensate(struct ms5611_state *st, s32 *temp, s32 *pressure) { s32 t = *temp, p = *pressure; s64 off, sens, dt; - dt = t - (chip_info->prom[5] << 8); - off = ((s64)chip_info->prom[2] << 17) + ((chip_info->prom[4] * dt) >> 6); - sens = ((s64)chip_info->prom[1] << 16) + ((chip_info->prom[3] * dt) >> 7); + dt = t - (st->prom[5] << 8); + off = ((s64)st->prom[2] << 17) + ((st->prom[4] * dt) >> 6); + sens = ((s64)st->prom[1] << 16) + ((st->prom[3] * dt) >> 7); - t = 2000 + 
((chip_info->prom[6] * dt) >> 23); + t = 2000 + ((st->prom[6] * dt) >> 23); if (t < 2000) { s64 off2, sens2, t2, tmp; @@ -196,7 +194,7 @@ static int ms5611_reset(struct iio_dev *indio_dev) int ret; struct ms5611_state *st = iio_priv(indio_dev); - ret = st->reset(&indio_dev->dev); + ret = st->reset(st); if (ret < 0) { dev_err(&indio_dev->dev, "failed to reset device\n"); return ret; @@ -343,15 +341,6 @@ static int ms5611_write_raw(struct iio_dev *indio_dev, static const unsigned long ms5611_scan_masks[] = {0x3, 0}; -static struct ms5611_chip_info chip_info_tbl[] = { - [MS5611] = { - .temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate, - }, - [MS5607] = { - .temp_and_pressure_compensate = ms5607_temp_and_pressure_compensate, - } -}; - static const struct iio_chan_spec ms5611_channels[] = { { .type = IIO_PRESSURE, @@ -434,7 +423,20 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, struct ms5611_state *st = iio_priv(indio_dev); mutex_init(&st->lock); - st->chip_info = &chip_info_tbl[type]; + + switch (type) { + case MS5611: + st->compensate_temp_and_pressure = + ms5611_temp_and_pressure_compensate; + break; + case MS5607: + st->compensate_temp_and_pressure = + ms5607_temp_and_pressure_compensate; + break; + default: + return -EINVAL; + } + st->temp_osr = &ms5611_avail_temp_osr[ARRAY_SIZE(ms5611_avail_temp_osr) - 1]; st->pressure_osr = diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c index 7c04f730430c..cccc40f7df0b 100644 --- a/drivers/iio/pressure/ms5611_i2c.c +++ b/drivers/iio/pressure/ms5611_i2c.c @@ -20,17 +20,15 @@ #include "ms5611.h" -static int ms5611_i2c_reset(struct device *dev) +static int ms5611_i2c_reset(struct ms5611_state *st) { - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); - return i2c_smbus_write_byte(st->client, MS5611_RESET); } -static int ms5611_i2c_read_prom_word(struct device *dev, int index, u16 *word) +static int ms5611_i2c_read_prom_word(struct ms5611_state *st, int index, + u16 *word) { int ret; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); ret = i2c_smbus_read_word_swapped(st->client, MS5611_READ_PROM_WORD + (index << 1)); @@ -57,11 +55,10 @@ static int ms5611_i2c_read_adc(struct ms5611_state *st, s32 *val) return 0; } -static int ms5611_i2c_read_adc_temp_and_pressure(struct device *dev, +static int ms5611_i2c_read_adc_temp_and_pressure(struct ms5611_state *st, s32 *temp, s32 *pressure) { int ret; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); const struct ms5611_osr *osr = st->temp_osr; ret = i2c_smbus_write_byte(st->client, osr->cmd); diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c index 45d3a7d5be8e..3039fe8aa2a2 100644 --- a/drivers/iio/pressure/ms5611_spi.c +++ b/drivers/iio/pressure/ms5611_spi.c @@ -15,18 +15,17 @@ #include "ms5611.h" -static int ms5611_spi_reset(struct device *dev) +static int ms5611_spi_reset(struct ms5611_state *st) { u8 cmd = MS5611_RESET; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); return spi_write_then_read(st->client, &cmd, 1, NULL, 0); } -static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word) +static int ms5611_spi_read_prom_word(struct ms5611_state *st, int index, + u16 *word) { int ret; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); ret = spi_w8r16be(st->client, MS5611_READ_PROM_WORD + (index << 1)); if (ret < 0) @@ -37,11 +36,10 @@ static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word) return 0; } -static int 
ms5611_spi_read_adc(struct device *dev, s32 *val) +static int ms5611_spi_read_adc(struct ms5611_state *st, s32 *val) { int ret; u8 buf[3] = { MS5611_READ_ADC }; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); ret = spi_write_then_read(st->client, buf, 1, buf, 3); if (ret < 0) @@ -52,11 +50,10 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val) return 0; } -static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev, +static int ms5611_spi_read_adc_temp_and_pressure(struct ms5611_state *st, s32 *temp, s32 *pressure) { int ret; - struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev)); const struct ms5611_osr *osr = st->temp_osr; /* @@ -68,7 +65,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev, return ret; usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL)); - ret = ms5611_spi_read_adc(dev, temp); + ret = ms5611_spi_read_adc(st, temp); if (ret < 0) return ret; @@ -78,7 +75,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev, return ret; usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL)); - return ms5611_spi_read_adc(dev, pressure); + return ms5611_spi_read_adc(st, pressure); } static int ms5611_spi_probe(struct spi_device *spi) @@ -94,7 +91,7 @@ static int ms5611_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, indio_dev); spi->mode = SPI_MODE_0; - spi->max_speed_hz = 20000000; + spi->max_speed_hz = min(spi->max_speed_hz, 20000000U); spi->bits_per_word = 8; ret = spi_setup(spi); if (ret < 0) diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c index 235e125aeb3a..3d3ab86423ee 100644 --- a/drivers/iio/proximity/vl53l0x-i2c.c +++ b/drivers/iio/proximity/vl53l0x-i2c.c @@ -104,6 +104,7 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data, u16 tries = 20; u8 buffer[12]; int ret; + unsigned long time_left; ret = i2c_smbus_write_byte_data(client, VL_REG_SYSRANGE_START, 1); if (ret < 0) @@ -112,10 +113,8 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data, if (data->client->irq) { reinit_completion(&data->completion); - ret = wait_for_completion_timeout(&data->completion, HZ/10); - if (ret < 0) - return ret; - else if (ret == 0) + time_left = wait_for_completion_timeout(&data->completion, HZ/10); + if (time_left == 0) return -ETIMEDOUT; vl53l0x_clear_irq(data); diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c index 3b4a0e60e605..8306daa77908 100644 --- a/drivers/iio/temperature/ltc2983.c +++ b/drivers/iio/temperature/ltc2983.c @@ -1376,13 +1376,6 @@ static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio) return ret; } - st->iio_chan = devm_kzalloc(&st->spi->dev, - st->iio_channels * sizeof(*st->iio_chan), - GFP_KERNEL); - - if (!st->iio_chan) - return -ENOMEM; - ret = regmap_update_bits(st->regmap, LTC2983_GLOBAL_CONFIG_REG, LTC2983_NOTCH_FREQ_MASK, LTC2983_NOTCH_FREQ(st->filter_notch_freq)); @@ -1494,6 +1487,12 @@ static int ltc2983_probe(struct spi_device *spi) if (ret) return ret; + st->iio_chan = devm_kzalloc(&spi->dev, + st->iio_channels * sizeof(*st->iio_chan), + GFP_KERNEL); + if (!st->iio_chan) + return -ENOMEM; + ret = ltc2983_setup(st, true); if (ret) return ret; diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c index e09e58072872..9ed5b9405ade 100644 --- a/drivers/iio/trigger/iio-trig-sysfs.c +++ b/drivers/iio/trigger/iio-trig-sysfs.c @@ -196,6 +196,7 @@ static int iio_sysfs_trigger_remove(int id) } iio_trigger_unregister(t->trig); + 
irq_work_sync(&t->work); iio_trigger_free(t->trig); list_del(&t->l); @@ -208,9 +209,13 @@ static int iio_sysfs_trigger_remove(int id) static int __init iio_sysfs_trig_init(void) { + int ret; device_initialize(&iio_sysfs_trig_dev); dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger"); - return device_add(&iio_sysfs_trig_dev); + ret = device_add(&iio_sysfs_trig_dev); + if (ret) + put_device(&iio_sysfs_trig_dev); + return ret; } module_init(iio_sysfs_trig_init); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index ee568bdf3c78..3133b6be6cab 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1280,8 +1280,10 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, return ERR_CAST(cm_id_priv); err = cm_init_listen(cm_id_priv, service_id, 0); - if (err) + if (err) { + ib_destroy_cm_id(&cm_id_priv->id); return ERR_PTR(err); + } spin_lock_irq(&cm_id_priv->lock); listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler); @@ -1641,14 +1643,13 @@ static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num, static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg, struct sa_path_rec *primary_path, - struct sa_path_rec *alt_path) + struct sa_path_rec *alt_path, + struct ib_wc *wc) { u32 lid; if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) { - sa_path_set_dlid(primary_path, - IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, - req_msg)); + sa_path_set_dlid(primary_path, wc->slid); sa_path_set_slid(primary_path, IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg)); @@ -1685,7 +1686,8 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg, static void cm_format_paths_from_req(struct cm_req_msg *req_msg, struct sa_path_rec *primary_path, - struct sa_path_rec *alt_path) + struct sa_path_rec *alt_path, + struct ib_wc *wc) { primary_path->dgid = *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg); @@ -1743,7 +1745,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg, if (sa_path_is_roce(alt_path)) alt_path->roce.route_resolved = false; } - cm_format_path_lid_from_req(req_msg, primary_path, alt_path); + cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc); } static u16 cm_get_bth_pkey(struct cm_work *work) @@ -2161,7 +2163,7 @@ static int cm_req_handler(struct cm_work *work) if (cm_req_has_alt_path(req_msg)) work->path[1].rec_type = work->path[0].rec_type; cm_format_paths_from_req(req_msg, &work->path[0], - &work->path[1]); + &work->path[1], work->mad_recv_wc->wc); if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) sa_path_set_dmac(&work->path[0], cm_id_priv->av.ah_attr.roce.dmac); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 3c40aa50cd60..9ed5de38e372 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1437,7 +1437,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev, return false; memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_iif = net_dev->ifindex; + fl4.flowi4_oif = net_dev->ifindex; fl4.daddr = daddr; fl4.saddr = saddr; @@ -1722,8 +1722,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id, } if (!validate_net_dev(*net_dev, - (struct sockaddr *)&req->listen_addr_storage, - (struct sockaddr *)&req->src_addr_storage)) { + (struct sockaddr *)&req->src_addr_storage, + (struct sockaddr *)&req->listen_addr_storage)) { id_priv = ERR_PTR(-EHOSTUNREACH); goto err; } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index aa526c5ca0cf..d91892ffe243 100644 --- 
a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2759,10 +2759,18 @@ static int __init ib_core_init(void) nldev_init(); rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table); - roce_gid_mgmt_init(); + ret = roce_gid_mgmt_init(); + if (ret) { + pr_warn("Couldn't init RoCE GID management\n"); + goto err_parent; + } return 0; +err_parent: + rdma_nl_unregister(RDMA_NL_LS); + nldev_exit(); + unregister_pernet_device(&rdma_dev_net_ops); err_compat: unregister_blocking_lsm_notifier(&ibdev_lsm_nb); err_sa: diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 12d29d54a081..c90f6378d839 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -2181,7 +2181,7 @@ void __init nldev_init(void) rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table); } -void __exit nldev_exit(void) +void nldev_exit(void) { rdma_nl_unregister(RDMA_NL_NLDEV); } diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 323f6cf00682..af4af4789ef2 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -466,7 +466,7 @@ retry: mutex_unlock(&umem_odp->umem_mutex); out_put_mm: - mmput(owning_mm); + mmput_async(owning_mm); out_put_task: if (owning_process) put_task_struct(owning_process); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 466026825dd7..d7c90da9ce7f 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -749,6 +749,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs) mr->uobject = uobj; atomic_inc(&pd->usecnt); mr->iova = cmd.hca_va; + mr->length = cmd.length; rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); rdma_restrack_set_name(&mr->res, NULL); @@ -832,8 +833,10 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs) atomic_dec(&old_pd->usecnt); } - if (cmd.flags & IB_MR_REREG_TRANS) + if (cmd.flags & IB_MR_REREG_TRANS) { mr->iova = cmd.hca_va; + mr->length = cmd.length; + } memset(&resp, 0, sizeof(resp)); resp.lkey = mr->lkey; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 597e889ba831..5889639e90a1 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2082,6 +2082,8 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mr->pd = pd; mr->dm = NULL; atomic_inc(&pd->usecnt); + mr->iova = virt_addr; + mr->length = length; rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); rdma_restrack_parent_name(&mr->res, &pd->res); diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 329ee4f48d95..d84b1098762c 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -306,6 +306,8 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from) unsigned long dim = from->nr_segs; int idx; + if (!HFI1_CAP_IS_KSET(SDMA)) + return -EINVAL; idx = srcu_read_lock(&fd->pq_srcu); pq = srcu_dereference(fd->pq, &fd->pq_srcu); if (!cq || !pq) { @@ -1218,8 +1220,10 @@ static int setup_base_ctxt(struct hfi1_filedata *fd, goto done; ret = init_user_ctxt(fd, uctxt); - if (ret) + if (ret) { + hfi1_free_ctxt_rcv_groups(uctxt); goto done; + } user_init(uctxt); diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index fa2cd76747ff..837293aac68f 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -530,7 +530,7 @@ void set_link_ipg(struct hfi1_pportdata 
*ppd) u16 shift, mult; u64 src; u32 current_egress_rate; /* Mbits /sec */ - u32 max_pkt_time; + u64 max_pkt_time; /* * max_pkt_time is the maximum packet egress time in units * of the fabric clock period 1/(805 MHz). diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index d213f65d4cdd..ed8a96ae61ce 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c @@ -121,6 +121,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) unsigned long flags; struct list_head del_list; + /* Prevent freeing of mm until we are completely finished. */ + mmgrab(handler->mn.mm); + /* Unregister first so we don't get any more notifications. */ mmu_notifier_unregister(&handler->mn, handler->mn.mm); @@ -143,6 +146,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) do_remove(handler, &del_list); + /* Now the mm may be freed. */ + mmdrop(handler->mn.mm); + kfree(handler); } diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 1cd8f80f097a..60eb3a64518f 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -955,8 +955,7 @@ void sc_disable(struct send_context *sc) spin_unlock(&sc->release_lock); write_seqlock(&sc->waitlock); - if (!list_empty(&sc->piowait)) - list_move(&sc->piowait, &wake_list); + list_splice_init(&sc->piowait, &wake_list); write_sequnlock(&sc->waitlock); while (!list_empty(&wake_list)) { struct iowait *wait; diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 0b73dc7847aa..a044bee257f9 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -1330,11 +1330,13 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines) kvfree(sde->tx_ring); sde->tx_ring = NULL; } - spin_lock_irq(&dd->sde_map_lock); - sdma_map_free(rcu_access_pointer(dd->sdma_map)); - RCU_INIT_POINTER(dd->sdma_map, NULL); - spin_unlock_irq(&dd->sde_map_lock); - synchronize_rcu(); + if (rcu_access_pointer(dd->sdma_map)) { + spin_lock_irq(&dd->sde_map_lock); + sdma_map_free(rcu_access_pointer(dd->sdma_map)); + RCU_INIT_POINTER(dd->sdma_map, NULL); + spin_unlock_irq(&dd->sde_map_lock); + synchronize_rcu(); + } kfree(dd->per_sdma); dd->per_sdma = NULL; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index abe882ec1bae..6dab03b7aca8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5642,8 +5642,8 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id) dev_err(dev, "AEQ overflow!\n"); - int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S; - roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); + roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, + 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S); /* Set reset level for reset_event() */ if (ops->set_default_reset_request) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index be7f2fe1e883..8a92faeb3d23 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -92,7 +92,7 @@ #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE -#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 +#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 #define HNS_ROCE_INVALID_LKEY 0x100 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c 
b/drivers/infiniband/hw/hns/hns_roce_mr.c index 027ec8413ac2..6d7cc724862f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -286,7 +286,6 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, goto err_alloc_pbl; mr->ibmr.rkey = mr->ibmr.lkey = mr->key; - mr->ibmr.length = length; return &mr->ibmr; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 291e06d63150..6fe98af7741b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -386,11 +386,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); - if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) - hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz); - else - hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * - hr_qp->rq.max_gs); + hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * + hr_qp->rq.max_gs); hr_qp->rq.wqe_cnt = cnt; if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 426fed005d53..811b4bb34524 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -439,7 +439,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, goto err_mr; mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; - mr->ibmr.length = length; mr->ibmr.page_size = 1U << shift; return &mr->ibmr; diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c index b3391ecedda7..0404e6f22d37 100644 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@ -2081,12 +2081,10 @@ static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs, if (err) return err; - if (flags) { - mlx5_ib_ft_type_to_namespace( + if (flags) + return mlx5_ib_ft_type_to_namespace( MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX, &obj->ns_type); - return 0; - } } obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 9bb9bb058932..cca7a4a6bd82 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -166,6 +166,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num, mdev = dev->mdev; mdev_port_num = 1; } + if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) { + /* set local port to one for Function-Per-Port HCA. 
*/ + mdev = dev->mdev; + mdev_port_num = 1; + } + /* Declaring support of extended counters */ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) { struct ib_class_port_info cpi = {}; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 6cd0cbd4fc9f..d827a4e44c94 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -531,8 +531,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) spin_lock_irq(&ent->lock); if (ent->disabled) goto out; - if (need_delay) + if (need_delay) { queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); + goto out; + } remove_cache_mr_locked(ent); queue_adjust_cache_locked(ent); } diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 967641662b24..d0bb61b7e419 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -374,6 +374,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev) if (IS_IWARP(dev)) { xa_init(&dev->qps); dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq"); + if (!dev->iwarp_wq) { + rc = -ENOMEM; + goto err1; + } } /* Allocate Status blocks for CNQ */ @@ -381,7 +385,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev) GFP_KERNEL); if (!dev->sb_array) { rc = -ENOMEM; - goto err1; + goto err_destroy_wq; } dev->cnq_array = kcalloc(dev->num_cnq, @@ -432,6 +436,9 @@ err3: kfree(dev->cnq_array); err2: kfree(dev->sb_array); +err_destroy_wq: + if (IS_IWARP(dev)) + destroy_workqueue(dev->iwarp_wq); err1: kfree(dev->sgid_tbl); return rc; } diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 9dde70373a55..8ef6eecc42a0 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -418,6 +418,7 @@ struct qedr_qp { u32 sq_psn; u32 qkey; u32 dest_qp_num; + u8 timeout; /* Relevant to qps created from kernel space only (ULPs) */ u8 prev_wqe_size; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index eeb87f31cd25..3543b9af10b7 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2622,6 +2622,8 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1 << max_t(int, attr->timeout - 8, 0); else qp_params.ack_timeout = 0; + + qp->timeout = attr->timeout; } if (attr_mask & IB_QP_RETRY_CNT) { @@ -2781,7 +2783,7 @@ int qedr_query_qp(struct ib_qp *ibqp, rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]); rdma_ah_set_port_num(&qp_attr->ah_attr, 1); rdma_ah_set_sl(&qp_attr->ah_attr, 0); - qp_attr->timeout = params.timeout; + qp_attr->timeout = qp->timeout; qp_attr->rnr_retry = params.rnr_retry; qp_attr->retry_cnt = params.retry_cnt; qp_attr->min_rnr_timer = params.min_rnr_nak_timer; @@ -2987,7 +2989,11 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); if (rc) { - DP_ERR(dev, "roce alloc tid returned an error %d\n", rc); + if (rc == -EINVAL) + DP_ERR(dev, "Out of MR resources\n"); + else + DP_ERR(dev, "roce alloc tid returned error %d\n", rc); + goto err1; } @@ -3082,8 +3088,12 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); if (rc) { - DP_ERR(dev, "roce alloc tid returned an error %d\n", rc); - goto err0; + if (rc == -EINVAL) + DP_ERR(dev, "Out of MR resources\n"); + else + DP_ERR(dev, "roce alloc tid returned error %d\n", rc); + + goto err1; } /* Index only, 18 bit long, lkey = itid << 8 | key */
@@ -3107,7 +3117,7 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr); if (rc) { DP_ERR(dev, "roce register tid returned an error %d\n", rc); - goto err1; + goto err2; } mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key; @@ -3116,8 +3126,10 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey); return mr; -err1: +err2: dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid); +err1: + qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table); err0: kfree(mr); return ERR_PTR(rc); @@ -3212,7 +3224,11 @@ struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc) rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); if (rc) { - DP_ERR(dev, "roce alloc tid returned an error %d\n", rc); + if (rc == -EINVAL) + DP_ERR(dev, "Out of MR resources\n"); + else + DP_ERR(dev, "roce alloc tid returned error %d\n", rc); + goto err1; } diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 09f0dbf941c0..585a9c76e518 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -2826,7 +2826,7 @@ void rvt_qp_iter(struct rvt_dev_info *rdi, EXPORT_SYMBOL(rvt_qp_iter); /* - * This should be called with s_lock held. + * This should be called with s_lock and r_lock held. */ void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, enum ib_wc_status status) @@ -3185,7 +3185,9 @@ send_comp: rvp->n_loop_pkts++; flush_send: sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; + spin_lock(&sqp->r_lock); rvt_send_complete(sqp, wqe, send_status); + spin_unlock(&sqp->r_lock); if (local_ops) { atomic_dec(&sqp->local_ops_pending); local_ops = 0; @@ -3239,9 +3241,15 @@ serr: spin_unlock_irqrestore(&qp->r_lock, flags); serr_no_r_lock: spin_lock_irqsave(&sqp->s_lock, flags); + spin_lock(&sqp->r_lock); rvt_send_complete(sqp, wqe, send_status); + spin_unlock(&sqp->r_lock); if (sqp->ibqp.qp_type == IB_QPT_RC) { - int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + int lastwqe; + + spin_lock(&sqp->r_lock); + lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + spin_unlock(&sqp->r_lock); sqp->s_flags &= ~RVT_S_BUSY; spin_unlock_irqrestore(&sqp->s_lock, flags); diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h index f9fb56ec6dfd..dca86422b0a2 100644 --- a/drivers/infiniband/sw/rxe/rxe_param.h +++ b/drivers/infiniband/sw/rxe/rxe_param.h @@ -98,6 +98,12 @@ enum rxe_device_param { RXE_INFLIGHT_SKBS_PER_QP_HIGH = 64, RXE_INFLIGHT_SKBS_PER_QP_LOW = 16, + /* Max number of iterations of each tasklet + * before yielding the cpu to let other + * work make progress + */ + RXE_MAX_ITERATIONS = 1024, + /* Delay before calling arbiter timer */ RXE_NSEC_ARB_TIMER_DELAY = 200, diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index a1b79015e6f2..2e4b008f0387 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -184,6 +184,14 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, spin_lock_init(&qp->grp_lock); spin_lock_init(&qp->state_lock); + spin_lock_init(&qp->req.task.state_lock); + spin_lock_init(&qp->resp.task.state_lock); + spin_lock_init(&qp->comp.task.state_lock); + + spin_lock_init(&qp->sq.sq_lock); + spin_lock_init(&qp->rq.producer_lock); + spin_lock_init(&qp->rq.consumer_lock); + atomic_set(&qp->ssn, 0); atomic_set(&qp->skb_out, 0); } @@ -239,7 +247,6 @@ static int rxe_qp_init_req(struct rxe_dev
*rxe, struct rxe_qp *qp, qp->req.opcode = -1; qp->comp.opcode = -1; - spin_lock_init(&qp->sq.sq_lock); skb_queue_head_init(&qp->req_pkts); rxe_init_task(rxe, &qp->req.task, qp, @@ -289,9 +296,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, } } - spin_lock_init(&qp->rq.producer_lock); - spin_lock_init(&qp->rq.consumer_lock); - skb_queue_head_init(&qp->resp_pkts); rxe_init_task(rxe, &qp->resp.task, qp, @@ -771,7 +775,9 @@ void rxe_qp_destroy(struct rxe_qp *qp) rxe_cleanup_task(&qp->comp.task); /* flush out any receive wr's or pending requests */ - __rxe_do_task(&qp->req.task); + if (qp->req.task.func) + __rxe_do_task(&qp->req.task); + if (qp->sq.queue) { __rxe_do_task(&qp->comp.task); __rxe_do_task(&qp->req.task); @@ -811,8 +817,10 @@ static void rxe_qp_do_cleanup(struct work_struct *work) free_rd_atomic_resources(qp); - kernel_sock_shutdown(qp->sk, SHUT_RDWR); - sock_release(qp->sk); + if (qp->sk) { + kernel_sock_shutdown(qp->sk, SHUT_RDWR); + sock_release(qp->sk); + } } /* called when the last reference to the qp is dropped */ diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index d4917646641a..69238f856c67 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -650,7 +650,7 @@ next_wqe: opcode = next_opcode(qp, wqe, wqe->wr.opcode); if (unlikely(opcode < 0)) { wqe->status = IB_WC_LOC_QP_OP_ERR; - goto exit; + goto err; } mask = rxe_opcode[opcode].mask; diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c index 6951fdcb31bf..568cf56c236b 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.c +++ b/drivers/infiniband/sw/rxe/rxe_task.c @@ -8,7 +8,7 @@ #include #include -#include "rxe_task.h" +#include "rxe.h" int __rxe_do_task(struct rxe_task *task) @@ -34,6 +34,7 @@ void rxe_do_task(struct tasklet_struct *t) int ret; unsigned long flags; struct rxe_task *task = from_tasklet(task, t, tasklet); + unsigned int iterations = RXE_MAX_ITERATIONS; spin_lock_irqsave(&task->state_lock, flags); switch (task->state) { @@ -62,13 +63,20 @@ void rxe_do_task(struct tasklet_struct *t) spin_lock_irqsave(&task->state_lock, flags); switch (task->state) { case TASK_STATE_BUSY: - if (ret) + if (ret) { task->state = TASK_STATE_START; - else + } else if (iterations--) { cont = 1; + } else { + /* reschedule the tasklet and exit + * the loop to give up the cpu + */ + tasklet_schedule(&task->tasklet); + task->state = TASK_STATE_START; + } break; - /* soneone tried to run the task since the last time we called + /* someone tried to run the task since the last time we called * func, so we will call one more time regardless of the * return value */ diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index 66764f7ef072..b87ba4c9fccf 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -725,11 +725,11 @@ static int siw_proc_mpareply(struct siw_cep *cep) enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR; rv = siw_recv_mpa_rr(cep); - if (rv != -EAGAIN) - siw_cancel_mpatimer(cep); if (rv) goto out_err; + siw_cancel_mpatimer(cep); + rep = &cep->mpa.hdr; if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) { @@ -895,7 +895,8 @@ static int siw_proc_mpareply(struct siw_cep *cep) } out_err: - siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL); + if (rv != -EAGAIN) + siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL); return rv; } @@ -968,14 +969,15 @@ static void siw_accept_newconn(struct siw_cep *cep) 
siw_cep_set_inuse(new_cep); rv = siw_proc_mpareq(new_cep); - siw_cep_set_free(new_cep); - if (rv != -EAGAIN) { siw_cep_put(cep); new_cep->listen_cep = NULL; - if (rv) + if (rv) { + siw_cep_set_free(new_cep); goto error; + } } + siw_cep_set_free(new_cep); } return; diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 875ea6f1b04a..fd721cc19682 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -961,27 +961,28 @@ out: static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx) { struct sk_buff *skb = srx->skb; + int avail = min(srx->skb_new, srx->fpdu_part_rem); u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad; __wsum crc_in, crc_own = 0; siw_dbg_qp(qp, "expected %d, available %d, pad %u\n", srx->fpdu_part_rem, srx->skb_new, srx->pad); - if (srx->skb_new < srx->fpdu_part_rem) + skb_copy_bits(skb, srx->skb_offset, tbuf, avail); + + srx->skb_new -= avail; + srx->skb_offset += avail; + srx->skb_copied += avail; + srx->fpdu_part_rem -= avail; + + if (srx->fpdu_part_rem) return -EAGAIN; - skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem); - - if (srx->mpa_crc_hd && srx->pad) - crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad); - - srx->skb_new -= srx->fpdu_part_rem; - srx->skb_offset += srx->fpdu_part_rem; - srx->skb_copied += srx->fpdu_part_rem; - if (!srx->mpa_crc_hd) return 0; + if (srx->pad) + crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad); /* * CRC32 is computed, transmitted and received directly in NBO, * so there's never a reason to convert byte order. @@ -1083,10 +1084,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx) * completely received. */ if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct iwarp_ctrl_tagged)) { - bytes = iwarp_pktinfo[opcode].hdr_len - MIN_DDP_HDR; + int hdrlen = iwarp_pktinfo[opcode].hdr_len; - if (srx->skb_new < bytes) - return -EAGAIN; + bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new); skb_copy_bits(skb, srx->skb_offset, (char *)c_hdr + srx->fpdu_part_rcvd, bytes); @@ -1096,6 +1096,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx) srx->skb_new -= bytes; srx->skb_offset += bytes; srx->skb_copied += bytes; + + if (srx->fpdu_part_rcvd < hdrlen) + return -EAGAIN; } /* diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 7989c4043db4..3c3ae5ef2942 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); if (paddr) - return virt_to_page(paddr); + return virt_to_page((void *)paddr); return NULL; } @@ -523,13 +523,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) kunmap(p); } } else { - u64 va = sge->laddr + sge_off; + /* + * Cast to an uintptr_t to preserve all 64 bits + * in sge->laddr. + */ + uintptr_t va = (uintptr_t)(sge->laddr + sge_off); - page_array[seg] = virt_to_page(va & PAGE_MASK); + /* + * virt_to_page() takes a (void *) pointer + * so cast to a (void *) meaning it will be 64 + * bits on a 64 bit platform and 32 bits on a + * 32 bit platform. 
+ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK)); if (do_crc) crypto_shash_update( c_tx->mpa_crc_hd, - (void *)(uintptr_t)va, + (void *)va, plen); } diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 3690e28cc7ea..a16e066989fa 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -499,6 +499,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, iser_conn->iscsi_conn = conn; out: + iscsi_put_endpoint(ep); mutex_unlock(&iser_conn->state_mutex); return error; } @@ -988,6 +989,7 @@ static struct iscsi_transport iscsi_iser_transport = { /* connection management */ .create_conn = iscsi_iser_conn_create, .bind_conn = iscsi_iser_conn_bind, + .unbind_conn = iscsi_conn_unbind, .destroy_conn = iscsi_conn_teardown, .attr_is_visible = iser_attr_is_visible, .set_param = iscsi_iser_set_param, diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 13634eda833d..5c39e4c4bef7 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -1728,11 +1728,6 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, if (con->c.cid == 0) { queue_depth = le16_to_cpu(msg->queue_depth); - if (queue_depth > MAX_SESS_QUEUE_DEPTH) { - rtrs_err(clt, "Invalid RTRS message: queue=%d\n", - queue_depth); - return -ECONNRESET; - } if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) { rtrs_err(clt, "Error: queue depth changed\n"); diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h index 51c60f542876..c5ca123d52a8 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h @@ -23,6 +23,17 @@ #define RTRS_PROTO_VER_STRING __stringify(RTRS_PROTO_VER_MAJOR) "." \ __stringify(RTRS_PROTO_VER_MINOR) +/* + * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS) + * and the minimum chunk size is 4096 (2^12). + * So the maximum sess_queue_depth is 65536 (2^16) in theory. + * But mempool_create, create_qp and ib_post_send fail with + * "cannot allocate memory" error if sess_queue_depth is too big. + * Therefore the practical max value of sess_queue_depth is + * somewhere between 1 and 65534 and it depends on the system. + */ +#define MAX_SESS_QUEUE_DEPTH 65535 + enum rtrs_imm_const { MAX_IMM_TYPE_BITS = 4, MAX_IMM_TYPE_MASK = ((1 << MAX_IMM_TYPE_BITS) - 1), @@ -46,16 +57,7 @@ enum { MAX_PATHS_NUM = 128, - /* - * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS) - * and the minimum chunk size is 4096 (2^12). - * So the maximum sess_queue_depth is 65536 (2^16) in theory. - * But mempool_create, create_qp and ib_post_send fail with - * "cannot allocate memory" error if sess_queue_depth is too big. - * Therefore the pratical max value of sess_queue_depth is - * somewhere between 1 and 65536 and it depends on the system.
- */ - MAX_SESS_QUEUE_DEPTH = 65536, + MIN_CHUNK_SIZE = 8192, RTRS_HB_INTERVAL_MS = 5000, RTRS_HB_MISSED_MAX = 5, diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c index b033bfa9f383..b152a742cd3c 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c @@ -2193,9 +2193,9 @@ static int check_module_params(void) sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH); return -EINVAL; } - if (max_chunk_size < 4096 || !is_power_of_2(max_chunk_size)) { + if (max_chunk_size < MIN_CHUNK_SIZE || !is_power_of_2(max_chunk_size)) { pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n", - max_chunk_size, 4096); + max_chunk_size, MIN_CHUNK_SIZE); return -EINVAL; } diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 07ecc7dc1822..c0ed08fcab48 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -565,12 +565,9 @@ static int srpt_refresh_port(struct srpt_port *sport) if (ret) return ret; - sport->port_guid_id.wwn.priv = sport; - srpt_format_guid(sport->port_guid_id.name, - sizeof(sport->port_guid_id.name), + srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name), &sport->gid.global.interface_id); - sport->port_gid_id.wwn.priv = sport; - snprintf(sport->port_gid_id.name, sizeof(sport->port_gid_id.name), + snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name), "0x%016llx%016llx", be64_to_cpu(sport->gid.global.subnet_prefix), be64_to_cpu(sport->gid.global.interface_id)); @@ -2310,31 +2307,35 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev, tag_num = ch->rq_size; tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */ - mutex_lock(&sport->port_guid_id.mutex); - list_for_each_entry(stpg, &sport->port_guid_id.tpg_list, entry) { - if (!IS_ERR_OR_NULL(ch->sess)) - break; - ch->sess = target_setup_session(&stpg->tpg, tag_num, + if (sport->guid_id) { + mutex_lock(&sport->guid_id->mutex); + list_for_each_entry(stpg, &sport->guid_id->tpg_list, entry) { + if (!IS_ERR_OR_NULL(ch->sess)) + break; + ch->sess = target_setup_session(&stpg->tpg, tag_num, tag_size, TARGET_PROT_NORMAL, ch->sess_name, ch, NULL); + } + mutex_unlock(&sport->guid_id->mutex); } - mutex_unlock(&sport->port_guid_id.mutex); - mutex_lock(&sport->port_gid_id.mutex); - list_for_each_entry(stpg, &sport->port_gid_id.tpg_list, entry) { - if (!IS_ERR_OR_NULL(ch->sess)) - break; - ch->sess = target_setup_session(&stpg->tpg, tag_num, + if (sport->gid_id) { + mutex_lock(&sport->gid_id->mutex); + list_for_each_entry(stpg, &sport->gid_id->tpg_list, entry) { + if (!IS_ERR_OR_NULL(ch->sess)) + break; + ch->sess = target_setup_session(&stpg->tpg, tag_num, tag_size, TARGET_PROT_NORMAL, i_port_id, ch, NULL); - if (!IS_ERR_OR_NULL(ch->sess)) - break; - /* Retry without leading "0x" */ - ch->sess = target_setup_session(&stpg->tpg, tag_num, + if (!IS_ERR_OR_NULL(ch->sess)) + break; + /* Retry without leading "0x" */ + ch->sess = target_setup_session(&stpg->tpg, tag_num, tag_size, TARGET_PROT_NORMAL, i_port_id + 2, ch, NULL); + } + mutex_unlock(&sport->gid_id->mutex); } - mutex_unlock(&sport->port_gid_id.mutex); if (IS_ERR_OR_NULL(ch->sess)) { WARN_ON_ONCE(ch->sess == NULL); @@ -2980,7 +2981,12 @@ static int srpt_release_sport(struct srpt_port *sport) return 0; } -static struct se_wwn *__srpt_lookup_wwn(const char *name) +struct port_and_port_id { + struct srpt_port *sport; + struct srpt_port_id **port_id; +}; + +static struct port_and_port_id 
__srpt_lookup_port(const char *name) { struct ib_device *dev; struct srpt_device *sdev; @@ -2995,25 +3001,38 @@ static struct se_wwn *__srpt_lookup_wwn(const char *name) for (i = 0; i < dev->phys_port_cnt; i++) { sport = &sdev->port[i]; - if (strcmp(sport->port_guid_id.name, name) == 0) - return &sport->port_guid_id.wwn; - if (strcmp(sport->port_gid_id.name, name) == 0) - return &sport->port_gid_id.wwn; + if (strcmp(sport->guid_name, name) == 0) { + kref_get(&sdev->refcnt); + return (struct port_and_port_id){ + sport, &sport->guid_id}; + } + if (strcmp(sport->gid_name, name) == 0) { + kref_get(&sdev->refcnt); + return (struct port_and_port_id){ + sport, &sport->gid_id}; + } } } - return NULL; + return (struct port_and_port_id){}; } -static struct se_wwn *srpt_lookup_wwn(const char *name) +/** + * srpt_lookup_port() - Look up an RDMA port by name + * @name: ASCII port name + * + * Increments the RDMA port reference count if an RDMA port pointer is returned. + * The caller must drop that reference count by calling srpt_port_put_ref(). + */ +static struct port_and_port_id srpt_lookup_port(const char *name) { - struct se_wwn *wwn; + struct port_and_port_id papi; spin_lock(&srpt_dev_lock); - wwn = __srpt_lookup_wwn(name); + papi = __srpt_lookup_port(name); spin_unlock(&srpt_dev_lock); - return wwn; + return papi; } static void srpt_free_srq(struct srpt_device *sdev) @@ -3098,6 +3117,18 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq) return ret; } +static void srpt_free_sdev(struct kref *refcnt) +{ + struct srpt_device *sdev = container_of(refcnt, typeof(*sdev), refcnt); + + kfree(sdev); +} + +static void srpt_sdev_put(struct srpt_device *sdev) +{ + kref_put(&sdev->refcnt, srpt_free_sdev); +} + /** * srpt_add_one - InfiniBand device addition callback function * @device: Describes a HCA. @@ -3115,6 +3146,7 @@ static int srpt_add_one(struct ib_device *device) if (!sdev) return -ENOMEM; + kref_init(&sdev->refcnt); sdev->device = device; mutex_init(&sdev->sdev_mutex); @@ -3178,10 +3210,6 @@ static int srpt_add_one(struct ib_device *device) sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE; sport->port_attrib.use_srq = false; INIT_WORK(&sport->work, srpt_refresh_port_work); - mutex_init(&sport->port_guid_id.mutex); - INIT_LIST_HEAD(&sport->port_guid_id.tpg_list); - mutex_init(&sport->port_gid_id.mutex); - INIT_LIST_HEAD(&sport->port_gid_id.tpg_list); ret = srpt_refresh_port(sport); if (ret) { @@ -3210,7 +3238,7 @@ err_ring: srpt_free_srq(sdev); ib_dealloc_pd(sdev->pd); free_dev: - kfree(sdev); + srpt_sdev_put(sdev); pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev)); return ret; } @@ -3254,7 +3282,7 @@ static void srpt_remove_one(struct ib_device *device, void *client_data) ib_dealloc_pd(sdev->pd); - kfree(sdev); + srpt_sdev_put(sdev); } static struct ib_client srpt_client = { @@ -3282,10 +3310,10 @@ static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn) { struct srpt_port *sport = wwn->priv; - if (wwn == &sport->port_guid_id.wwn) - return &sport->port_guid_id; - if (wwn == &sport->port_gid_id.wwn) - return &sport->port_gid_id; + if (sport->guid_id && &sport->guid_id->wwn == wwn) + return sport->guid_id; + if (sport->gid_id && &sport->gid_id->wwn == wwn) + return sport->gid_id; WARN_ON_ONCE(true); return NULL; } @@ -3800,7 +3828,31 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf, struct config_group *group, const char *name) { - return srpt_lookup_wwn(name) ? 
: ERR_PTR(-EINVAL); + struct port_and_port_id papi = srpt_lookup_port(name); + struct srpt_port *sport = papi.sport; + struct srpt_port_id *port_id; + + if (!papi.port_id) + return ERR_PTR(-EINVAL); + if (*papi.port_id) { + /* Attempt to create a directory that already exists. */ + WARN_ON_ONCE(true); + return &(*papi.port_id)->wwn; + } + port_id = kzalloc(sizeof(*port_id), GFP_KERNEL); + if (!port_id) { + srpt_sdev_put(sport->sdev); + return ERR_PTR(-ENOMEM); + } + mutex_init(&port_id->mutex); + INIT_LIST_HEAD(&port_id->tpg_list); + port_id->wwn.priv = sport; + memcpy(port_id->name, port_id == sport->guid_id ? sport->guid_name : + sport->gid_name, ARRAY_SIZE(port_id->name)); + + *papi.port_id = port_id; + + return &port_id->wwn; } /** @@ -3809,6 +3861,18 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf, */ static void srpt_drop_tport(struct se_wwn *wwn) { + struct srpt_port_id *port_id = container_of(wwn, typeof(*port_id), wwn); + struct srpt_port *sport = wwn->priv; + + if (sport->guid_id == port_id) + sport->guid_id = NULL; + else if (sport->gid_id == port_id) + sport->gid_id = NULL; + else + WARN_ON_ONCE(true); + + srpt_sdev_put(sport->sdev); + kfree(port_id); } static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h index bdeb010efee6..2bf381ecd482 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.h +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h @@ -376,7 +376,7 @@ struct srpt_tpg { }; /** - * struct srpt_port_id - information about an RDMA port name + * struct srpt_port_id - LIO RDMA port information * @mutex: Protects @tpg_list changes. * @tpg_list: TPGs associated with the RDMA port name. * @wwn: WWN associated with the RDMA port name. @@ -393,7 +393,7 @@ struct srpt_port_id { }; /** - * struct srpt_port - information associated by SRPT with a single IB port + * struct srpt_port - SRPT RDMA port information * @sdev: backpointer to the HCA information. * @mad_agent: per-port management datagram processing information. * @enabled: Whether or not this target port is enabled. @@ -402,8 +402,10 @@ struct srpt_port_id { * @lid: cached value of the port's lid. * @gid: cached value of the port's gid. * @work: work structure for refreshing the aforementioned cached values. - * @port_guid_id: target port GUID - * @port_gid_id: target port GID + * @guid_name: port name in GUID format. + * @guid_id: LIO target port information for the port name in GUID format. + * @gid_name: port name in GID format. + * @gid_id: LIO target port information for the port name in GID format. * @port_attrib: Port attributes that can be accessed through configfs. * @refcount: Number of objects associated with this port. * @freed_channels: Completion that will be signaled once @refcount becomes 0. @@ -419,8 +421,10 @@ struct srpt_port { u32 lid; union ib_gid gid; struct work_struct work; - struct srpt_port_id port_guid_id; - struct srpt_port_id port_gid_id; + char guid_name[64]; + struct srpt_port_id *guid_id; + char gid_name[64]; + struct srpt_port_id *gid_id; struct srpt_port_attrib port_attrib; atomic_t refcount; struct completion *freed_channels; @@ -430,6 +434,7 @@ struct srpt_port { /** * struct srpt_device - information associated by SRPT with a single HCA + * @refcnt: Reference count for this device. * @device: Backpointer to the struct ib_device managed by the IB core. * @pd: IB protection domain. * @lkey: L_Key (local key) with write access to all local memory. 
@@ -445,6 +450,7 @@ struct srpt_port { * @port: Information about the ports owned by this HCA. */ struct srpt_device { + struct kref refcnt; struct ib_device *device; struct ib_pd *pd; u32 lkey; diff --git a/drivers/input/input.c b/drivers/input/input.c index 3cfd2c18eebd..49504dcd5dc6 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -47,6 +47,17 @@ static DEFINE_MUTEX(input_mutex); static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 }; +static const unsigned int input_max_code[EV_CNT] = { + [EV_KEY] = KEY_MAX, + [EV_REL] = REL_MAX, + [EV_ABS] = ABS_MAX, + [EV_MSC] = MSC_MAX, + [EV_SW] = SW_MAX, + [EV_LED] = LED_MAX, + [EV_SND] = SND_MAX, + [EV_FF] = FF_MAX, +}; + static inline int is_event_supported(unsigned int code, unsigned long *bm, unsigned int max) { @@ -1976,6 +1987,14 @@ EXPORT_SYMBOL(input_get_timestamp); */ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code) { + if (type < EV_CNT && input_max_code[type] && + code > input_max_code[type]) { + pr_err("%s: invalid code %u for type %u\n", __func__, code, + type); + dump_stack(); + return; + } + switch (type) { case EV_KEY: __set_bit(code, dev->keybit); diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c index b2a68bc9f0b4..84b87526b7ba 100644 --- a/drivers/input/joystick/iforce/iforce-main.c +++ b/drivers/input/joystick/iforce/iforce-main.c @@ -50,6 +50,7 @@ static struct iforce_device iforce_device[] = { { 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce }, { 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce }, { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce }, + { 0x05ef, 0x8886, "Boeder Force Feedback Wheel", btn_wheel, abs_wheel, ff_iforce }, { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, @@ -272,22 +273,22 @@ int iforce_init_device(struct device *parent, u16 bustype, * Get device info. 
*/ - if (!iforce_get_id_packet(iforce, 'M', buf, &len) || len < 3) + if (!iforce_get_id_packet(iforce, 'M', buf, &len) && len >= 3) input_dev->id.vendor = get_unaligned_le16(buf + 1); else dev_warn(&iforce->dev->dev, "Device does not respond to id packet M\n"); - if (!iforce_get_id_packet(iforce, 'P', buf, &len) || len < 3) + if (!iforce_get_id_packet(iforce, 'P', buf, &len) && len >= 3) input_dev->id.product = get_unaligned_le16(buf + 1); else dev_warn(&iforce->dev->dev, "Device does not respond to id packet P\n"); - if (!iforce_get_id_packet(iforce, 'B', buf, &len) || len < 3) + if (!iforce_get_id_packet(iforce, 'B', buf, &len) && len >= 3) iforce->device_memory.end = get_unaligned_le16(buf + 1); else dev_warn(&iforce->dev->dev, "Device does not respond to id packet B\n"); - if (!iforce_get_id_packet(iforce, 'N', buf, &len) || len < 2) + if (!iforce_get_id_packet(iforce, 'N', buf, &len) && len >= 2) ff_effects = buf[1]; else dev_warn(&iforce->dev->dev, "Device does not respond to id packet N\n"); diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c index f95a81b9fac7..2380546d7978 100644 --- a/drivers/input/joystick/iforce/iforce-serio.c +++ b/drivers/input/joystick/iforce/iforce-serio.c @@ -39,7 +39,7 @@ static void iforce_serio_xmit(struct iforce *iforce) again: if (iforce->xmit.head == iforce->xmit.tail) { - clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); + iforce_clear_xmit_and_wake(iforce); spin_unlock_irqrestore(&iforce->xmit_lock, flags); return; } @@ -64,7 +64,7 @@ again: if (test_and_clear_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags)) goto again; - clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); + iforce_clear_xmit_and_wake(iforce); spin_unlock_irqrestore(&iforce->xmit_lock, flags); } @@ -169,7 +169,7 @@ static irqreturn_t iforce_serio_irq(struct serio *serio, iforce_serio->cmd_response_len = iforce_serio->len; /* Signal that command is done */ - wake_up(&iforce->wait); + wake_up_all(&iforce->wait); } else if (likely(iforce->type)) { iforce_process_packet(iforce, iforce_serio->id, iforce_serio->data_in, diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index ea58805c480f..cba92bd590a8 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c @@ -30,7 +30,7 @@ static void __iforce_usb_xmit(struct iforce *iforce) spin_lock_irqsave(&iforce->xmit_lock, flags); if (iforce->xmit.head == iforce->xmit.tail) { - clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); + iforce_clear_xmit_and_wake(iforce); spin_unlock_irqrestore(&iforce->xmit_lock, flags); return; } @@ -58,9 +58,9 @@ static void __iforce_usb_xmit(struct iforce *iforce) XMIT_INC(iforce->xmit.tail, n); if ( (n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC)) ) { - clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); dev_warn(&iforce_usb->intf->dev, "usb_submit_urb failed %d\n", n); + iforce_clear_xmit_and_wake(iforce); } /* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended. 
@@ -175,15 +175,15 @@ static void iforce_usb_out(struct urb *urb) struct iforce *iforce = &iforce_usb->iforce; if (urb->status) { - clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); dev_dbg(&iforce_usb->intf->dev, "urb->status %d, exiting\n", urb->status); + iforce_clear_xmit_and_wake(iforce); return; } __iforce_usb_xmit(iforce); - wake_up(&iforce->wait); + wake_up_all(&iforce->wait); } static int iforce_usb_probe(struct usb_interface *intf, diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h index 6aa761ebbdf7..9ccb9107ccbe 100644 --- a/drivers/input/joystick/iforce/iforce.h +++ b/drivers/input/joystick/iforce/iforce.h @@ -119,6 +119,12 @@ static inline int iforce_get_id_packet(struct iforce *iforce, u8 id, response_data, response_len); } +static inline void iforce_clear_xmit_and_wake(struct iforce *iforce) +{ + clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags); + wake_up_all(&iforce->wait); +} + /* Public functions */ /* iforce-main.c */ int iforce_init_device(struct device *parent, u16 bustype, diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index ba101afcfc27..70dedc0f7827 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -112,6 +112,8 @@ static const struct xpad_device { u8 xtype; } xpad_device[] = { { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 }, + { 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 }, + { 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 }, { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, @@ -242,6 +244,7 @@ static const struct xpad_device { { 0x0f0d, 0x0063, "Hori Real Arcade Pro Hayabusa (USA) Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE }, { 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, + { 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX }, { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, @@ -258,6 +261,7 @@ static const struct xpad_device { { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 }, { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, + { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, @@ -322,6 +326,7 @@ static const struct xpad_device { { 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE }, @@ -331,6 +336,14 @@ static 
const struct xpad_device { { 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 }, + { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller for Xbox", 0, XTYPE_XBOXONE }, + { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 }, { 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 }, { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX }, { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX }, @@ -416,6 +429,7 @@ static const signed short xpad_abs_triggers[] = { static const struct usb_device_id xpad_table[] = { { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */ XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */ + XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */ @@ -426,6 +440,7 @@ static const struct usb_device_id xpad_table[] = { { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */ XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz GamePad */ + XPAD_XBOX360_VENDOR(0x0c12), /* Zeroplus X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ @@ -446,8 +461,12 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ + XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */ + XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */ + XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ + XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */ XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */ { } }; @@ -1964,7 +1983,6 @@ static struct usb_driver xpad_driver = { .disconnect = xpad_disconnect, .suspend = xpad_suspend, .resume = xpad_resume, - .reset_resume = xpad_resume, .id_table = xpad_table, }; diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c index 65286762b02a..ad8660be0127 100644 --- a/drivers/input/keyboard/snvs_pwrkey.c +++ b/drivers/input/keyboard/snvs_pwrkey.c @@ -20,7 +20,7 @@ #include #include -#define SNVS_HPVIDR1_REG 0xF8 +#define SNVS_HPVIDR1_REG 0xBF8 #define SNVS_LPSR_REG 0x4C /* LP Status Register */ #define SNVS_LPCR_REG 0x38 /* LP Control Register */ #define SNVS_HPSR_REG 0x14 diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c index 02e7b6e29429..76982176335e 100644 --- a/drivers/input/misc/rk805-pwrkey.c +++ b/drivers/input/misc/rk805-pwrkey.c @@ -106,6 +106,7 @@ static struct platform_driver rk805_pwrkey_driver = { }; module_platform_driver(rk805_pwrkey_driver);
+MODULE_ALIAS("platform:rk805-pwrkey"); MODULE_AUTHOR("Joseph Chen "); MODULE_DESCRIPTION("RK805 PMIC Power Key driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index cb6ec59a045d..31c02c2019c1 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -18,6 +18,10 @@ #include #include +static bool use_low_level_irq; +module_param(use_low_level_irq, bool, 0444); +MODULE_PARM_DESC(use_low_level_irq, "Use low-level triggered IRQ instead of edge triggered"); + struct soc_button_info { const char *name; int acpi_index; @@ -73,6 +77,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"), }, }, + { + /* Acer Switch V 10 SW5-017, same issue as Acer Switch 10 SW5-012. */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "SW5-017"), + }, + }, { /* * Acer One S1003. _LID method messes with power-button GPIO @@ -85,13 +96,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = { }, { /* - * Lenovo Yoga Tab2 1051L, something messes with the home-button + * Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button * IRQ settings, leading to a non working home-button. */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "60073"), - DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1051"), }, }, {} /* Terminating entry */ @@ -164,7 +175,8 @@ soc_button_device_create(struct platform_device *pdev, } /* See dmi_use_low_level_irq[] comment */ - if (!autorepeat && dmi_check_system(dmi_use_low_level_irq)) { + if (!autorepeat && (use_low_level_irq || + dmi_check_system(dmi_use_low_level_irq))) { irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); gpio_keys[n_buttons].irq = irq; gpio_keys[n_buttons].gpio = -ENOENT; diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c index fe43e5557ed7..cdcb7737c46a 100644 --- a/drivers/input/misc/sparcspkr.c +++ b/drivers/input/misc/sparcspkr.c @@ -205,6 +205,7 @@ static int bbc_beep_probe(struct platform_device *op) info = &state->u.bbc; info->clock_freq = of_getintprop_default(dp, "clock-frequency", 0); + of_node_put(dp); if (!info->clock_freq) goto out_free; diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 59a14505b9cd..ca150618d32f 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface, if (!dev->tp_data) goto err_free_bt_buffer; - if (dev->bt_urb) + if (dev->bt_urb) { usb_fill_int_urb(dev->bt_urb, udev, usb_rcvintpipe(udev, cfg->bt_ep), dev->bt_data, dev->cfg.bt_datalen, bcm5974_irq_button, dev, 1); + dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + } + usb_fill_int_urb(dev->tp_urb, udev, usb_rcvintpipe(udev, cfg->tp_ep), dev->tp_data, dev->cfg.tp_datalen, bcm5974_irq_trackpad, dev, 1); + dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + /* create bcm5974 device */ usb_make_path(udev, dev->phys, sizeof(dev->phys)); strlcat(dev->phys, "/input0", sizeof(dev->phys)); diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 82577095e175..f1013b950d57 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -191,6 +191,7 @@ static const char * const smbus_pnp_ids[] = { "SYN3221", /* HP 15-ay000 */ "SYN323d", /* HP Spectre X360 13-w013dx */ "SYN3257", /* HP Envy 13-ad105ng */ + "SYN3286", 
/* HP Laptop 15-da3001TU */ NULL }; diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c index 2f9775de3c5b..70ea03a35c60 100644 --- a/drivers/input/serio/gscps2.c +++ b/drivers/input/serio/gscps2.c @@ -350,6 +350,10 @@ static int __init gscps2_probe(struct parisc_device *dev) ps2port->port = serio; ps2port->padev = dev; ps2port->addr = ioremap(hpa, GSC_STATUS + 4); + if (!ps2port->addr) { + ret = -ENOMEM; + goto fail_nomem; + } spin_lock_init(&ps2port->lock); gscps2_reset(ps2port); diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index a9f68f535b72..8648b4c46138 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -1543,8 +1543,6 @@ static int i8042_probe(struct platform_device *dev) { int error; - i8042_platform_device = dev; - if (i8042_reset == I8042_RESET_ALWAYS) { error = i8042_controller_selftest(); if (error) @@ -1582,7 +1580,6 @@ static int i8042_probe(struct platform_device *dev) i8042_free_aux_ports(); /* in case KBD failed but AUX not */ i8042_free_irqs(); i8042_controller_reset(false); - i8042_platform_device = NULL; return error; } @@ -1592,7 +1589,6 @@ static int i8042_remove(struct platform_device *dev) i8042_unregister_ports(); i8042_free_irqs(); i8042_controller_reset(false); - i8042_platform_device = NULL; return 0; } diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 8df402a1ed44..3c152e934cb8 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -3134,8 +3134,9 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id) if (error) return error; + /* Request the RESET line as asserted so we go into reset */ data->reset_gpio = devm_gpiod_get_optional(&client->dev, - "reset", GPIOD_OUT_LOW); + "reset", GPIOD_OUT_HIGH); if (IS_ERR(data->reset_gpio)) { error = PTR_ERR(data->reset_gpio); dev_err(&client->dev, "Failed to get reset gpio: %d\n", error); @@ -3153,8 +3154,9 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id) disable_irq(client->irq); if (data->reset_gpio) { + /* Wait a while and then de-assert the RESET GPIO line */ msleep(MXT_RESET_GPIO_TIME); - gpiod_set_value(data->reset_gpio, 1); + gpiod_set_value(data->reset_gpio, 0); msleep(MXT_RESET_INVALID_CHG); } diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 5fc789f717c8..b7f87ad4b9a9 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -154,6 +154,7 @@ static const struct goodix_chip_data gt9x_chip_data = { static const struct goodix_chip_id goodix_chip_ids[] = { { .id = "1151", .data = &gt1x_chip_data }, + { .id = "1158", .data = &gt1x_chip_data }, { .id = "5663", .data = &gt1x_chip_data }, { .id = "5688", .data = &gt1x_chip_data }, { .id = "917S", .data = &gt1x_chip_data }, @@ -1058,6 +1059,7 @@ static int goodix_configure_dev(struct goodix_ts_data *ts) input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0); input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); +retry_read_config: /* Read configuration and apply touchscreen parameters */ goodix_read_config(ts); @@ -1065,6 +1067,16 @@ static int goodix_configure_dev(struct goodix_ts_data *ts) touchscreen_parse_properties(ts->input_dev, true, &ts->prop); if (!ts->prop.max_x || !ts->prop.max_y || !ts->max_touch_num) { + if (!ts->reset_controller_at_probe && + ts->irq_pin_access_method != IRQ_PIN_ACCESS_NONE) { + dev_info(&ts->client->dev, "Config
not set, resetting controller\n"); + /* Retry after a controller reset */ + ts->reset_controller_at_probe = true; + error = goodix_reset(ts); + if (error) + return error; + goto retry_read_config; + } dev_err(&ts->client->dev, "Invalid config (%d, %d, %d), using defaults\n", ts->prop.max_x, ts->prop.max_y, ts->max_touch_num); @@ -1385,6 +1397,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match); #ifdef CONFIG_OF static const struct of_device_id goodix_of_match[] = { { .compatible = "goodix,gt1151" }, + { .compatible = "goodix,gt1158" }, { .compatible = "goodix,gt5663" }, { .compatible = "goodix,gt5688" }, { .compatible = "goodix,gt911" }, diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c index 30576a5f2f04..f437eefec94a 100644 --- a/drivers/input/touchscreen/ili210x.c +++ b/drivers/input/touchscreen/ili210x.c @@ -420,9 +420,9 @@ static int ili210x_i2c_probe(struct i2c_client *client, if (error) return error; - usleep_range(50, 100); + usleep_range(12000, 15000); gpiod_set_value_cansleep(reset_gpio, 0); - msleep(100); + msleep(160); } priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c index f67efdd040b2..43fcc8c84e2f 100644 --- a/drivers/input/touchscreen/melfas_mip4.c +++ b/drivers/input/touchscreen/melfas_mip4.c @@ -1453,7 +1453,7 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id) "ce", GPIOD_OUT_LOW); if (IS_ERR(ts->gpio_ce)) { error = PTR_ERR(ts->gpio_ce); - if (error != EPROBE_DEFER) + if (error != -EPROBE_DEFER) dev_err(&client->dev, "Failed to get gpio: %d\n", error); return error; diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c index 4d2d22a86977..bdb3e2c3ab79 100644 --- a/drivers/input/touchscreen/raydium_i2c_ts.c +++ b/drivers/input/touchscreen/raydium_i2c_ts.c @@ -210,12 +210,14 @@ static int raydium_i2c_send(struct i2c_client *client, error = raydium_i2c_xfer(client, addr, xfer, ARRAY_SIZE(xfer)); if (likely(!error)) - return 0; + goto out; msleep(RM_RETRY_DELAY_MS); } while (++tries < RM_MAX_RETRIES); dev_err(&client->dev, "%s failed: %d\n", __func__, error); +out: + kfree(tx_buf); return error; } diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c index 9a64e1dbc04a..a05a7a66b4ed 100644 --- a/drivers/input/touchscreen/stmfts.c +++ b/drivers/input/touchscreen/stmfts.c @@ -337,13 +337,15 @@ static int stmfts_input_open(struct input_dev *dev) struct stmfts_data *sdata = input_get_drvdata(dev); int err; - err = pm_runtime_get_sync(&sdata->client->dev); - if (err < 0) + err = pm_runtime_resume_and_get(&sdata->client->dev); + if (err) return err; err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON); - if (err) + if (err) { + pm_runtime_put_sync(&sdata->client->dev); return err; + } mutex_lock(&sdata->mutex); sdata->running = true; diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index 7887941730db..ceb6cdc20484 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -1084,9 +1084,14 @@ static int of_count_icc_providers(struct device_node *np) { struct device_node *child; int count = 0; + const struct of_device_id __maybe_unused ignore_list[] = { + { .compatible = "qcom,sc7180-ipa-virt" }, + {} + }; for_each_available_child_of_node(np, child) { - if (of_property_read_bool(child, "#interconnect-cells")) + if (of_property_read_bool(child, "#interconnect-cells") && + 
likely(!of_match_node(ignore_list, child))) count++; count += of_count_icc_providers(child); } diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c index e398ebf1dbba..36f870e7b596 100644 --- a/drivers/interconnect/imx/imx.c +++ b/drivers/interconnect/imx/imx.c @@ -226,16 +226,16 @@ int imx_icc_register(struct platform_device *pdev, struct device *dev = &pdev->dev; struct icc_onecell_data *data; struct icc_provider *provider; - int max_node_id; + int num_nodes; int ret; /* icc_onecell_data is indexed by node_id, unlike nodes param */ - max_node_id = get_max_node_id(nodes, nodes_count); - data = devm_kzalloc(dev, struct_size(data, nodes, max_node_id), + num_nodes = get_max_node_id(nodes, nodes_count) + 1; + data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes), GFP_KERNEL); if (!data) return -ENOMEM; - data->num_nodes = max_node_id; + data->num_nodes = num_nodes; provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL); if (!provider) diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c index f6fae64861ce..27cc5f03611c 100644 --- a/drivers/interconnect/qcom/icc-rpmh.c +++ b/drivers/interconnect/qcom/icc-rpmh.c @@ -20,13 +20,18 @@ void qcom_icc_pre_aggregate(struct icc_node *node) { size_t i; struct qcom_icc_node *qn; + struct qcom_icc_provider *qp; qn = node->data; + qp = to_qcom_provider(node->provider); for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) { qn->sum_avg[i] = 0; qn->max_peak[i] = 0; } + + for (i = 0; i < qn->num_bcms; i++) + qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); } EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate); @@ -44,10 +49,8 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, { size_t i; struct qcom_icc_node *qn; - struct qcom_icc_provider *qp; qn = node->data; - qp = to_qcom_provider(node->provider); if (!tag) tag = QCOM_ICC_TAG_ALWAYS; @@ -67,9 +70,6 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, *agg_avg += avg_bw; *agg_peak = max_t(u32, *agg_peak, peak_bw); - for (i = 0; i < qn->num_bcms; i++) - qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); - return 0; } EXPORT_SYMBOL_GPL(qcom_icc_aggregate); diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c index 8d9044ed18ab..597a7ee7a9bb 100644 --- a/drivers/interconnect/qcom/sc7180.c +++ b/drivers/interconnect/qcom/sc7180.c @@ -47,7 +47,6 @@ DEFINE_QNODE(qnm_mnoc_sf, SC7180_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC7180_SLAVE_GEM DEFINE_QNODE(qnm_snoc_gc, SC7180_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC7180_SLAVE_LLCC); DEFINE_QNODE(qnm_snoc_sf, SC7180_MASTER_SNOC_SF_MEM_NOC, 1, 16, SC7180_SLAVE_LLCC); DEFINE_QNODE(qxm_gpu, SC7180_MASTER_GFX3D, 2, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC); -DEFINE_QNODE(ipa_core_master, SC7180_MASTER_IPA_CORE, 1, 8, SC7180_SLAVE_IPA_CORE); DEFINE_QNODE(llcc_mc, SC7180_MASTER_LLCC, 2, 4, SC7180_SLAVE_EBI1); DEFINE_QNODE(qhm_mnoc_cfg, SC7180_MASTER_CNOC_MNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_MNOC); DEFINE_QNODE(qxm_camnoc_hf0, SC7180_MASTER_CAMNOC_HF0, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC); @@ -129,7 +128,6 @@ DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SC7180_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4); DEFINE_QNODE(qns_gem_noc_snoc, SC7180_SLAVE_GEM_NOC_SNOC, 1, 8, SC7180_MASTER_GEM_NOC_SNOC); DEFINE_QNODE(qns_llcc, SC7180_SLAVE_LLCC, 1, 16, SC7180_MASTER_LLCC); DEFINE_QNODE(srvc_gemnoc, SC7180_SLAVE_SERVICE_GEM_NOC, 1, 4); -DEFINE_QNODE(ipa_core_slave, SC7180_SLAVE_IPA_CORE, 1, 8); DEFINE_QNODE(ebi, SC7180_SLAVE_EBI1, 2, 4); DEFINE_QNODE(qns_mem_noc_hf, SC7180_SLAVE_MNOC_HF_MEM_NOC, 1, 32, 
SC7180_MASTER_MNOC_HF_MEM_NOC); DEFINE_QNODE(qns_mem_noc_sf, SC7180_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_SF_MEM_NOC); @@ -160,7 +158,6 @@ DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi); DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc); DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf); DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto); -DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave); DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc); DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9); DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu); @@ -372,22 +369,6 @@ static struct qcom_icc_desc sc7180_gem_noc = { .num_bcms = ARRAY_SIZE(gem_noc_bcms), }; -static struct qcom_icc_bcm *ipa_virt_bcms[] = { - &bcm_ip0, -}; - -static struct qcom_icc_node *ipa_virt_nodes[] = { - [MASTER_IPA_CORE] = &ipa_core_master, - [SLAVE_IPA_CORE] = &ipa_core_slave, -}; - -static struct qcom_icc_desc sc7180_ipa_virt = { - .nodes = ipa_virt_nodes, - .num_nodes = ARRAY_SIZE(ipa_virt_nodes), - .bcms = ipa_virt_bcms, - .num_bcms = ARRAY_SIZE(ipa_virt_bcms), -}; - static struct qcom_icc_bcm *mc_virt_bcms[] = { &bcm_acv, &bcm_mc0, @@ -611,8 +592,6 @@ static const struct of_device_id qnoc_of_match[] = { .data = &sc7180_dc_noc}, { .compatible = "qcom,sc7180-gem-noc", .data = &sc7180_gem_noc}, - { .compatible = "qcom,sc7180-ipa-virt", - .data = &sc7180_ipa_virt}, { .compatible = "qcom,sc7180-mc-virt", .data = &sc7180_mc_virt}, { .compatible = "qcom,sc7180-mmss-noc", diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c index c76b2c7f9b10..b936196c229c 100644 --- a/drivers/interconnect/qcom/sm8150.c +++ b/drivers/interconnect/qcom/sm8150.c @@ -627,7 +627,6 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8150", .of_match_table = qnoc_of_match, - .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c index cc558fec74e3..40820043c8d3 100644 --- a/drivers/interconnect/qcom/sm8250.c +++ b/drivers/interconnect/qcom/sm8250.c @@ -643,7 +643,6 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8250", .of_match_table = qnoc_of_match, - .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 6eaefc9e7b3d..e988f6f198c5 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -84,7 +84,7 @@ #define ACPI_DEVFLAG_LINT1 0x80 #define ACPI_DEVFLAG_ATSDIS 0x10000000 -#define LOOP_TIMEOUT 100000 +#define LOOP_TIMEOUT 2000000 /* * ACPI table definitions * diff --git a/drivers/iommu/amd/iommu.c 
b/drivers/iommu/amd/iommu.c index 200cf5da5e0a..f216a86d9c81 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -923,7 +923,8 @@ static void build_completion_wait(struct iommu_cmd *cmd, memset(cmd, 0, sizeof(*cmd)); cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK; cmd->data[1] = upper_32_bits(paddr); - cmd->data[2] = data; + cmd->data[2] = lower_32_bits(data); + cmd->data[3] = upper_32_bits(data); CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); } diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 58f17312e68c..2b24dfabc68f 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -1368,6 +1368,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) dev_info(smmu->dev, "\t0x%016llx\n", (unsigned long long)evt[i]); + cond_resched(); } /* @@ -3504,6 +3505,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev) /* Base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; if (resource_size(res) < arm_smmu_resource_size(smmu)) { dev_err(dev, "MMIO region too small (%pr)\n", res); return -EINVAL; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 6c9a69305636..fa6caa2a12e5 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -2117,11 +2117,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev) if (err) return err; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ioaddr = res->start; - smmu->base = devm_ioremap_resource(dev, res); + smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(smmu->base)) return PTR_ERR(smmu->base); + ioaddr = res->start; /* * The resource size should effectively match the value of SMMU_TOP; * stash that temporarily until we know PAGESIZE to validate it with. 
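The two SMMU probe fixes above share a use-before-check theme: arm-smmu-v3 now checks the platform_get_resource() result for NULL before calling resource_size() on it, and arm-smmu switches to devm_platform_get_and_ioremap_resource(), which looks up, validates and maps the resource in one call so res->start is read only after success (the old code dereferenced res before devm_ioremap_resource() could reject a missing resource). A probe sketch of the helper; the driver name is made up:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* The ERR_PTR covers both a missing MEM resource and a failed
	 * remap, so one check guards every dereference that follows. */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* Only here is res known to be valid. */
	dev_info(&pdev->dev, "MMIO region %pR mapped\n", res);
	return 0;
}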
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index 7f280c8d5c53..38641110be00 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -758,9 +758,12 @@ static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu) { struct device_node *child; - for_each_child_of_node(qcom_iommu->dev->of_node, child) - if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) + for_each_child_of_node(qcom_iommu->dev->of_node, child) { + if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) { + of_node_put(child); return true; + } + } return false; } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index a8678429291f..7990f65db16d 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -50,6 +50,11 @@ struct iommu_dma_cookie { struct iommu_domain *fq_domain; }; +struct iommu_dma_cookie_ext { + struct iommu_dma_cookie cookie; + struct mutex mutex; +}; + static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) { if (cookie->type == IOMMU_DMA_IOVA_COOKIE) @@ -59,14 +64,15 @@ static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) { - struct iommu_dma_cookie *cookie; + struct iommu_dma_cookie_ext *cookie; cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); if (cookie) { - INIT_LIST_HEAD(&cookie->msi_page_list); - cookie->type = type; + INIT_LIST_HEAD(&cookie->cookie.msi_page_list); + cookie->cookie.type = type; + mutex_init(&cookie->mutex); } - return cookie; + return &cookie->cookie; } /** @@ -305,9 +311,11 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size, struct device *dev) { struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iommu_dma_cookie_ext *cookie_ext; unsigned long order, base_pfn; struct iova_domain *iovad; int attr; + int ret; if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) return -EINVAL; @@ -331,14 +339,18 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, } /* start_pfn is always nonzero for an already-initialised domain */ + cookie_ext = container_of(cookie, struct iommu_dma_cookie_ext, cookie); + mutex_lock(&cookie_ext->mutex); if (iovad->start_pfn) { if (1UL << order != iovad->granule || base_pfn != iovad->start_pfn) { pr_warn("Incompatible range for DMA domain\n"); - return -EFAULT; + ret = -EFAULT; + goto done_unlock; } - return 0; + ret = 0; + goto done_unlock; } init_iova_domain(iovad, 1UL << order, base_pfn); @@ -352,10 +364,16 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, cookie->fq_domain = domain; } - if (!dev) - return 0; + if (!dev) { + ret = 0; + goto done_unlock; + } - return iova_reserve_iommu_regions(dev, domain); + ret = iova_reserve_iommu_regions(dev, domain); + +done_unlock: + mutex_unlock(&cookie_ext->mutex); + return ret; } static int iommu_dma_deferred_attach(struct device *dev, diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index de324b4eedfe..0cdb5493a464 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -635,7 +635,7 @@ static int exynos_sysmmu_probe(struct platform_device *pdev) ret = iommu_device_register(&data->iommu); if (ret) - return ret; + goto err_iommu_register; platform_set_drvdata(pdev, data); @@ -662,6 +662,10 @@ static int exynos_sysmmu_probe(struct platform_device *pdev) pm_runtime_enable(dev); return 0; + 
+err_iommu_register: + iommu_device_sysfs_remove(&data->iommu); + return ret; } static int __maybe_unused exynos_sysmmu_suspend(struct device *dev) diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index b8d0b56a7575..a27765a7f6b7 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -385,7 +385,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb, static struct notifier_block dmar_pci_bus_nb = { .notifier_call = dmar_pci_bus_notifier, - .priority = INT_MIN, + .priority = 1, }; static struct dmar_drhd_unit * @@ -497,7 +497,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg) if (drhd->reg_base_addr == rhsa->base_address) { int node = pxm_to_node(rhsa->proximity_domain); - if (!node_online(node)) + if (node != NUMA_NO_NODE && !node_online(node)) node = NUMA_NO_NODE; drhd->iommu->node = node; return 0; @@ -816,6 +816,7 @@ int __init dmar_dev_scope_init(void) info = dmar_alloc_pci_notify_info(dev, BUS_NOTIFY_ADD_DEVICE); if (!info) { + pci_dev_put(dev); return dmar_dev_scope_status; } else { dmar_pci_bus_add_dev(info); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index b21c8224b1c8..47666c9b4ba1 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -560,14 +560,36 @@ static inline int domain_pfn_supported(struct dmar_domain *domain, return !(addr_width < BITS_PER_LONG && pfn >> addr_width); } +/* + * Calculate the Supported Adjusted Guest Address Widths of an IOMMU. + * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of + * the returned SAGAW. + */ +static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) +{ + unsigned long fl_sagaw, sl_sagaw; + + fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0); + sl_sagaw = cap_sagaw(iommu->cap); + + /* Second level only. */ + if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) + return sl_sagaw; + + /* First level only. */ + if (!ecap_slts(iommu->ecap)) + return fl_sagaw; + + return fl_sagaw & sl_sagaw; +} + static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) { unsigned long sagaw; int agaw = -1; - sagaw = cap_sagaw(iommu->cap); - for (agaw = width_to_agaw(max_gaw); - agaw >= 0; agaw--) { + sagaw = __iommu_calculate_sagaw(iommu); + for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) { if (test_bit(agaw, &sagaw)) break; } @@ -1626,7 +1648,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, unsigned long pfn, unsigned int pages, int ih, int map) { - unsigned int mask = ilog2(__roundup_pow_of_two(pages)); + unsigned int aligned_pages = __roundup_pow_of_two(pages); + unsigned int mask = ilog2(aligned_pages); uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; u16 did = domain->iommu_did[iommu->seq_id]; @@ -1638,10 +1661,30 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, if (domain_use_first_level(domain)) { domain_flush_piotlb(iommu, domain, addr, pages, ih); } else { + unsigned long bitmask = aligned_pages - 1; + + /* + * PSI masks the low order bits of the base address. If the + * address isn't aligned to the mask, then compute a mask value + * needed to ensure the target range is flushed. + */ + if (unlikely(bitmask & pfn)) { + unsigned long end_pfn = pfn + pages - 1, shared_bits; + + /* + * Since end_pfn <= pfn + bitmask, the only way bits + * higher than bitmask can differ in pfn and end_pfn is + * by carrying. 
This means after masking out bitmask, + * high bits starting with the first set bit in + * shared_bits are all equal in both pfn and end_pfn. + */ + shared_bits = ~(pfn ^ end_pfn) & ~bitmask; + mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG; + } + /* * Fallback to domain selective flush if no PSI support or - * the size is too big. PSI requires page size to be 2 ^ x, - * and the base address is naturally aligned to the size. + * the size is too big. */ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) @@ -2803,6 +2846,7 @@ static int __init si_domain_init(int hw) if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { domain_exit(si_domain); + si_domain = NULL; return -EFAULT; } @@ -3462,6 +3506,10 @@ free_iommu: disable_dmar_iommu(iommu); free_dmar_iommu(iommu); } + if (si_domain) { + domain_exit(si_domain); + si_domain = NULL; + } kfree(g_iommus); @@ -4845,8 +4893,10 @@ static inline bool has_external_pci(void) struct pci_dev *pdev = NULL; for_each_pci_dev(pdev) - if (pdev->external_facing) + if (pdev->external_facing) { + pci_dev_put(pdev); return true; + } return false; } @@ -6275,7 +6325,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev) ver = (dev->device >> 8) & 0xff; if (ver != 0x45 && ver != 0x46 && ver != 0x4c && ver != 0x4e && ver != 0x8a && ver != 0x98 && - ver != 0x9a) + ver != 0x9a && ver != 0xa7) return; if (risky_device(dev)) diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index fb911b6c418f..86fd49ae7f61 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -669,7 +669,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, * Since it is a second level only translation setup, we should * set SRE bit as well (addresses are expected to be GPAs). */ - if (pasid != PASID_RID2PASID) + if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap)) pasid_set_sre(pte); pasid_set_present(pte); pasid_flush_caches(iommu, pte, pasid, did); @@ -704,7 +704,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu, * We should set SRE bit as well since the addresses are expected * to be GPAs. 
*/ - pasid_set_sre(pte); + if (ecap_srs(iommu->ecap)) + pasid_set_sre(pte); pasid_set_present(pte); pasid_flush_caches(iommu, pte, pasid, did); diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index c710cb413a28..9d05beba3e6c 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -280,7 +280,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, curr = __get_cached_rbnode(iovad, limit_pfn); curr_iova = rb_entry(curr, struct iova, node); - low_pfn_new = curr_iova->pfn_hi + 1; + low_pfn_new = curr_iova->pfn_hi; retry: do { @@ -294,7 +294,7 @@ retry: if (high_pfn < size || new_pfn < low_pfn) { if (low_pfn == iovad->start_pfn && low_pfn_new < limit_pfn) { high_pfn = limit_pfn; - low_pfn = low_pfn_new; + low_pfn = low_pfn_new + 1; curr = &iovad->anchor.node; curr_iova = rb_entry(curr, struct iova, node); goto retry; diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index f0ba6a09b434..4c2b5d76e73f 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -616,16 +616,19 @@ static void insert_iommu_master(struct device *dev, static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *spec) { - struct msm_iommu_dev *iommu; + struct msm_iommu_dev *iommu = NULL, *iter; unsigned long flags; int ret = 0; spin_lock_irqsave(&msm_iommu_lock, flags); - list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) - if (iommu->dev->of_node == spec->np) + list_for_each_entry(iter, &qcom_iommu_devices, dev_node) { + if (iter->dev->of_node == spec->np) { + iommu = iter; break; + } + } - if (!iommu || iommu->dev->of_node != spec->np) { + if (!iommu) { ret = -ENODEV; goto fail; } diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index e168a682806a..46f641481ac9 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -458,7 +458,7 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain) static int mtk_iommu_attach_device(struct iommu_domain *domain, struct device *dev) { - struct mtk_iommu_data *data = dev_iommu_priv_get(dev); + struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata; struct mtk_iommu_domain *dom = to_mtk_domain(domain); struct device *m4udev = data->dev; int ret, domid; @@ -468,20 +468,24 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, return domid; if (!dom->data) { - if (mtk_iommu_domain_finalise(dom, data, domid)) + /* Data is in the frstdata in sharing pgtable case. 
*/ + frstdata = mtk_iommu_get_m4u_data(); + + if (mtk_iommu_domain_finalise(dom, frstdata, domid)) return -ENODEV; dom->data = data; } + mutex_lock(&data->mutex); if (!data->m4u_dom) { /* Initialize the M4U HW */ ret = pm_runtime_resume_and_get(m4udev); if (ret < 0) - return ret; + goto err_unlock; ret = mtk_iommu_hw_init(data); if (ret) { pm_runtime_put(m4udev); - return ret; + goto err_unlock; } data->m4u_dom = dom; writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, @@ -489,9 +493,14 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, pm_runtime_put(m4udev); } + mutex_unlock(&data->mutex); mtk_iommu_config(data, dev, true, domid); return 0; + +err_unlock: + mutex_unlock(&data->mutex); + return ret; } static void mtk_iommu_detach_device(struct iommu_domain *domain, @@ -603,6 +612,7 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev) if (domid < 0) return ERR_PTR(domid); + mutex_lock(&data->mutex); group = data->m4u_group[domid]; if (!group) { group = iommu_group_alloc(); @@ -611,6 +621,7 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev) } else { iommu_group_ref_get(group); } + mutex_unlock(&data->mutex); return group; } @@ -884,6 +895,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, data); + mutex_init(&data->mutex); ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, "mtk-iommu.%pa", &ioaddr); @@ -932,10 +944,8 @@ static int mtk_iommu_remove(struct platform_device *pdev) iommu_device_sysfs_remove(&data->iommu); iommu_device_unregister(&data->iommu); - if (iommu_present(&platform_bus_type)) - bus_set_iommu(&platform_bus_type, NULL); + list_del(&data->list); - clk_disable_unprepare(data->bclk); device_link_remove(data->smicomm_dev, &pdev->dev); pm_runtime_disable(&pdev->dev); devm_free_irq(&pdev->dev, data->irq, data); diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index f81fa8862ed0..f413546ac6e5 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -80,6 +80,8 @@ struct mtk_iommu_data { struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */ + struct mutex mutex; /* Protect m4u_group/m4u_dom above */ + struct list_head list; struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX]; }; diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index a99afb5d9011..259f65291d90 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -32,12 +32,12 @@ static inline bool is_omap_iommu_detached(struct omap_iommu *obj) ssize_t bytes; \ const char *str = "%20s: %08x\n"; \ const int maxcol = 32; \ - bytes = snprintf(p, maxcol, str, __stringify(name), \ + if (len < maxcol) \ + goto out; \ + bytes = scnprintf(p, maxcol, str, __stringify(name), \ iommu_read_reg(obj, MMU_##name)); \ p += bytes; \ len -= bytes; \ - if (len < maxcol) \ - goto out; \ } while (0) static ssize_t diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 71f29c0927fc..ff2c692c0db4 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1665,7 +1665,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev) num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus", sizeof(phandle)); if (num_iommus < 0) - return 0; + return ERR_PTR(-ENODEV); arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL); if (!arch_data) diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index c44e1f31a8bc..90d2e323278f 100644 --- a/drivers/irqchip/Kconfig +++ 
b/drivers/irqchip/Kconfig @@ -178,7 +178,7 @@ config MADERA_IRQ config IRQ_MIPS_CPU bool select GENERIC_IRQ_CHIP - select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING + select GENERIC_IRQ_IPI if SMP && SYS_SUPPORTS_MULTITHREADING select IRQ_DOMAIN select GENERIC_IRQ_EFFECTIVE_AFF_MASK @@ -313,7 +313,8 @@ config KEYSTONE_IRQ config MIPS_GIC bool - select GENERIC_IRQ_IPI + select GENERIC_IRQ_IPI if SMP + select IRQ_DOMAIN_HIERARCHY select MIPS_CM config INGENIC_IRQ diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 84f2741aaac6..c76fb70c70bb 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -308,7 +308,16 @@ static inline int armada_370_xp_msi_init(struct device_node *node, static void armada_xp_mpic_perf_init(void) { - unsigned long cpuid = cpu_logical_map(smp_processor_id()); + unsigned long cpuid; + + /* + * This Performance Counter Overflow interrupt is specific for + * Armada 370 and XP. It is not available on Armada 375, 38x and 39x. + */ + if (!of_machine_is_compatible("marvell,armada-370-xp")) + return; + + cpuid = cpu_logical_map(smp_processor_id()); /* Enable Performance Counter Overflow interrupts */ writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid), diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c index 8d591c179f81..3d3210828e9b 100644 --- a/drivers/irqchip/irq-aspeed-i2c-ic.c +++ b/drivers/irqchip/irq-aspeed-i2c-ic.c @@ -79,8 +79,8 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node, } i2c_ic->parent_irq = irq_of_parse_and_map(node, 0); - if (i2c_ic->parent_irq < 0) { - ret = i2c_ic->parent_irq; + if (!i2c_ic->parent_irq) { + ret = -EINVAL; goto err_iounmap; } diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c index 0f0aac7cc114..7cb13364ecfa 100644 --- a/drivers/irqchip/irq-aspeed-scu-ic.c +++ b/drivers/irqchip/irq-aspeed-scu-ic.c @@ -159,8 +159,8 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic, } irq = irq_of_parse_and_map(node, 0); - if (irq < 0) { - rc = irq; + if (!irq) { + rc = -EINVAL; goto err; } diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c index b4c1924f0255..38fab02ffe9d 100644 --- a/drivers/irqchip/irq-gic-realview.c +++ b/drivers/irqchip/irq-gic-realview.c @@ -57,6 +57,7 @@ realview_gic_of_init(struct device_node *node, struct device_node *parent) /* The PB11MPCore GIC needs to be configured in the syscon */ map = syscon_node_to_regmap(np); + of_node_put(np); if (!IS_ERR(map)) { /* new irq mode with no DCC */ regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 88debe2abc29..8ccff68c76d7 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1616,7 +1616,7 @@ static int its_select_cpu(struct irq_data *d, cpu = cpumask_pick_least_loaded(d, tmpmask); } else { - cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask); + cpumask_copy(tmpmask, aff_mask); /* If we cannot cross sockets, limit the search to that node */ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && @@ -3021,18 +3021,12 @@ static int __init allocate_lpi_tables(void) return 0; } -static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +static u64 read_vpend_dirty_clear(void __iomem *vlpi_base) { u32 count = 1000000; /* 1s! 
*/ bool clean; u64 val; - val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); - val &= ~GICR_VPENDBASER_Valid; - val &= ~clr; - val |= set; - gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); - do { val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); clean = !(val & GICR_VPENDBASER_Dirty); @@ -3043,10 +3037,26 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) } } while (!clean && count); - if (unlikely(val & GICR_VPENDBASER_Dirty)) { + if (unlikely(!clean)) pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + + return val; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +{ + u64 val; + + /* Make sure we wait until the RD is done with the initial scan */ + val = read_vpend_dirty_clear(vlpi_base); + val &= ~GICR_VPENDBASER_Valid; + val &= ~clr; + val |= set; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + val = read_vpend_dirty_clear(vlpi_base); + if (unlikely(val & GICR_VPENDBASER_Dirty)) val |= GICR_VPENDBASER_PendingLast; - } return val; } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 5b09e93f4ac5..6db1dbc6a736 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -198,11 +198,11 @@ static inline void __iomem *gic_dist_base(struct irq_data *d) } } -static void gic_do_wait_for_rwp(void __iomem *base) +static void gic_do_wait_for_rwp(void __iomem *base, u32 bit) { u32 count = 1000000; /* 1s! */ - while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { + while (readl_relaxed(base + GICD_CTLR) & bit) { count--; if (!count) { pr_err_ratelimited("RWP timeout, gone fishing\n"); @@ -216,13 +216,13 @@ static void gic_do_wait_for_rwp(void __iomem *base) /* Wait for completion of a distributor change */ static void gic_dist_wait_for_rwp(void) { - gic_do_wait_for_rwp(gic_data.dist_base); + gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP); } /* Wait for completion of a redistributor change */ static void gic_redist_wait_for_rwp(void) { - gic_do_wait_for_rwp(gic_data_rdist_rd_base()); + gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP); } #ifdef CONFIG_ARM64 @@ -1483,6 +1483,12 @@ static int gic_irq_domain_translate(struct irq_domain *d, if(fwspec->param_count != 2) return -EINVAL; + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + *hwirq = fwspec->param[0]; *type = fwspec->param[1]; @@ -1842,7 +1848,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); if (!gic_data.ppi_descs) - return; + goto out_put_node; nr_parts = of_get_child_count(parts_node); @@ -1883,12 +1889,15 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) continue; cpu = of_cpu_node_to_id(cpu_node); - if (WARN_ON(cpu < 0)) + if (WARN_ON(cpu < 0)) { + of_node_put(cpu_node); continue; + } pr_cont("%pOF[%d] ", cpu_node, cpu); cpumask_set_cpu(cpu, &part->mask); + of_node_put(cpu_node); } pr_cont("}\n"); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index c5a247291d19..ac027de9e5c4 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1147,6 +1147,12 @@ static int gic_irq_domain_translate(struct irq_domain *d, if(fwspec->param_count != 2) return -EINVAL; + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + *hwirq = 
fwspec->param[0]; *type = fwspec->param[1]; diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 215885962bb0..8ada91bdbe4d 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -50,13 +50,15 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); static DEFINE_SPINLOCK(gic_lock); static struct irq_domain *gic_irq_domain; -static struct irq_domain *gic_ipi_domain; static int gic_shared_intrs; static unsigned int gic_cpu_pin; static unsigned int timer_cpu_pin; static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; + +#ifdef CONFIG_GENERIC_IRQ_IPI static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS); +#endif /* CONFIG_GENERIC_IRQ_IPI */ static struct gic_all_vpes_chip_data { u32 map; @@ -459,9 +461,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, u32 map; if (hwirq >= GIC_SHARED_HWIRQ_BASE) { +#ifdef CONFIG_GENERIC_IRQ_IPI /* verify that shared irqs don't conflict with an IPI irq */ if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv)) return -EBUSY; +#endif /* CONFIG_GENERIC_IRQ_IPI */ err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, &gic_level_irq_controller, @@ -550,6 +554,8 @@ static const struct irq_domain_ops gic_irq_domain_ops = { .map = gic_irq_domain_map, }; +#ifdef CONFIG_GENERIC_IRQ_IPI + static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, @@ -653,6 +659,48 @@ static const struct irq_domain_ops gic_ipi_domain_ops = { .match = gic_ipi_domain_match, }; +static int gic_register_ipi_domain(struct device_node *node) +{ + struct irq_domain *gic_ipi_domain; + unsigned int v[2], num_ipis; + + gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, + IRQ_DOMAIN_FLAG_IPI_PER_CPU, + GIC_NUM_LOCAL_INTRS + gic_shared_intrs, + node, &gic_ipi_domain_ops, NULL); + if (!gic_ipi_domain) { + pr_err("Failed to add IPI domain"); + return -ENXIO; + } + + irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); + + if (node && + !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { + bitmap_set(ipi_resrv, v[0], v[1]); + } else { + /* + * Reserve 2 interrupts per possible CPU/VP for use as IPIs, + * meeting the requirements of arch/mips SMP. + */ + num_ipis = 2 * num_possible_cpus(); + bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis); + } + + bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); + + return 0; +} + +#else /* !CONFIG_GENERIC_IRQ_IPI */ + +static inline int gic_register_ipi_domain(struct device_node *node) +{ + return 0; +} + +#endif /* !CONFIG_GENERIC_IRQ_IPI */ + static int gic_cpu_startup(unsigned int cpu) { /* Enable or disable EIC */ @@ -671,11 +719,12 @@ static int gic_cpu_startup(unsigned int cpu) static int __init gic_of_init(struct device_node *node, struct device_node *parent) { - unsigned int cpu_vec, i, gicconfig, v[2], num_ipis; + unsigned int cpu_vec, i, gicconfig; unsigned long reserved; phys_addr_t gic_base; struct resource res; size_t gic_len; + int ret; /* Find the first available CPU vector. 
*/ i = 0; @@ -717,6 +766,10 @@ static int __init gic_of_init(struct device_node *node, } mips_gic_base = ioremap(gic_base, gic_len); + if (!mips_gic_base) { + pr_err("Failed to ioremap gic_base\n"); + return -ENOMEM; + } gicconfig = read_gic_config(); gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; @@ -764,30 +817,9 @@ static int __init gic_of_init(struct device_node *node, return -ENXIO; } - gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, - IRQ_DOMAIN_FLAG_IPI_PER_CPU, - GIC_NUM_LOCAL_INTRS + gic_shared_intrs, - node, &gic_ipi_domain_ops, NULL); - if (!gic_ipi_domain) { - pr_err("Failed to add IPI domain"); - return -ENXIO; - } - - irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); - - if (node && - !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { - bitmap_set(ipi_resrv, v[0], v[1]); - } else { - /* - * Reserve 2 interrupts per possible CPU/VP for use as IPIs, - * meeting the requirements of arch/mips SMP. - */ - num_ipis = 2 * num_possible_cpus(); - bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis); - } - - bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); + ret = gic_register_ipi_domain(node); + if (ret) + return ret; board_bind_eic_interrupt = &gic_bind_eic_interrupt; diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c index 03d2366118dd..d5f1fabc45d7 100644 --- a/drivers/irqchip/irq-or1k-pic.c +++ b/drivers/irqchip/irq-or1k-pic.c @@ -66,7 +66,6 @@ static struct or1k_pic_dev or1k_pic_level = { .name = "or1k-PIC-level", .irq_unmask = or1k_pic_unmask, .irq_mask = or1k_pic_mask, - .irq_mask_ack = or1k_pic_mask_ack, }, .handle = handle_level_irq, .flags = IRQ_LEVEL | IRQ_NOPROBE, diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c index abd011fcecf4..c7db617e1a2f 100644 --- a/drivers/irqchip/irq-sni-exiu.c +++ b/drivers/irqchip/irq-sni-exiu.c @@ -37,11 +37,26 @@ struct exiu_irq_data { u32 spi_base; }; -static void exiu_irq_eoi(struct irq_data *d) +static void exiu_irq_ack(struct irq_data *d) { struct exiu_irq_data *data = irq_data_get_irq_chip_data(d); writel(BIT(d->hwirq), data->base + EIREQCLR); +} + +static void exiu_irq_eoi(struct irq_data *d) +{ + struct exiu_irq_data *data = irq_data_get_irq_chip_data(d); + + /* + * Level triggered interrupts are latched and must be cleared during + * EOI or the interrupt will be jammed on. Of course if a level + * triggered interrupt is still asserted then the write will not clear + * the interrupt. 
+ */ + if (irqd_is_level_type(d)) + writel(BIT(d->hwirq), data->base + EIREQCLR); + irq_chip_eoi_parent(d); } @@ -91,10 +106,13 @@ static int exiu_irq_set_type(struct irq_data *d, unsigned int type) writel_relaxed(val, data->base + EILVL); val = readl_relaxed(data->base + EIEDG); - if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) + if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) { val &= ~BIT(d->hwirq); - else + irq_set_handler_locked(d, handle_fasteoi_irq); + } else { val |= BIT(d->hwirq); + irq_set_handler_locked(d, handle_fasteoi_ack_irq); + } writel_relaxed(val, data->base + EIEDG); writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR); @@ -104,6 +122,7 @@ static int exiu_irq_set_type(struct irq_data *d, unsigned int type) static struct irq_chip exiu_irq_chip = { .name = "EXIU", + .irq_ack = exiu_irq_ack, .irq_eoi = exiu_irq_eoi, .irq_enable = exiu_irq_enable, .irq_mask = exiu_irq_mask, diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c index e1f771c72fc4..ad3e2c1b3c87 100644 --- a/drivers/irqchip/irq-tegra.c +++ b/drivers/irqchip/irq-tegra.c @@ -148,10 +148,10 @@ static int tegra_ictlr_suspend(void) lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS); /* Disable COP interrupts */ - writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR); + writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR); /* Disable CPU interrupts */ - writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR); + writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR); /* Enable the wakeup sources of ictlr */ writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET); @@ -172,12 +172,12 @@ static void tegra_ictlr_resume(void) writel_relaxed(lic->cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS); - writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR); + writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_CPU_IER_CLR); writel_relaxed(lic->cpu_ier[i], ictlr + ICTLR_CPU_IER_SET); writel_relaxed(lic->cop_iep[i], ictlr + ICTLR_COP_IEP_CLASS); - writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR); + writel_relaxed(GENMASK(31, 0), ictlr + ICTLR_COP_IER_CLR); writel_relaxed(lic->cop_ier[i], ictlr + ICTLR_COP_IER_SET); } @@ -312,7 +312,7 @@ static int __init tegra_ictlr_init(struct device_node *node, lic->base[i] = base; /* Disable all interrupts */ - writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR); + writel_relaxed(GENMASK(31, 0), base + ICTLR_CPU_IER_CLR); /* All interrupts target IRQ */ writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS); diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c index 27933338f7b3..8c581c985aa7 100644 --- a/drivers/irqchip/irq-xtensa-mx.c +++ b/drivers/irqchip/irq-xtensa-mx.c @@ -151,14 +151,25 @@ static struct irq_chip xtensa_mx_irq_chip = { .irq_set_affinity = xtensa_mx_irq_set_affinity, }; +static void __init xtensa_mx_init_common(struct irq_domain *root_domain) +{ + unsigned int i; + + irq_set_default_host(root_domain); + secondary_init_irq(); + + /* Initialize default IRQ routing to CPU 0 */ + for (i = 0; i < XCHAL_NUM_EXTINTERRUPTS; ++i) + set_er(1, MIROUT(i)); +} + int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) { struct irq_domain *root_domain = irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, &xtensa_mx_irq_domain_ops, &xtensa_mx_irq_chip); - irq_set_default_host(root_domain); - secondary_init_irq(); + xtensa_mx_init_common(root_domain); return 0; } @@ -168,8 +179,7 @@ static int __init xtensa_mx_init(struct device_node *np, struct irq_domain *root_domain = irq_domain_add_linear(np, NR_IRQS, 
&xtensa_mx_irq_domain_ops, &xtensa_mx_irq_chip); - irq_set_default_host(root_domain); - secondary_init_irq(); + xtensa_mx_init_common(root_domain); return 0; } IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init); diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index a52f275f8263..f8447135a902 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -956,7 +956,7 @@ nj_release(struct tiger_hw *card) } if (card->irq > 0) free_irq(card->irq, card); - if (card->isac.dch.dev.dev.class) + if (device_is_registered(&card->isac.dch.dev.dev)) mISDN_unregister_device(&card->isac.dch.dev); for (i = 0; i < 2; i++) { diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c index a41b4b264594..90ee56d07a6e 100644 --- a/drivers/isdn/mISDN/core.c +++ b/drivers/isdn/mISDN/core.c @@ -222,7 +222,7 @@ mISDN_register_device(struct mISDNdevice *dev, err = get_free_devid(); if (err < 0) - goto error1; + return err; dev->id = err; device_initialize(&dev->dev); @@ -233,11 +233,12 @@ mISDN_register_device(struct mISDNdevice *dev, if (debug & DEBUG_CORE) printk(KERN_DEBUG "mISDN_register %s %d\n", dev_name(&dev->dev), dev->id); + dev->dev.class = &mISDN_class; + err = create_stack(dev); if (err) goto error1; - dev->dev.class = &mISDN_class; dev->dev.platform_data = dev; dev->dev.parent = parent; dev_set_drvdata(&dev->dev, dev); @@ -249,8 +250,8 @@ mISDN_register_device(struct mISDNdevice *dev, error3: delete_stack(dev); - return err; error1: + put_device(&dev->dev); return err; } diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c index c3b2c99b5cd5..cfbcd9e973c2 100644 --- a/drivers/isdn/mISDN/dsp_pipeline.c +++ b/drivers/isdn/mISDN/dsp_pipeline.c @@ -77,6 +77,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) if (!entry) return -ENOMEM; + INIT_LIST_HEAD(&entry->list); entry->elem = elem; entry->dev.class = elements_class; @@ -107,7 +108,7 @@ err2: device_unregister(&entry->dev); return ret; err1: - kfree(entry); + put_device(&entry->dev); return ret; } EXPORT_SYMBOL(mISDN_dsp_element_register); diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h index 7ea10db20e3a..48133d022812 100644 --- a/drivers/isdn/mISDN/l1oip.h +++ b/drivers/isdn/mISDN/l1oip.h @@ -59,6 +59,7 @@ struct l1oip { int bundle; /* bundle channels in one frm */ int codec; /* codec to use for transmis. 
*/ int limit; /* limit number of bchannels */ + bool shutdown; /* if card is released */ /* timer */ struct timer_list keep_tl; diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index b57dcb834594..aec4f2a69c3b 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -275,7 +275,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask, p = frame; /* restart timer */ - if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ)) + if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ) && !hc->shutdown) mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ); else hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ; @@ -601,7 +601,9 @@ multiframe: goto multiframe; /* restart timer */ - if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) { + if ((time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || + !hc->timeout_on) && + !hc->shutdown) { hc->timeout_on = 1; mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ); } else /* only adjust timer */ @@ -1232,11 +1234,10 @@ release_card(struct l1oip *hc) { int ch; - if (timer_pending(&hc->keep_tl)) - del_timer(&hc->keep_tl); + hc->shutdown = true; - if (timer_pending(&hc->timeout_tl)) - del_timer(&hc->timeout_tl); + del_timer_sync(&hc->keep_tl); + del_timer_sync(&hc->timeout_tl); cancel_work_sync(&hc->workq); diff --git a/drivers/leds/leds-lm3601x.c b/drivers/leds/leds-lm3601x.c index d0e1d4814042..3d1272748201 100644 --- a/drivers/leds/leds-lm3601x.c +++ b/drivers/leds/leds-lm3601x.c @@ -444,8 +444,6 @@ static int lm3601x_remove(struct i2c_client *client) { struct lm3601x_led *led = i2c_get_clientdata(client); - mutex_destroy(&led->lock); - return regmap_update_bits(led->regmap, LM3601X_ENABLE_REG, LM3601X_ENABLE_MASK, LM3601X_MODE_STANDBY); diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig index 4c2ce210c123..95b09148167e 100644 --- a/drivers/lightnvm/Kconfig +++ b/drivers/lightnvm/Kconfig @@ -5,7 +5,7 @@ menuconfig NVM bool "Open-Channel SSD target support" - depends on BLOCK + depends on BLOCK && BROKEN help Say Y here to get to enable Open-channel SSDs. diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig index 5cdc361da37c..539a2ed4e13d 100644 --- a/drivers/macintosh/Kconfig +++ b/drivers/macintosh/Kconfig @@ -44,6 +44,7 @@ config ADB_IOP config ADB_CUDA bool "Support for Cuda/Egret based Macs and PowerMacs" depends on (ADB || PPC_PMAC) && !PPC_PMAC64 + select RTC_LIB help This provides support for Cuda/Egret based Macintosh and Power Macintosh systems. This includes most m68k based Macs, @@ -57,6 +58,7 @@ config ADB_CUDA config ADB_PMU bool "Support for PMU based PowerMacs and PowerBooks" depends on PPC_PMAC || MAC + select RTC_LIB help On PowerBooks, iBooks, and recent iMacs and Power Macintoshes, the PMU is an embedded microprocessor whose primary function is to @@ -67,6 +69,10 @@ config ADB_PMU this device; you should do so if your machine is one of those mentioned above. 
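The l1oip change above is the canonical teardown for a self-rearming timer: plain del_timer() cannot stop a handler that re-adds itself, so release first sets a shutdown flag that every rearm site checks, then calls del_timer_sync() to wait out a handler already running on another CPU. A condensed sketch with hypothetical conn names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct conn {
	struct timer_list keep_tl;
	bool shutdown;		/* set once teardown begins */
};

static void keepalive_fn(struct timer_list *t)
{
	struct conn *c = from_timer(c, t, keep_tl);

	/* ... send a keepalive frame ... */

	/*
	 * Rearm only while the connection is alive; without the check
	 * the timer can re-add itself between del_timer() and kfree().
	 */
	if (!c->shutdown)
		mod_timer(&c->keep_tl, jiffies + 5 * HZ);
}

static void conn_init(struct conn *c)
{
	c->shutdown = false;
	timer_setup(&c->keep_tl, keepalive_fn, 0);
	mod_timer(&c->keep_tl, jiffies + 5 * HZ);
}

static void conn_release(struct conn *c)
{
	c->shutdown = true;	/* stop all future rearming first */

	/*
	 * del_timer_sync() also waits for a handler already running on
	 * another CPU, which plain del_timer() does not.
	 */
	del_timer_sync(&c->keep_tl);
}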
+config ADB_PMU_EVENT + def_bool y + depends on ADB_PMU && INPUT=y + config ADB_PMU_LED bool "Support for the Power/iBook front LED" depends on PPC_PMAC && ADB_PMU diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile index 49819b1b6f20..712edcb3e0b0 100644 --- a/drivers/macintosh/Makefile +++ b/drivers/macintosh/Makefile @@ -12,7 +12,8 @@ obj-$(CONFIG_MAC_EMUMOUSEBTN) += mac_hid.o obj-$(CONFIG_INPUT_ADBHID) += adbhid.o obj-$(CONFIG_ANSLCD) += ans-lcd.o -obj-$(CONFIG_ADB_PMU) += via-pmu.o via-pmu-event.o +obj-$(CONFIG_ADB_PMU) += via-pmu.o +obj-$(CONFIG_ADB_PMU_EVENT) += via-pmu-event.o obj-$(CONFIG_ADB_PMU_LED) += via-pmu-led.o obj-$(CONFIG_PMAC_BACKLIGHT) += via-pmu-backlight.o obj-$(CONFIG_ADB_CUDA) += via-cuda.o diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 73b396189039..afb0942ccc29 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -647,7 +647,7 @@ do_adb_query(struct adb_request *req) switch(req->data[1]) { case ADB_QUERY_GETDEVINFO: - if (req->nbytes < 3) + if (req->nbytes < 3 || req->data[2] >= 16) break; mutex_lock(&adb_handler_mutex); req->reply[0] = adb_handler[req->data[2]].original_address; diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 73e6ae88fafd..aae6328b2429 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -1460,7 +1460,7 @@ next: pmu_pass_intr(data, len); /* len == 6 is probably a bad check. But how do I * know what PMU versions send what events here? */ - if (len == 6) { + if (IS_ENABLED(CONFIG_ADB_PMU_EVENT) && len == 6) { via_pmu_event(PMU_EVT_POWER, !!(data[1]&8)); via_pmu_event(PMU_EVT_LID, data[1]&1); } diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index bee33abb5308..e913ed1e34c6 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -632,15 +632,15 @@ static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg) rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src), DMA_TO_DEVICE); - if (rc < 0) - return rc; + if (!rc) + return -EIO; rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst), DMA_FROM_DEVICE); - if (rc < 0) { + if (!rc) { dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src), DMA_TO_DEVICE); - return rc; + return -EIO; } return 0; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 418914373a51..b47c00dea0f2 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -2006,8 +2006,7 @@ int bch_btree_check(struct cache_set *c) int i; struct bkey *k = NULL; struct btree_iter iter; - struct btree_check_state *check_state; - char name[32]; + struct btree_check_state check_state; /* check and mark root node keys */ for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) @@ -2018,63 +2017,59 @@ int bch_btree_check(struct cache_set *c) if (c->root->level == 0) return 0; - check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL); - if (!check_state) - return -ENOMEM; - - check_state->c = c; - check_state->total_threads = bch_btree_chkthread_nr(); - check_state->key_idx = 0; - spin_lock_init(&check_state->idx_lock); - atomic_set(&check_state->started, 0); - atomic_set(&check_state->enough, 0); - init_waitqueue_head(&check_state->wait); + memset(&check_state, 0, sizeof(struct btree_check_state)); + check_state.c = c; + check_state.total_threads = bch_btree_chkthread_nr(); + check_state.key_idx = 0; + spin_lock_init(&check_state.idx_lock); + atomic_set(&check_state.started, 0); + 
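The bcm-flexrm fix above corrects a common misreading of the DMA API: dma_map_sg() returns the number of mapped entries on success and 0 on failure, never a negative errno, so the old rc < 0 tests were dead code. A sketch of the corrected convention; map_both() is a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_both(struct device *dev,
		    struct scatterlist *src, struct scatterlist *dst)
{
	int n;

	/*
	 * dma_map_sg() returns the number of mapped entries (> 0) on
	 * success and 0 on failure -- never a negative errno.
	 */
	n = dma_map_sg(dev, src, sg_nents(src), DMA_TO_DEVICE);
	if (!n)
		return -EIO;

	n = dma_map_sg(dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
	if (!n) {
		/* unwind the first mapping before reporting failure */
		dma_unmap_sg(dev, src, sg_nents(src), DMA_TO_DEVICE);
		return -EIO;
	}

	return 0;
}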
atomic_set(&check_state.enough, 0); + init_waitqueue_head(&check_state.wait); + rw_lock(0, c->root, c->root->level); /* * Run multiple threads to check btree nodes in parallel, - * if check_state->enough is non-zero, it means current + * if check_state.enough is non-zero, it means current * running check threads are enough, unncessary to create * more. */ - for (i = 0; i < check_state->total_threads; i++) { - /* fetch latest check_state->enough earlier */ + for (i = 0; i < check_state.total_threads; i++) { + /* fetch latest check_state.enough earlier */ smp_mb__before_atomic(); - if (atomic_read(&check_state->enough)) + if (atomic_read(&check_state.enough)) break; - check_state->infos[i].result = 0; - check_state->infos[i].state = check_state; - snprintf(name, sizeof(name), "bch_btrchk[%u]", i); - atomic_inc(&check_state->started); + check_state.infos[i].result = 0; + check_state.infos[i].state = &check_state; - check_state->infos[i].thread = + check_state.infos[i].thread = kthread_run(bch_btree_check_thread, - &check_state->infos[i], - name); - if (IS_ERR(check_state->infos[i].thread)) { + &check_state.infos[i], + "bch_btrchk[%d]", i); + if (IS_ERR(check_state.infos[i].thread)) { pr_err("fails to run thread bch_btrchk[%d]\n", i); for (--i; i >= 0; i--) - kthread_stop(check_state->infos[i].thread); + kthread_stop(check_state.infos[i].thread); ret = -ENOMEM; goto out; } + atomic_inc(&check_state.started); } /* * Must wait for all threads to stop. */ - wait_event_interruptible(check_state->wait, - atomic_read(&check_state->started) == 0); + wait_event(check_state.wait, atomic_read(&check_state.started) == 0); - for (i = 0; i < check_state->total_threads; i++) { - if (check_state->infos[i].result) { - ret = check_state->infos[i].result; + for (i = 0; i < check_state.total_threads; i++) { + if (check_state.infos[i].result) { + ret = check_state.infos[i].result; goto out; } } out: - kfree(check_state); + rw_unlock(0, c->root); return ret; } diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 50482107134f..1b5fdbc0d83e 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -226,7 +226,7 @@ struct btree_check_info { int result; }; -#define BCH_BTR_CHKTHREAD_MAX 64 +#define BCH_BTR_CHKTHREAD_MAX 12 struct btree_check_state { struct cache_set *c; int total_threads; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index c6613e817333..aea4833f67fc 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -407,6 +407,11 @@ err: return ret; } +void bch_journal_space_reserve(struct journal *j) +{ + j->do_reserve = true; +} + /* Journalling */ static void btree_flush_write(struct cache_set *c) @@ -625,12 +630,30 @@ static void do_journal_discard(struct cache *ca) } } +static unsigned int free_journal_buckets(struct cache_set *c) +{ + struct journal *j = &c->journal; + struct cache *ca = c->cache; + struct journal_device *ja = &c->cache->journal; + unsigned int n; + + /* In case njournal_buckets is not power of 2 */ + if (ja->cur_idx >= ja->discard_idx) + n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx; + else + n = ja->discard_idx - ja->cur_idx; + + if (n > (1 + j->do_reserve)) + return n - (1 + j->do_reserve); + + return 0; +} + static void journal_reclaim(struct cache_set *c) { struct bkey *k = &c->journal.key; struct cache *ca = c->cache; uint64_t last_seq; - unsigned int next; struct journal_device *ja = &ca->journal; atomic_t p __maybe_unused; @@ -653,12 +676,10 @@ static void journal_reclaim(struct 
cache_set *c) if (c->journal.blocks_free) goto out; - next = (ja->cur_idx + 1) % ca->sb.njournal_buckets; - /* No space available on this device */ - if (next == ja->discard_idx) + if (!free_journal_buckets(c)) goto out; - ja->cur_idx = next; + ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets; k->ptr[0] = MAKE_PTR(0, bucket_to_sector(c, ca->sb.d[ja->cur_idx]), ca->sb.nr_this_dev); diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h index f2ea34d5f431..cd316b4a1e95 100644 --- a/drivers/md/bcache/journal.h +++ b/drivers/md/bcache/journal.h @@ -105,6 +105,7 @@ struct journal { spinlock_t lock; spinlock_t flush_write_lock; bool btree_flushing; + bool do_reserve; /* used when waiting because the journal was full */ struct closure_waitlist wait; struct closure io; @@ -182,5 +183,6 @@ int bch_journal_replay(struct cache_set *c, struct list_head *list); void bch_journal_free(struct cache_set *c); int bch_journal_alloc(struct cache_set *c); +void bch_journal_space_reserve(struct journal *j); #endif /* _BCACHE_JOURNAL_H */ diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 214326383145..97895262fc54 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1109,6 +1109,12 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio) * which would call closure_get(&dc->disk.cl) */ ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO); + if (!ddip) { + bio->bi_status = BLK_STS_RESOURCE; + bio->bi_end_io(bio); + return; + } + ddip->d = d; /* Count on the bcache device */ ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 81f1cc5b3499..7f5ea2509643 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -2150,6 +2150,7 @@ static int run_cache_set(struct cache_set *c) flash_devs_run(c); + bch_journal_space_reserve(&c->journal); set_bit(CACHE_SET_RUNNING, &c->flags); return 0; err: diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 952253f24175..3aa73da2c67b 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -119,6 +119,53 @@ static void __update_writeback_rate(struct cached_dev *dc) dc->writeback_rate_target = target; } +static bool idle_counter_exceeded(struct cache_set *c) +{ + int counter, dev_nr; + + /* + * If c->idle_counter is overflow (idel for really long time), + * reset as 0 and not set maximum rate this time for code + * simplicity. + */ + counter = atomic_inc_return(&c->idle_counter); + if (counter <= 0) { + atomic_set(&c->idle_counter, 0); + return false; + } + + dev_nr = atomic_read(&c->attached_dev_nr); + if (dev_nr == 0) + return false; + + /* + * c->idle_counter is increased by writeback thread of all + * attached backing devices, in order to represent a rough + * time period, counter should be divided by dev_nr. + * Otherwise the idle time cannot be larger with more backing + * device attached. + * The following calculation equals to checking + * (counter / dev_nr) < (dev_nr * 6) + */ + if (counter < (dev_nr * dev_nr * 6)) + return false; + + return true; +} + +/* + * Idle_counter is increased every time when update_writeback_rate() is + * called. 
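free_journal_buckets() in the bcache hunk above counts free slots between the producer index and the reclaim index on a ring whose size need not be a power of two, then withholds one bucket plus an optional reserve so reclaim always has headroom. A userspace model with a worked example:

#include <stdio.h>

/*
 * Model of free_journal_buckets(): free slots between the producer
 * index (cur) and the reclaim index (discard) on a ring of 'total'
 * buckets, holding back one bucket plus an optional reserve.
 */
static unsigned int free_buckets(unsigned int total, unsigned int cur,
				 unsigned int discard, unsigned int reserve)
{
	unsigned int n;

	/* correct even when total is not a power of two */
	if (cur >= discard)
		n = total + discard - cur;
	else
		n = discard - cur;

	if (n > 1 + reserve)
		return n - (1 + reserve);
	return 0;
}

int main(void)
{
	/* 7-bucket ring, producer at 5, reclaim at 2: raw free = 4 */
	printf("%u\n", free_buckets(7, 5, 2, 0));	/* 3 */
	printf("%u\n", free_buckets(7, 5, 2, 1));	/* 2 */
	printf("%u\n", free_buckets(7, 2, 2, 0));	/* 6: ring empty */
	return 0;
}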
If all backing devices attached to the same cache set have + * identical dc->writeback_rate_update_seconds values, it is about 6 + * rounds of update_writeback_rate() on each backing device before + * c->at_max_writeback_rate is set to 1, and then max wrteback rate set + * to each dc->writeback_rate.rate. + * In order to avoid extra locking cost for counting exact dirty cached + * devices number, c->attached_dev_nr is used to calculate the idle + * throushold. It might be bigger if not all cached device are in write- + * back mode, but it still works well with limited extra rounds of + * update_writeback_rate(). + */ static bool set_at_max_writeback_rate(struct cache_set *c, struct cached_dev *dc) { @@ -129,21 +176,8 @@ static bool set_at_max_writeback_rate(struct cache_set *c, /* Don't set max writeback rate if gc is running */ if (!c->gc_mark_valid) return false; - /* - * Idle_counter is increased everytime when update_writeback_rate() is - * called. If all backing devices attached to the same cache set have - * identical dc->writeback_rate_update_seconds values, it is about 6 - * rounds of update_writeback_rate() on each backing device before - * c->at_max_writeback_rate is set to 1, and then max wrteback rate set - * to each dc->writeback_rate.rate. - * In order to avoid extra locking cost for counting exact dirty cached - * devices number, c->attached_dev_nr is used to calculate the idle - * throushold. It might be bigger if not all cached device are in write- - * back mode, but it still works well with limited extra rounds of - * update_writeback_rate(). - */ - if (atomic_inc_return(&c->idle_counter) < - atomic_read(&c->attached_dev_nr) * 6) + + if (!idle_counter_exceeded(c)) return false; if (atomic_read(&c->at_max_writeback_rate) != 1) @@ -157,13 +191,10 @@ static bool set_at_max_writeback_rate(struct cache_set *c, dc->writeback_rate_change = 0; /* - * Check c->idle_counter and c->at_max_writeback_rate agagain in case - * new I/O arrives during before set_at_max_writeback_rate() returns. - * Then the writeback rate is set to 1, and its new value should be - * decided via __update_writeback_rate(). + * In case new I/O arrives during before + * set_at_max_writeback_rate() returns. 
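idle_counter_exceeded() above tests counter < dev_nr * dev_nr * 6 rather than computing counter / dev_nr < dev_nr * 6; for non-negative integers the two are equivalent, and the rearranged form needs no division per call. A quick userspace check of the equivalence at the boundary:

#include <stdio.h>

int main(void)
{
	int counter = 215, dev_nr = 6;

	/*
	 * (counter / dev_nr) < (dev_nr * 6) rearranged as
	 * counter < dev_nr * dev_nr * 6: same truth value for
	 * non-negative integers, no division per call.
	 */
	printf("%d %d\n", (counter / dev_nr) < dev_nr * 6,
	       counter < dev_nr * dev_nr * 6);		/* 1 1 */

	counter = 216;		/* exactly six rounds per device */
	printf("%d %d\n", (counter / dev_nr) < dev_nr * 6,
	       counter < dev_nr * dev_nr * 6);		/* 0 0 */
	return 0;
}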
*/ - if ((atomic_read(&c->idle_counter) < - atomic_read(&c->attached_dev_nr) * 6) || + if (!idle_counter_exceeded(c) || !atomic_read(&c->at_max_writeback_rate)) return false; @@ -756,13 +787,11 @@ static int bch_writeback_thread(void *arg) /* Init */ #define INIT_KEYS_EACH_TIME 500000 -#define INIT_KEYS_SLEEP_MS 100 struct sectors_dirty_init { struct btree_op op; unsigned int inode; size_t count; - struct bkey start; }; static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, @@ -778,11 +807,8 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, KEY_START(k), KEY_SIZE(k)); op->count++; - if (atomic_read(&b->c->search_inflight) && - !(op->count % INIT_KEYS_EACH_TIME)) { - bkey_copy_key(&op->start, k); - return -EAGAIN; - } + if (!(op->count % INIT_KEYS_EACH_TIME)) + cond_resched(); return MAP_CONTINUE; } @@ -797,24 +823,16 @@ static int bch_root_node_dirty_init(struct cache_set *c, bch_btree_op_init(&op.op, -1); op.inode = d->id; op.count = 0; - op.start = KEY(op.inode, 0, 0); - do { - ret = bcache_btree(map_keys_recurse, - k, - c->root, - &op.op, - &op.start, - sectors_dirty_init_fn, - 0); - if (ret == -EAGAIN) - schedule_timeout_interruptible( - msecs_to_jiffies(INIT_KEYS_SLEEP_MS)); - else if (ret < 0) { - pr_warn("sectors dirty init failed, ret=%d!\n", ret); - break; - } - } while (ret == -EAGAIN); + ret = bcache_btree(map_keys_recurse, + k, + c->root, + &op.op, + &KEY(op.inode, 0, 0), + sectors_dirty_init_fn, + 0); + if (ret < 0) + pr_warn("sectors dirty init failed, ret=%d!\n", ret); return ret; } @@ -858,7 +876,6 @@ static int bch_dirty_init_thread(void *arg) goto out; } skip_nr--; - cond_resched(); } if (p) { @@ -868,7 +885,6 @@ static int bch_dirty_init_thread(void *arg) p = NULL; prev_idx = cur_idx; - cond_resched(); } out: @@ -899,67 +915,56 @@ void bch_sectors_dirty_init(struct bcache_device *d) struct btree_iter iter; struct sectors_dirty_init op; struct cache_set *c = d->c; - struct bch_dirty_init_state *state; - char name[32]; + struct bch_dirty_init_state state; /* Just count root keys if no leaf node */ + rw_lock(0, c->root, c->root->level); if (c->root->level == 0) { bch_btree_op_init(&op.op, -1); op.inode = d->id; op.count = 0; - op.start = KEY(op.inode, 0, 0); for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) sectors_dirty_init_fn(&op.op, c->root, k); + + rw_unlock(0, c->root); return; } - state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL); - if (!state) { - pr_warn("sectors dirty init failed: cannot allocate memory\n"); - return; - } + memset(&state, 0, sizeof(struct bch_dirty_init_state)); + state.c = c; + state.d = d; + state.total_threads = bch_btre_dirty_init_thread_nr(); + state.key_idx = 0; + spin_lock_init(&state.idx_lock); + atomic_set(&state.started, 0); + atomic_set(&state.enough, 0); + init_waitqueue_head(&state.wait); - state->c = c; - state->d = d; - state->total_threads = bch_btre_dirty_init_thread_nr(); - state->key_idx = 0; - spin_lock_init(&state->idx_lock); - atomic_set(&state->started, 0); - atomic_set(&state->enough, 0); - init_waitqueue_head(&state->wait); - - for (i = 0; i < state->total_threads; i++) { - /* Fetch latest state->enough earlier */ + for (i = 0; i < state.total_threads; i++) { + /* Fetch latest state.enough earlier */ smp_mb__before_atomic(); - if (atomic_read(&state->enough)) + if (atomic_read(&state.enough)) break; - state->infos[i].state = state; - atomic_inc(&state->started); - snprintf(name, sizeof(name), "bch_dirty_init[%d]", i); - - state->infos[i].thread = - 
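The sectors_dirty_init rework above swaps a restart-on-EAGAIN protocol for an in-place yield: every INIT_KEYS_EACH_TIME keys the walker calls cond_resched() and continues, keeping its position in the btree instead of re-entering from a saved key. A minimal shape of the pattern, simplified from the hunk:

#include <linux/sched.h>

#define KEYS_PER_BREATH 500000	/* mirrors INIT_KEYS_EACH_TIME */

/*
 * Instead of aborting the walk with -EAGAIN and restarting it later,
 * yield in place every N keys: the walk keeps its position and other
 * tasks still get the CPU.
 */
static int visit_key(unsigned long *count)
{
	(*count)++;
	if (!(*count % KEYS_PER_BREATH))
		cond_resched();
	return 0;	/* MAP_CONTINUE in the real iterator */
}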
kthread_run(bch_dirty_init_thread, - &state->infos[i], - name); - if (IS_ERR(state->infos[i].thread)) { + state.infos[i].state = &state; + state.infos[i].thread = + kthread_run(bch_dirty_init_thread, &state.infos[i], + "bch_dirtcnt[%d]", i); + if (IS_ERR(state.infos[i].thread)) { pr_err("fails to run thread bch_dirty_init[%d]\n", i); for (--i; i >= 0; i--) - kthread_stop(state->infos[i].thread); + kthread_stop(state.infos[i].thread); goto out; } + atomic_inc(&state.started); } - /* - * Must wait for all threads to stop. - */ - wait_event_interruptible(state->wait, - atomic_read(&state->started) == 0); - out: - kfree(state); + /* Must wait for all threads to stop. */ + wait_event(state.wait, atomic_read(&state.started) == 0); + rw_unlock(0, c->root); } void bch_cached_dev_writeback_init(struct cached_dev *dc) diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 3f1230e22de0..0f1d96920630 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -16,7 +16,7 @@ #define BCH_AUTO_GC_DIRTY_THRESHOLD 50 -#define BCH_DIRTY_INIT_THRD_MAX 64 +#define BCH_DIRTY_INIT_THRD_MAX 12 /* * 14 (16384ths) is chosen here as something that each backing device * should be a reasonable fraction of the share, and not to blow up diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index b9677f701b6a..3d975db86434 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -3404,6 +3404,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } +static char hex2asc(unsigned char c) +{ + return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27); +} + static void crypt_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) { @@ -3422,9 +3427,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type, if (cc->key_size > 0) { if (cc->key_string) DMEMIT(":%u:%s", cc->key_size, cc->key_string); - else - for (i = 0; i < cc->key_size; i++) - DMEMIT("%02x", cc->key[i]); + else { + for (i = 0; i < cc->key_size; i++) { + DMEMIT("%c%c", hex2asc(cc->key[i] >> 4), + hex2asc(cc->key[i] & 0xf)); + } + } } else DMEMIT("-"); diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index d9ac7372108c..96bad057bea2 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1396,7 +1396,7 @@ static void start_worker(struct era *era) static void stop_worker(struct era *era) { atomic_set(&era->suspended, 1); - flush_workqueue(era->wq); + drain_workqueue(era->wq); } /*---------------------------------------------------------------- @@ -1566,6 +1566,12 @@ static void era_postsuspend(struct dm_target *ti) } stop_worker(era); + + r = metadata_commit(era->md); + if (r) { + DMERR("%s: metadata_commit failed", __func__); + /* FIXME: fail mode */ + } } static int era_preresume(struct dm_target *ti) diff --git a/drivers/md/dm-historical-service-time.c b/drivers/md/dm-historical-service-time.c index 186f91e2752c..06fe43c13ba3 100644 --- a/drivers/md/dm-historical-service-time.c +++ b/drivers/md/dm-historical-service-time.c @@ -429,7 +429,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps, { struct selector *s = ps->context; struct path_info *pi = NULL, *best = NULL; - u64 time_now = sched_clock(); + u64 time_now = ktime_get_ns(); struct dm_path *ret = NULL; unsigned long flags; @@ -470,7 +470,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path, static u64 path_service_time(struct path_info *pi, u64 start_time) { - u64 sched_now 
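hex2asc() in the dm-crypt hunk above is a branch-free, table-free nibble-to-hex conversion; that matters here because the bytes being formatted are key material, and data-dependent branches or table lookups could leak timing. For c in 0..9 the masked term is 0; for c in 10..15, (unsigned)(9 - c) underflows, the shift keeps the high bits set, and & 0x27 leaves exactly 39, which is 'a' - '0' - 10. A userspace check; the test harness is mine:

#include <stdio.h>
#include <assert.h>

/* hex2asc() copied from the hunk above; the harness is added. */
static char hex2asc(unsigned char c)
{
	return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
}

int main(void)
{
	static const char ref[] = "0123456789abcdef";

	/* c = 10: (unsigned)(9 - 10) = 0xffffffff, >> 4 keeps the
	 * high bits set, & 0x27 leaves 39, and 10 + '0' + 39 = 'a'. */
	for (unsigned c = 0; c < 16; c++)
		assert(hex2asc(c) == ref[c]);

	printf("0x%02x -> %c%c\n", 0xa5,
	       hex2asc(0xa5 >> 4), hex2asc(0xa5 & 0xf));	/* a5 */
	return 0;
}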
= ktime_get_ns(); + u64 now = ktime_get_ns(); /* if a previous disk request has finished after this IO was * sent to the hardware, pretend the submission happened @@ -479,11 +479,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time) if (time_after64(pi->last_finish, start_time)) start_time = pi->last_finish; - pi->last_finish = sched_now; - if (time_before64(sched_now, start_time)) + pi->last_finish = now; + if (time_before64(now, start_time)) return 0; - return sched_now - start_time; + return now - start_time; } static int hst_end_io(struct path_selector *ps, struct dm_path *path, diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index f7471a2642dd..2156a2d5ac70 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -254,6 +254,7 @@ struct dm_integrity_c { struct completion crypto_backoff; + bool wrote_to_journal; bool journal_uptodate; bool just_formatted; bool recalculate_flag; @@ -2256,6 +2257,8 @@ static void integrity_commit(struct work_struct *w) if (!commit_sections) goto release_flush_bios; + ic->wrote_to_journal = true; + i = commit_start; for (n = 0; n < commit_sections; n++) { for (j = 0; j < ic->journal_section_entries; j++) { @@ -2470,10 +2473,6 @@ static void integrity_writer(struct work_struct *w) unsigned prev_free_sectors; - /* the following test is not needed, but it tests the replay code */ - if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) - return; - spin_lock_irq(&ic->endio_wait.lock); write_start = ic->committed_section; write_sections = ic->n_committed_sections; @@ -2980,10 +2979,17 @@ static void dm_integrity_postsuspend(struct dm_target *ti) drain_workqueue(ic->commit_wq); if (ic->mode == 'J') { - if (ic->meta_dev) - queue_work(ic->writer_wq, &ic->writer_work); + queue_work(ic->writer_wq, &ic->writer_work); drain_workqueue(ic->writer_wq); dm_integrity_flush_buffers(ic, true); + if (ic->wrote_to_journal) { + init_journal(ic, ic->free_section, + ic->journal_sections - ic->free_section, ic->commit_seq); + if (ic->free_section) { + init_journal(ic, 0, ic->free_section, + next_commit_seq(ic->commit_seq)); + } + } } if (ic->mode == 'B') { @@ -3011,6 +3017,8 @@ static void dm_integrity_resume(struct dm_target *ti) DEBUG_print("resume\n"); + ic->wrote_to_journal = false; + if (ic->provided_data_sectors != old_provided_data_sectors) { if (ic->provided_data_sectors > old_provided_data_sectors && ic->mode == 'B' && @@ -4232,6 +4240,7 @@ try_smaller_buffer: } if (ic->internal_hash) { + size_t recalc_tags_size; ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); if (!ic->recalc_wq ) { ti->error = "Cannot allocate workqueue"; @@ -4245,8 +4254,10 @@ try_smaller_buffer: r = -ENOMEM; goto bad; } - ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block, - ic->tag_size, GFP_KERNEL); + recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size; + if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size) + recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size; + ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL); if (!ic->recalc_tags) { ti->error = "Cannot allocate tags for recalculating"; r = -ENOMEM; @@ -4324,8 +4335,6 @@ try_smaller_buffer: } if (should_write_sb) { - int r; - init_journal(ic, 0, ic->journal_sections, 0); r = dm_integrity_failed(ic); if (unlikely(r)) { diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 1ca65b434f1f..20171c9d8952 100644 --- a/drivers/md/dm-ioctl.c +++ 
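The dm-integrity recalc_tags_size change above fixes a quiet overrun: tags are packed tag_size bytes apart, but the hash writes a full digest at the final tag's offset, so when the digest is wider than a tag the buffer needs slack at the end. A userspace model of the sizing:

#include <stdlib.h>

/*
 * Model of the sizing fix: n tags packed tag_size bytes apart, with
 * a full digest_size-byte digest written at the last tag's offset.
 */
static void *alloc_recalc_tags(size_t n, size_t tag_size, size_t digest_size)
{
	size_t len = n * tag_size;

	if (digest_size > tag_size)
		len += digest_size - tag_size;	/* slack for the last digest */

	return malloc(len);
}

int main(void)
{
	/*
	 * 8 tags of 4 bytes, 20-byte digest: the final digest lands at
	 * offset 28 and spans 20 bytes, so 48 bytes total, not 32.
	 */
	free(alloc_recalc_tags(8, 4, 20));
	return 0;
}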
b/drivers/md/dm-ioctl.c @@ -17,6 +17,7 @@ #include #include #include +#include <linux/nospec.h> #include @@ -572,7 +573,7 @@ static void list_version_get_needed(struct target_type *tt, void *needed_param) size_t *needed = needed_param; *needed += sizeof(struct dm_target_versions); - *needed += strlen(tt->name); + *needed += strlen(tt->name) + 1; *needed += ALIGN_MASK; } @@ -637,7 +638,7 @@ static int __list_versions(struct dm_ioctl *param, size_t param_size, const char iter_info.old_vers = NULL; iter_info.vers = vers; iter_info.flags = 0; - iter_info.end = (char *)vers+len; + iter_info.end = (char *)vers + needed; /* * Now loop through filling out the names & versions. @@ -1696,6 +1697,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) if (unlikely(cmd >= ARRAY_SIZE(_ioctls))) return NULL; + cmd = array_index_nospec(cmd, ARRAY_SIZE(_ioctls)); *ioctl_flags = _ioctls[cmd].flags; return _ioctls[cmd].fn; } diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 33e71ea6cc14..fe3a9473f338 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -415,8 +415,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, /* * Work out how many "unsigned long"s we need to hold the bitset. */ - bitset_size = dm_round_up(region_count, - sizeof(*lc->clean_bits) << BYTE_SHIFT); + bitset_size = dm_round_up(region_count, BITS_PER_LONG); bitset_size >>= BYTE_SHIFT; lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits); @@ -616,7 +615,7 @@ static int disk_resume(struct dm_dirty_log *log) log_clear_bit(lc, lc->clean_bits, i); /* clear any old bits -- device has shrunk */ - for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++) + for (i = lc->region_count; i % BITS_PER_LONG; i++) log_clear_bit(lc, lc->clean_bits, i); /* copy clean across to sync */ diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index f5083b4a0195..a2d09c9c6e9f 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -1002,12 +1002,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) static int validate_raid_redundancy(struct raid_set *rs) { unsigned int i, rebuild_cnt = 0; - unsigned int rebuilds_per_group = 0, copies; + unsigned int rebuilds_per_group = 0, copies, raid_disks; unsigned int group_size, last_group_start; - for (i = 0; i < rs->md.raid_disks; i++) - if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || - !rs->dev[i].rdev.sb_page) + for (i = 0; i < rs->raid_disks; i++) + if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) && + ((!test_bit(In_sync, &rs->dev[i].rdev.flags) || + !rs->dev[i].rdev.sb_page))) rebuild_cnt++; switch (rs->md.level) { @@ -1047,8 +1048,9 @@ static int validate_raid_redundancy(struct raid_set *rs) * A A B B C * C D D E E */ + raid_disks = min(rs->raid_disks, rs->md.raid_disks); if (__is_raid10_near(rs->md.new_layout)) { - for (i = 0; i < rs->md.raid_disks; i++) { + for (i = 0; i < raid_disks; i++) { if (!(i % copies)) rebuilds_per_group = 0; if ((!rs->dev[i].rdev.sb_page || @@ -1071,10 +1073,10 @@ static int validate_raid_redundancy(struct raid_set *rs) * results in the need to treat the last (potentially larger) * set differently.
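The lookup_ioctl() hunk above is the standard Spectre-v1 mitigation: a bounds check alone does not stop a speculatively executed out-of-bounds load, so the index is additionally clamped with array_index_nospec() before use. A sketch with a hypothetical handler table; array_index_nospec() is the real API from <linux/nospec.h>:

#include <linux/nospec.h>
#include <linux/kernel.h>

struct handler {
	int (*fn)(void *arg);
};

static const struct handler handlers[16];	/* hypothetical table */

static const struct handler *lookup_handler(unsigned int idx)
{
	if (idx >= ARRAY_SIZE(handlers))
		return NULL;

	/*
	 * The CPU may speculate past the check above with an attacker-
	 * controlled idx; array_index_nospec() forces idx to 0 on such
	 * a mispredicted path, closing the Spectre-v1 window.
	 */
	idx = array_index_nospec(idx, ARRAY_SIZE(handlers));
	return &handlers[idx];
}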
*/ - group_size = (rs->md.raid_disks / copies); - last_group_start = (rs->md.raid_disks / group_size) - 1; + group_size = (raid_disks / copies); + last_group_start = (raid_disks / group_size) - 1; last_group_start *= group_size; - for (i = 0; i < rs->md.raid_disks; i++) { + for (i = 0; i < raid_disks; i++) { if (!(i % copies) && !(i > last_group_start)) rebuilds_per_group = 0; if ((!rs->dev[i].rdev.sb_page || @@ -1589,7 +1591,7 @@ static sector_t __rdev_sectors(struct raid_set *rs) { int i; - for (i = 0; i < rs->md.raid_disks; i++) { + for (i = 0; i < rs->raid_disks; i++) { struct md_rdev *rdev = &rs->dev[i].rdev; if (!test_bit(Journal, &rdev->flags) && @@ -3512,7 +3514,7 @@ static void raid_status(struct dm_target *ti, status_type_t type, { struct raid_set *rs = ti->private; struct mddev *mddev = &rs->md; - struct r5conf *conf = mddev->private; + struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL; int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0; unsigned long recovery; unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */ @@ -3732,13 +3734,13 @@ static int raid_iterate_devices(struct dm_target *ti, unsigned int i; int r = 0; - for (i = 0; !r && i < rs->md.raid_disks; i++) - if (rs->dev[i].data_dev) - r = fn(ti, - rs->dev[i].data_dev, - 0, /* No offset on data devs */ - rs->md.dev_sectors, - data); + for (i = 0; !r && i < rs->raid_disks; i++) { + if (rs->dev[i].data_dev) { + r = fn(ti, rs->dev[i].data_dev, + 0, /* No offset on data devs */ + rs->md.dev_sectors, data); + } + } return r; } @@ -3792,7 +3794,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs) memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices)); - for (i = 0; i < mddev->raid_disks; i++) { + for (i = 0; i < rs->raid_disks; i++) { r = &rs->dev[i].rdev; /* HM FIXME: enhance journal device recovery processing */ if (test_bit(Journal, &r->flags)) diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index b1e867feb4f6..4833f4b20b2c 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -492,8 +492,13 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, if (unlikely(!ti)) { int srcu_idx; - struct dm_table *map = dm_get_live_table(md, &srcu_idx); + struct dm_table *map; + map = dm_get_live_table(md, &srcu_idx); + if (unlikely(!map)) { + dm_put_live_table(md, srcu_idx); + return BLK_STS_RESOURCE; + } ti = dm_table_find_target(map, 0); dm_put_live_table(md, srcu_idx); } diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 35d368c418d0..55443a6598fa 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -224,6 +224,7 @@ void dm_stats_cleanup(struct dm_stats *stats) atomic_read(&shared->in_flight[READ]), atomic_read(&shared->in_flight[WRITE])); } + cond_resched(); } dm_stat_free(&s->rcu_head); } @@ -313,6 +314,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, for (ni = 0; ni < n_entries; ni++) { atomic_set(&s->stat_shared[ni].in_flight[READ], 0); atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0); + cond_resched(); } if (s->n_histogram_entries) { @@ -325,6 +327,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, for (ni = 0; ni < n_entries; ni++) { s->stat_shared[ni].tmp.histogram = hi; hi += s->n_histogram_entries + 1; + cond_resched(); } } @@ -345,6 +348,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, for (ni = 0; ni < n_entries; ni++) { p[ni].histogram = hi; hi += s->n_histogram_entries + 1; + cond_resched(); } } } @@ -474,6 
+478,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program, } DMEMIT("\n"); } + cond_resched(); } mutex_unlock(&stats->mutex); @@ -750,6 +755,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end, local_irq_enable(); } } + cond_resched(); } } @@ -865,6 +871,8 @@ static int dm_stats_print(struct dm_stats *stats, int id, if (unlikely(sz + 1 >= maxlen)) goto buffer_overflow; + + cond_resched(); } if (clear) diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 6ebb2127f3e2..842d79e5ea3a 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -2058,10 +2058,13 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, dm_sm_threshold_fn fn, void *context) { - int r; + int r = -EINVAL; pmd_write_lock_in_core(pmd); - r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context); + if (!pmd->fail_io) { + r = dm_sm_register_threshold_callback(pmd->metadata_sm, + threshold, fn, context); + } pmd_write_unlock(pmd); return r; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index fff4c50df74d..a196d7cb51bd 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3401,8 +3401,10 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) calc_metadata_threshold(pt), metadata_low_callback, pool); - if (r) + if (r) { + ti->error = "Error registering metadata threshold"; goto out_flags_changed; + } dm_pool_register_pre_commit_callback(pool->pmd, metadata_pre_commit_callback, pool); diff --git a/drivers/md/dm-user.c b/drivers/md/dm-user.c index e5a85202d8a0..a8d771855e79 100644 --- a/drivers/md/dm-user.c +++ b/drivers/md/dm-user.c @@ -188,7 +188,6 @@ static void message_kill(struct message *m, mempool_t *pool) { m->bio->bi_status = BLK_STS_IOERR; bio_endio(m->bio); - bio_put(m->bio); mempool_free(m, pool); } @@ -989,7 +988,6 @@ finish_bio: */ WARN_ON(bio_size(c->cur_from_user->bio) != 0); bio_endio(c->cur_from_user->bio); - bio_put(c->cur_from_user->bio); /* * We don't actually need to take the target lock here, as all @@ -1227,7 +1225,6 @@ static int user_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REQUEUE; } - bio_get(bio); entry->msg.type = bio_type_to_user_type(bio); entry->msg.flags = bio_flags_to_user_flags(bio); entry->msg.sector = bio->bi_iter.bi_sector; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 9d5c6dd5b756..1c8038c665ec 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1251,7 +1251,8 @@ bad: static struct target_type verity_target = { .name = "verity", + .features = DM_TARGET_IMMUTABLE, .version = {1, 7, 0}, .module = THIS_MODULE, .ctr = verity_ctr, .dtr = verity_dtr, diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 9d6ae3e64285..13cc318db012 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -20,7 +20,7 @@ #define HIGH_WATERMARK 50 #define LOW_WATERMARK 45 -#define MAX_WRITEBACK_JOBS 0 +#define MAX_WRITEBACK_JOBS min(0x10000000 / PAGE_SIZE, totalram_pages() / 16) #define ENDIO_LATENCY 16 #define WRITEBACK_LATENCY 64 #define AUTOCOMMIT_BLOCKS_SSD 65536 diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 9fd176e225af..be1bf1c1d0ac 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -609,17 +609,19 @@ static void start_io_acct(struct dm_io *io) } static void end_io_acct(struct mapped_device *md, struct bio *bio, - unsigned long start_time, struct dm_stats_aux
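The dm-writecache change above replaces the old unlimited MAX_WRITEBACK_JOBS of 0 with the smaller of 256 MiB worth of pages and 1/16 of RAM, bounding the memory pinned by in-flight writeback. A userspace model of the arithmetic; page size and RAM size are assumed values:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;			/* assumed */
	unsigned long pages = (1UL << 30) / page_size;	/* 1 GiB box */

	unsigned long cap_fixed = 0x10000000 / page_size; /* 256 MiB */
	unsigned long cap_ram = pages / 16;		  /* 1/16 of RAM */
	unsigned long jobs = cap_fixed < cap_ram ? cap_fixed : cap_ram;

	/* 65536 vs 16384 -> 16384 concurrent writeback jobs here */
	printf("MAX_WRITEBACK_JOBS = %lu\n", jobs);
	return 0;
}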
*stats_aux) + unsigned long start_time, struct dm_stats_aux *stats_aux) { unsigned long duration = jiffies - start_time; - bio_end_io_acct(bio, start_time); - if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector, bio_sectors(bio), true, duration, stats_aux); + smp_wmb(); + + bio_end_io_acct(bio, start_time); + /* nudge anyone waiting on suspend queue */ if (unlikely(wq_has_sleeper(&md->wait))) wake_up(&md->wait); @@ -1696,15 +1698,10 @@ static blk_qc_t dm_submit_bio(struct bio *bio) struct dm_table *map; map = dm_get_live_table(md, &srcu_idx); - if (unlikely(!map)) { - DMERR_LIMIT("%s: mapping table unavailable, erroring io", - dm_device_name(md)); - bio_io_error(bio); - goto out; - } - /* If suspended, queue this IO for later */ - if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { + /* If suspended, or map not yet available, queue this IO for later */ + if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || + unlikely(!map)) { if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); else if (bio->bi_opf & REQ_RAHEAD) @@ -2369,6 +2366,8 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state } finish_wait(&md->wait, &wait); + smp_rmb(); + return r; } @@ -3012,6 +3011,11 @@ static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, goto out; ti = dm_table_get_target(table, 0); + if (dm_suspended_md(md)) { + ret = -EAGAIN; + goto out; + } + ret = -EINVAL; if (!ti->type->iterate_devices) goto out; diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index ea3130e11680..d377ea060925 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -639,14 +639,6 @@ re_read: daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; write_behind = le32_to_cpu(sb->write_behind); sectors_reserved = le32_to_cpu(sb->sectors_reserved); - /* Setup nodes/clustername only if bitmap version is - * cluster-compatible - */ - if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) { - nodes = le32_to_cpu(sb->nodes); - strlcpy(bitmap->mddev->bitmap_info.cluster_name, - sb->cluster_name, 64); - } /* verify that the bitmap-specific fields are valid */ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) @@ -668,6 +660,16 @@ re_read: goto out; } + /* + * Setup nodes/clustername only if bitmap version is + * cluster-compatible + */ + if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) { + nodes = le32_to_cpu(sb->nodes); + strlcpy(bitmap->mddev->bitmap_info.cluster_name, + sb->cluster_name, 64); + } + /* keep the array size field of the bitmap superblock up to date */ sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); @@ -700,9 +702,9 @@ re_read: out: kunmap_atomic(sb); - /* Assigning chunksize is required for "re_read" */ - bitmap->mddev->bitmap_info.chunksize = chunksize; if (err == 0 && nodes && (bitmap->cluster_slot < 0)) { + /* Assigning chunksize is required for "re_read" */ + bitmap->mddev->bitmap_info.chunksize = chunksize; err = md_setup_cluster(bitmap->mddev, nodes); if (err) { pr_warn("%s: Could not setup cluster service (%d)\n", @@ -713,18 +715,18 @@ out: goto re_read; } - out_no_sb: - if (test_bit(BITMAP_STALE, &bitmap->flags)) - bitmap->events_cleared = bitmap->mddev->events; - bitmap->mddev->bitmap_info.chunksize = chunksize; - bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; - bitmap->mddev->bitmap_info.max_write_behind = write_behind; - bitmap->mddev->bitmap_info.nodes = nodes; - if (bitmap->mddev->bitmap_info.space == 0 || - 
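The smp_wmb() added to end_io_acct() above pairs with the smp_rmb() in dm_wait_for_bios_completion(): the completing side publishes its accounting before touching the in-flight state the waiter polls, and the waiter orders its later reads after observing it. A sketch of the pairing with a hypothetical dev_state; initialization is elided:

#include <linux/atomic.h>
#include <linux/wait.h>

struct dev_state {			/* hypothetical */
	atomic_t pending;
	wait_queue_head_t wait;
	unsigned long stats;
};

/* Completion side: publish accounting before the polled counter. */
static void complete_io(struct dev_state *s)
{
	s->stats++;	/* account this I/O */
	smp_wmb();	/* pairs with smp_rmb() in wait_for_idle() */
	if (atomic_dec_and_test(&s->pending))
		wake_up(&s->wait);
}

/* Suspend side: once pending reaches zero, the barrier guarantees
 * the accounting writes made before the decrement are visible. */
static void wait_for_idle(struct dev_state *s)
{
	wait_event(s->wait, atomic_read(&s->pending) == 0);
	smp_rmb();	/* pairs with smp_wmb() in complete_io() */
	/* safe to read s->stats and tear down now */
}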
bitmap->mddev->bitmap_info.space > sectors_reserved) - bitmap->mddev->bitmap_info.space = sectors_reserved; - if (err) { + if (err == 0) { + if (test_bit(BITMAP_STALE, &bitmap->flags)) + bitmap->events_cleared = bitmap->mddev->events; + bitmap->mddev->bitmap_info.chunksize = chunksize; + bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; + bitmap->mddev->bitmap_info.max_write_behind = write_behind; + bitmap->mddev->bitmap_info.nodes = nodes; + if (bitmap->mddev->bitmap_info.space == 0 || + bitmap->mddev->bitmap_info.space > sectors_reserved) + bitmap->mddev->bitmap_info.space = sectors_reserved; + } else { md_bitmap_print_sb(bitmap); if (bitmap->cluster_slot < 0) md_cluster_stop(bitmap->mddev); diff --git a/drivers/md/md.c b/drivers/md/md.c index cc3876500c4b..0043dec37a87 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2648,14 +2648,16 @@ static void sync_sbs(struct mddev *mddev, int nospares) static bool does_sb_need_changing(struct mddev *mddev) { - struct md_rdev *rdev; + struct md_rdev *rdev = NULL, *iter; struct mdp_superblock_1 *sb; int role; /* Find a good rdev */ - rdev_for_each(rdev, mddev) - if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags)) + rdev_for_each(iter, mddev) + if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) { + rdev = iter; break; + } /* No good device found. */ if (!rdev) @@ -6297,6 +6299,7 @@ void md_stop(struct mddev *mddev) /* stop the array and free an attached data structures. * This is called from dm-raid */ + __md_stop_writes(mddev); __md_stop(mddev); bioset_exit(&mddev->bio_set); bioset_exit(&mddev->sync_set); @@ -7968,17 +7971,22 @@ EXPORT_SYMBOL(md_register_thread); void md_unregister_thread(struct md_thread **threadp) { - struct md_thread *thread = *threadp; - if (!thread) - return; - pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); - /* Locking ensures that mddev_unlock does not wake_up a + struct md_thread *thread; + + /* + * Locking ensures that mddev_unlock does not wake_up a * non-existent thread */ spin_lock(&pers_lock); + thread = *threadp; + if (!thread) { + spin_unlock(&pers_lock); + return; + } *threadp = NULL; spin_unlock(&pers_lock); + pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); kthread_stop(thread->tsk); kfree(thread); } @@ -9417,6 +9425,7 @@ void md_reap_sync_thread(struct mddev *mddev) wake_up(&resync_wait); /* flag recovery needed just to double check */ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + sysfs_notify_dirent_safe(mddev->sysfs_completed); sysfs_notify_dirent_safe(mddev->sysfs_action); md_new_event(mddev); if (mddev->event_work.func) @@ -9728,16 +9737,18 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) void md_reload_sb(struct mddev *mddev, int nr) { - struct md_rdev *rdev; + struct md_rdev *rdev = NULL, *iter; int err; /* Find the rdev */ - rdev_for_each_rcu(rdev, mddev) { - if (rdev->desc_nr == nr) + rdev_for_each_rcu(iter, mddev) { + if (iter->desc_nr == nr) { + rdev = iter; break; + } } - if (!rdev || rdev->desc_nr != nr) { + if (!rdev) { pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr); return; } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 35843df15b5e..a20332e755e8 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -48,7 +48,7 @@ static void dump_zones(struct mddev *mddev) int len = 0; for (k = 0; k < conf->strip_zone[j].nb_dev; k++) - len += snprintf(line+len, 200-len, "%s%s", k?"/":"", + len += scnprintf(line+len, 200-len, "%s%s", k?"/":"", 
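The md_unregister_thread() fix above moves the read of *threadp inside pers_lock, so two racing callers cannot both observe the same pointer and double-stop the thread; the sleeping kthread_stop() still runs after the lock is dropped. A generalized sketch of that shape:

#include <linux/kthread.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(thread_lock);

static void unregister_thread(struct task_struct **threadp)
{
	struct task_struct *t;

	spin_lock(&thread_lock);
	t = *threadp;		/* read under the lock, not before it */
	if (!t) {
		spin_unlock(&thread_lock);
		return;
	}
	*threadp = NULL;
	spin_unlock(&thread_lock);

	kthread_stop(t);	/* sleeps, so it runs outside the lock */
}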
bdevname(conf->devlist[j*raid_disks + k]->bdev, b)); pr_debug("md: zone%d=[%s]\n", j, line); @@ -128,21 +128,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) pr_debug("md/raid0:%s: FINAL %d zones\n", mdname(mddev), conf->nr_strip_zones); - if (conf->nr_strip_zones == 1) { - conf->layout = RAID0_ORIG_LAYOUT; - } else if (mddev->layout == RAID0_ORIG_LAYOUT || - mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) { - conf->layout = mddev->layout; - } else if (default_layout == RAID0_ORIG_LAYOUT || - default_layout == RAID0_ALT_MULTIZONE_LAYOUT) { - conf->layout = default_layout; - } else { - pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n", - mdname(mddev)); - pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n"); - err = -ENOTSUPP; - goto abort; - } /* * now since we have the hard sector sizes, we can make sure * chunk size is a multiple of that sector size @@ -273,6 +258,22 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) (unsigned long long)smallest->sectors); } + if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) { + conf->layout = RAID0_ORIG_LAYOUT; + } else if (mddev->layout == RAID0_ORIG_LAYOUT || + mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) { + conf->layout = mddev->layout; + } else if (default_layout == RAID0_ORIG_LAYOUT || + default_layout == RAID0_ALT_MULTIZONE_LAYOUT) { + conf->layout = default_layout; + } else { + pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n", + mdname(mddev)); + pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n"); + err = -EOPNOTSUPP; + goto abort; + } + pr_debug("md/raid0:%s: done.\n", mdname(mddev)); *private_conf = conf; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 70dccc3c9631..0e741a8d278d 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1809,9 +1809,12 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) int err = 0; int number = rdev->raid_disk; struct md_rdev **rdevp; - struct raid10_info *p = conf->mirrors + number; + struct raid10_info *p; print_conf(conf); + if (unlikely(number >= mddev->raid_disks)) + return 0; + p = conf->mirrors + number; if (rdev == p->rdev) rdevp = &p->rdev; else if (rdev == p->replacement) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 39343479ac2a..0dea4aa18c66 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -36,6 +36,7 @@ */ #include +#include #include #include #include @@ -686,17 +687,17 @@ int raid5_calc_degraded(struct r5conf *conf) return degraded; } -static int has_failed(struct r5conf *conf) +static bool has_failed(struct r5conf *conf) { - int degraded; + int degraded = conf->mddev->degraded; - if (conf->mddev->reshape_position == MaxSector) - return conf->mddev->degraded > conf->max_degraded; + if (test_bit(MD_BROKEN, &conf->mddev->flags)) + return true; - degraded = raid5_calc_degraded(conf); - if (degraded > conf->max_degraded) - return 1; - return 0; + if (conf->mddev->reshape_position != MaxSector) + degraded = raid5_calc_degraded(conf); + + return degraded > conf->max_degraded; } struct stripe_head * @@ -2863,10 +2864,10 @@ static void raid5_end_write_request(struct bio *bi) if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) clear_bit(R5_LOCKED, &sh->dev[i].flags); set_bit(STRIPE_HANDLE, &sh->state); - raid5_release_stripe(sh); if (sh->batch_head && sh != sh->batch_head) raid5_release_stripe(sh->batch_head); + raid5_release_stripe(sh); } static void 
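dump_zones() above switches snprintf() to scnprintf() inside a len += accumulation. snprintf() returns the length the output would have had, so once truncation starts, line + len can walk past the buffer; scnprintf() returns the bytes actually stored. A userspace demonstration; scnprintf() is kernel-only, so its result is modeled by clamping:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8];

	/*
	 * snprintf() returns the length the output WOULD have had, so
	 * a "len +=" accumulator can point past the buffer once output
	 * is truncated, corrupting later buf+len / size-len arguments.
	 */
	int len = snprintf(buf, sizeof(buf), "0123456789");
	printf("returned %d, stored %zu\n", len, strlen(buf)); /* 10, 7 */

	/* what the kernel's scnprintf() would have returned: */
	if (len >= (int)sizeof(buf))
		len = sizeof(buf) - 1;
	printf("scnprintf would return %d\n", len);	/* 7 */
	return 0;
}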
raid5_error(struct mddev *mddev, struct md_rdev *rdev) @@ -2876,34 +2877,31 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) unsigned long flags; pr_debug("raid456: error called\n"); + pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n", + mdname(mddev), bdevname(rdev->bdev, b)); + spin_lock_irqsave(&conf->device_lock, flags); - - if (test_bit(In_sync, &rdev->flags) && - mddev->degraded == conf->max_degraded) { - /* - * Don't allow to achieve failed state - * Don't try to recover this device - */ - conf->recovery_disabled = mddev->recovery_disabled; - spin_unlock_irqrestore(&conf->device_lock, flags); - return; - } - set_bit(Faulty, &rdev->flags); clear_bit(In_sync, &rdev->flags); mddev->degraded = raid5_calc_degraded(conf); + + if (has_failed(conf)) { + set_bit(MD_BROKEN, &conf->mddev->flags); + conf->recovery_disabled = mddev->recovery_disabled; + + pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n", + mdname(mddev), mddev->degraded, conf->raid_disks); + } else { + pr_crit("md/raid:%s: Operation continuing on %d devices.\n", + mdname(mddev), conf->raid_disks - mddev->degraded); + } + spin_unlock_irqrestore(&conf->device_lock, flags); set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(Blocked, &rdev->flags); set_mask_bits(&mddev->sb_flags, 0, BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); - pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" - "md/raid:%s: Operation continuing on %d devices.\n", - mdname(mddev), - bdevname(rdev->bdev, b), - mdname(mddev), - conf->raid_disks - mddev->degraded); r5c_update_on_rdev_error(mddev, rdev); } @@ -3938,7 +3936,7 @@ static void handle_stripe_fill(struct stripe_head *sh, * back cache (prexor with orig_page, and then xor with * page) in the read path */ - if (s->injournal && s->failed) { + if (s->to_read && s->injournal && s->failed) { if (test_bit(STRIPE_R5C_CACHING, &sh->state)) r5c_make_stripe_write_out(sh); goto out; @@ -6521,7 +6519,18 @@ static void raid5d(struct md_thread *thread) spin_unlock_irq(&conf->device_lock); md_check_recovery(mddev); spin_lock_irq(&conf->device_lock); + + /* + * Waiting on MD_SB_CHANGE_PENDING below may deadlock + * seeing md_check_recovery() is needed to clear + * the flag when using mdmon. + */ + continue; } + + wait_event_lock_irq(mddev->sb_wait, + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), + conf->device_lock); } pr_debug("%d stripes handled\n", handled); @@ -8006,6 +8015,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) */ if (rdev->saved_raid_disk >= 0 && rdev->saved_raid_disk >= first && + rdev->saved_raid_disk <= last && conf->disks[rdev->saved_raid_disk].rdev == NULL) first = rdev->saved_raid_disk; diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c index 2e5698fbc3a8..e23aa608f66f 100644 --- a/drivers/media/cec/core/cec-adap.c +++ b/drivers/media/cec/core/cec-adap.c @@ -1271,7 +1271,7 @@ static int cec_config_log_addr(struct cec_adapter *adap, * While trying to poll the physical address was reset * and the adapter was unconfigured, so bail out. 
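raid5d() above waits for MD_SB_CHANGE_PENDING with wait_event_lock_irq(), which drops the given spinlock while sleeping and retakes it before re-evaluating the condition; that is what lets the daemon sleep without holding device_lock. A minimal sketch of that contract with hypothetical state names:

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

static DEFINE_SPINLOCK(state_lock);
static DECLARE_WAIT_QUEUE_HEAD(state_wq);
static unsigned long state_flags;
#define STATE_CHANGE_PENDING 0	/* models MD_SB_CHANGE_PENDING */

static void wait_for_sb_write(void)
{
	spin_lock_irq(&state_lock);
	/*
	 * wait_event_lock_irq() releases state_lock while sleeping and
	 * reacquires it before re-testing the condition, so the caller
	 * never sleeps with the spinlock held.
	 */
	wait_event_lock_irq(state_wq,
			    !test_bit(STATE_CHANGE_PENDING, &state_flags),
			    state_lock);
	/* state_lock is held again here */
	spin_unlock_irq(&state_lock);
}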
*/ - if (!adap->is_configuring) + if (adap->phys_addr == CEC_PHYS_ADDR_INVALID) return -EINTR; if (err) @@ -1328,7 +1328,6 @@ static void cec_adap_unconfigure(struct cec_adapter *adap) adap->phys_addr != CEC_PHYS_ADDR_INVALID) WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID)); adap->log_addrs.log_addr_mask = 0; - adap->is_configuring = false; adap->is_configured = false; cec_flush(adap); wake_up_interruptible(&adap->kthread_waitq); @@ -1520,9 +1519,10 @@ unconfigure: for (i = 0; i < las->num_log_addrs; i++) las->log_addr[i] = CEC_LOG_ADDR_INVALID; cec_adap_unconfigure(adap); + adap->is_configuring = false; adap->kthread_config = NULL; - mutex_unlock(&adap->lock); complete(&adap->config_completion); + mutex_unlock(&adap->lock); return 0; } diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c index 2d95e16cd248..f66699d5dc66 100644 --- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c +++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c @@ -44,6 +44,8 @@ static void handle_cec_message(struct cros_ec_cec *cros_ec_cec) uint8_t *cec_message = cros_ec->event_data.data.cec_message; unsigned int len = cros_ec->event_size; + if (len > CEC_MAX_MSG_SIZE) + len = CEC_MAX_MSG_SIZE; cros_ec_cec->rx_msg.len = len; memcpy(cros_ec_cec->rx_msg.msg, cec_message, len); diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c index 028a09a7531e..102f1af01000 100644 --- a/drivers/media/cec/platform/s5p/s5p_cec.c +++ b/drivers/media/cec/platform/s5p/s5p_cec.c @@ -115,6 +115,8 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv) dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n"); cec->rx = STATE_BUSY; cec->msg.len = status >> 24; + if (cec->msg.len > CEC_MAX_MSG_SIZE) + cec->msg.len = CEC_MAX_MSG_SIZE; cec->msg.rx_status = CEC_RX_STATUS_OK; s5p_cec_get_rx_buf(cec, cec->msg.len, cec->msg.msg); diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 72350343a56a..3bafde87a125 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -787,7 +787,13 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, num_buffers = max_t(unsigned int, *count, q->min_buffers_needed); num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME); memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); + /* + * Set this now to ensure that drivers see the correct q->memory value + * in the queue_setup op. + */ + mutex_lock(&q->mmap_lock); q->memory = memory; + mutex_unlock(&q->mmap_lock); /* * Ask the driver how many buffers and planes per buffer it requires. 
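Both CEC fixes above clamp a device-supplied length to CEC_MAX_MSG_SIZE (16 in the kernel uapi) before copying into the fixed receive buffer, so a misbehaving EC or status register cannot overflow it. The shape of the check:

#include <string.h>

#define CEC_MAX_MSG_SIZE 16	/* value from the kernel's uapi cec.h */

struct rx_msg {
	unsigned int len;
	unsigned char msg[CEC_MAX_MSG_SIZE];
};

/*
 * len comes from the device (an EC event payload or a status
 * register), so it is untrusted and must be clamped before the copy.
 */
static void store_rx(struct rx_msg *rx, const unsigned char *data,
		     unsigned int len)
{
	if (len > CEC_MAX_MSG_SIZE)
		len = CEC_MAX_MSG_SIZE;
	rx->len = len;
	memcpy(rx->msg, data, len);
}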
@@ -796,22 +802,27 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, plane_sizes, q->alloc_devs); if (ret) - return ret; + goto error; /* Check that driver has set sane values */ - if (WARN_ON(!num_planes)) - return -EINVAL; + if (WARN_ON(!num_planes)) { + ret = -EINVAL; + goto error; + } for (i = 0; i < num_planes; i++) - if (WARN_ON(!plane_sizes[i])) - return -EINVAL; + if (WARN_ON(!plane_sizes[i])) { + ret = -EINVAL; + goto error; + } /* Finally, allocate buffers and video memory */ allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes); if (allocated_buffers == 0) { dprintk(q, 1, "memory allocation failed\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error; } /* @@ -852,7 +863,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, if (ret < 0) { /* * Note: __vb2_queue_free() will subtract 'allocated_buffers' - * from q->num_buffers. + * from q->num_buffers and it will reset q->memory to + * VB2_MEMORY_UNKNOWN. */ __vb2_queue_free(q, allocated_buffers); mutex_unlock(&q->mmap_lock); @@ -868,6 +880,12 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, q->waiting_for_buffers = !q->is_output; return 0; + +error: + mutex_lock(&q->mmap_lock); + q->memory = VB2_MEMORY_UNKNOWN; + mutex_unlock(&q->mmap_lock); + return ret; } EXPORT_SYMBOL_GPL(vb2_core_reqbufs); @@ -878,6 +896,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, { unsigned int num_planes = 0, num_buffers, allocated_buffers; unsigned plane_sizes[VB2_MAX_PLANES] = { }; + bool no_previous_buffers = !q->num_buffers; int ret; if (q->num_buffers == VB2_MAX_FRAME) { @@ -885,13 +904,19 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, return -ENOBUFS; } - if (!q->num_buffers) { + if (no_previous_buffers) { if (q->waiting_in_dqbuf && *count) { dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); return -EBUSY; } memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); + /* + * Set this now to ensure that drivers see the correct q->memory + * value in the queue_setup op. + */ + mutex_lock(&q->mmap_lock); q->memory = memory; + mutex_unlock(&q->mmap_lock); q->waiting_for_buffers = !q->is_output; } else { if (q->memory != memory) { @@ -914,14 +939,15 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, plane_sizes, q->alloc_devs); if (ret) - return ret; + goto error; /* Finally, allocate buffers and video memory */ allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes); if (allocated_buffers == 0) { dprintk(q, 1, "memory allocation failed\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error; } /* @@ -952,7 +978,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, if (ret < 0) { /* * Note: __vb2_queue_free() will subtract 'allocated_buffers' - * from q->num_buffers. + * from q->num_buffers and it will reset q->memory to + * VB2_MEMORY_UNKNOWN. 
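The vb2_core_reqbufs()/vb2_core_create_bufs() hunks above treat q->memory as state published and rolled back under q->mmap_lock: it is set before queue_setup so drivers see it, and reset to VB2_MEMORY_UNKNOWN on every error path so a failed REQBUFS leaves no half-configured queue behind. A condensed sketch with hypothetical names:

#include <linux/mutex.h>

enum mem_mode { MEM_UNKNOWN, MEM_MMAP, MEM_USERPTR };	/* hypothetical */

struct queue {
	struct mutex cfg_lock;		/* models q->mmap_lock */
	enum mem_mode memory;
};

static int allocate_buffers(struct queue *q)
{
	return 0;	/* hypothetical; may fail with -ENOMEM */
}

static int configure(struct queue *q, enum mem_mode mode)
{
	int ret;

	/* publish under the lock the mmap paths take ... */
	mutex_lock(&q->cfg_lock);
	q->memory = mode;
	mutex_unlock(&q->cfg_lock);

	ret = allocate_buffers(q);
	if (ret) {
		/* ... and roll back on failure, so no reader ever sees
		 * a mode without the buffers that go with it */
		mutex_lock(&q->cfg_lock);
		q->memory = MEM_UNKNOWN;
		mutex_unlock(&q->cfg_lock);
	}
	return ret;
}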
*/ __vb2_queue_free(q, allocated_buffers); mutex_unlock(&q->mmap_lock); @@ -967,6 +994,14 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, *count = allocated_buffers; return 0; + +error: + if (no_previous_buffers) { + mutex_lock(&q->mmap_lock); + q->memory = VB2_MEMORY_UNKNOWN; + mutex_unlock(&q->mmap_lock); + } + return ret; } EXPORT_SYMBOL_GPL(vb2_core_create_bufs); @@ -2120,6 +2155,22 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off, struct vb2_buffer *vb; unsigned int buffer, plane; + /* + * Sanity checks to ensure the lock is held, MEMORY_MMAP is + * used and fileio isn't active. + */ + lockdep_assert_held(&q->mmap_lock); + + if (q->memory != VB2_MEMORY_MMAP) { + dprintk(q, 1, "queue is not currently set up for mmap\n"); + return -EINVAL; + } + + if (vb2_fileio_is_active(q)) { + dprintk(q, 1, "file io in progress\n"); + return -EBUSY; + } + /* * Go over all buffers and their planes, comparing the given offset * with an offset assigned to each plane. If a match is found, @@ -2219,11 +2270,6 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) int ret; unsigned long length; - if (q->memory != VB2_MEMORY_MMAP) { - dprintk(q, 1, "queue is not currently set up for mmap\n"); - return -EINVAL; - } - /* * Check memory area access mode. */ @@ -2245,14 +2291,9 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) mutex_lock(&q->mmap_lock); - if (vb2_fileio_is_active(q)) { - dprintk(q, 1, "mmap: file io in progress\n"); - ret = -EBUSY; - goto unlock; - } - /* - * Find the plane corresponding to the offset passed by userspace. + * Find the plane corresponding to the offset passed by userspace. This + * will return an error if not MEMORY_MMAP or file I/O is in progress. */ ret = __find_plane_by_offset(q, off, &buffer, &plane); if (ret) @@ -2305,22 +2346,25 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q, void *vaddr; int ret; - if (q->memory != VB2_MEMORY_MMAP) { - dprintk(q, 1, "queue is not currently set up for mmap\n"); - return -EINVAL; - } + mutex_lock(&q->mmap_lock); /* - * Find the plane corresponding to the offset passed by userspace. + * Find the plane corresponding to the offset passed by userspace. This + * will return an error if not MEMORY_MMAP or file I/O is in progress. */ ret = __find_plane_by_offset(q, off, &buffer, &plane); if (ret) - return ret; + goto unlock; vb = q->bufs[buffer]; vaddr = vb2_plane_vaddr(vb, plane); + mutex_unlock(&q->mmap_lock); return vaddr ? 
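__find_plane_by_offset() above now states its locking contract with lockdep_assert_held(): the checks that moved into the helper are only safe under q->mmap_lock, and a debug-build splat on an unlocked caller is far cheaper than a silent race. The general pattern:

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(cfg_lock);

static int find_by_offset(unsigned long off)
{
	/*
	 * The helper is only correct under cfg_lock; asserting that
	 * here turns a future unlocked caller into an immediate
	 * lockdep warning instead of a silent race.
	 */
	lockdep_assert_held(&cfg_lock);

	/* ... walk state protected by cfg_lock ... */
	return 0;
}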
(unsigned long)vaddr : -EINVAL; + +unlock: + mutex_unlock(&q->mmap_lock); + return ret; } EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); #endif diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index e58cb8434daf..12b7f698f562 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -800,6 +800,11 @@ static int dvb_demux_open(struct inode *inode, struct file *file) if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; + if (dmxdev->exit) { + mutex_unlock(&dmxdev->mutex); + return -ENODEV; + } + for (i = 0; i < dmxdev->filternum; i++) if (dmxdev->filter[i].state == DMXDEV_STATE_FREE) break; @@ -1458,7 +1463,10 @@ EXPORT_SYMBOL(dvb_dmxdev_init); void dvb_dmxdev_release(struct dmxdev *dmxdev) { + mutex_lock(&dmxdev->mutex); dmxdev->exit = 1; + mutex_unlock(&dmxdev->mutex); + if (dmxdev->dvbdev->users > 1) { wait_event(dmxdev->dvbdev->wait_queue, dmxdev->dvbdev->users == 1); diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c index 6974f1731529..1331f2c2237e 100644 --- a/drivers/media/dvb-core/dvb_vb2.c +++ b/drivers/media/dvb-core/dvb_vb2.c @@ -358,6 +358,12 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req) int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) { + struct vb2_queue *q = &ctx->vb_q; + + if (b->index >= q->num_buffers) { + dprintk(1, "[%s] buffer index out of range\n", ctx->name); + return -EINVAL; + } vb2_core_querybuf(&ctx->vb_q, b->index, b); dprintk(3, "[%s] index=%d\n", ctx->name, b->index); return 0; @@ -382,8 +388,13 @@ int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp) int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) { + struct vb2_queue *q = &ctx->vb_q; int ret; + if (b->index >= q->num_buffers) { + dprintk(1, "[%s] buffer index out of range\n", ctx->name); + return -EINVAL; + } ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL); if (ret) { dprintk(1, "[%s] index=%d errno=%d\n", ctx->name, diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index a57470bf71bf..2134e25096aa 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -6672,7 +6672,7 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr) static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct drxk_state *state = fe->demodulator_priv; - u16 err; + u16 err = 0; dprintk(1, "\n"); diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c index b42b289faaef..154776d0069e 100644 --- a/drivers/media/i2c/ov7670.c +++ b/drivers/media/i2c/ov7670.c @@ -2000,7 +2000,6 @@ static int ov7670_remove(struct i2c_client *client) v4l2_async_unregister_subdev(sd); v4l2_ctrl_handler_free(&info->hdl); media_entity_cleanup(&info->sd.entity); - ov7670_power_off(sd); return 0; } diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 4e8132d4b2df..a23c025595a0 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -2154,7 +2154,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev, err = pci_set_dma_mask(pci_dev, 0xffffffff); if (err) { pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); - goto fail_ctrl; + goto fail_dma_set_mask; } err = request_irq(pci_dev->irq, cx23885_irq, @@ -2162,7 +2162,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev, if (err < 0) { pr_err("%s: can't get IRQ %d\n", dev->name, pci_dev->irq); - goto 
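[Annotation] The dvb_vb2_querybuf()/dvb_vb2_qbuf() hunks reject an out-of-range b->index before it can index q->bufs[]. The generic shape of the check, names illustrative:

#include <linux/errno.h>
#include <linux/types.h>

static int check_buffer_index(u32 index, u32 num_buffers)
{
	if (index >= num_buffers)	/* untrusted, straight from the ioctl */
		return -EINVAL;
	return 0;
}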
fail_irq; + goto fail_dma_set_mask; } switch (dev->board) { @@ -2184,7 +2184,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev, return 0; -fail_irq: +fail_dma_set_mask: cx23885_dev_unregister(dev); fail_ctrl: v4l2_ctrl_handler_free(hdl); diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c index 285047b32c44..a3d45287a534 100644 --- a/drivers/media/pci/cx25821/cx25821-core.c +++ b/drivers/media/pci/cx25821/cx25821-core.c @@ -1340,11 +1340,11 @@ static void cx25821_finidev(struct pci_dev *pci_dev) struct cx25821_dev *dev = get_cx25821(v4l2_dev); cx25821_shutdown(dev); - pci_disable_device(pci_dev); /* unregister stuff */ if (pci_dev->irq) free_irq(pci_dev->irq, dev); + pci_disable_device(pci_dev); cx25821_dev_unregister(dev); v4l2_device_unregister(v4l2_dev); diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c index 58489ea0c1da..7cf2271866d0 100644 --- a/drivers/media/pci/cx88/cx88-vbi.c +++ b/drivers/media/pci/cx88/cx88-vbi.c @@ -144,11 +144,10 @@ static int buffer_prepare(struct vb2_buffer *vb) return -EINVAL; vb2_set_plane_payload(vb, 0, size); - cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl, - 0, VBI_LINE_LENGTH * lines, - VBI_LINE_LENGTH, 0, - lines); - return 0; + return cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl, + 0, VBI_LINE_LENGTH * lines, + VBI_LINE_LENGTH, 0, + lines); } static void buffer_finish(struct vb2_buffer *vb) diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index 8cffdacf6007..e5adffa3a99a 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -431,6 +431,7 @@ static int queue_setup(struct vb2_queue *q, static int buffer_prepare(struct vb2_buffer *vb) { + int ret; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct cx8800_dev *dev = vb->vb2_queue->drv_priv; struct cx88_core *core = dev->core; @@ -445,35 +446,35 @@ static int buffer_prepare(struct vb2_buffer *vb) switch (core->field) { case V4L2_FIELD_TOP: - cx88_risc_buffer(dev->pci, &buf->risc, - sgt->sgl, 0, UNSET, - buf->bpl, 0, core->height); + ret = cx88_risc_buffer(dev->pci, &buf->risc, + sgt->sgl, 0, UNSET, + buf->bpl, 0, core->height); break; case V4L2_FIELD_BOTTOM: - cx88_risc_buffer(dev->pci, &buf->risc, - sgt->sgl, UNSET, 0, - buf->bpl, 0, core->height); + ret = cx88_risc_buffer(dev->pci, &buf->risc, + sgt->sgl, UNSET, 0, + buf->bpl, 0, core->height); break; case V4L2_FIELD_SEQ_TB: - cx88_risc_buffer(dev->pci, &buf->risc, - sgt->sgl, - 0, buf->bpl * (core->height >> 1), - buf->bpl, 0, - core->height >> 1); + ret = cx88_risc_buffer(dev->pci, &buf->risc, + sgt->sgl, + 0, buf->bpl * (core->height >> 1), + buf->bpl, 0, + core->height >> 1); break; case V4L2_FIELD_SEQ_BT: - cx88_risc_buffer(dev->pci, &buf->risc, - sgt->sgl, - buf->bpl * (core->height >> 1), 0, - buf->bpl, 0, - core->height >> 1); + ret = cx88_risc_buffer(dev->pci, &buf->risc, + sgt->sgl, + buf->bpl * (core->height >> 1), 0, + buf->bpl, 0, + core->height >> 1); break; case V4L2_FIELD_INTERLACED: default: - cx88_risc_buffer(dev->pci, &buf->risc, - sgt->sgl, 0, buf->bpl, - buf->bpl, buf->bpl, - core->height >> 1); + ret = cx88_risc_buffer(dev->pci, &buf->risc, + sgt->sgl, 0, buf->bpl, + buf->bpl, buf->bpl, + core->height >> 1); break; } dprintk(2, @@ -481,7 +482,7 @@ static int buffer_prepare(struct vb2_buffer *vb) buf, buf->vb.vb2_buf.index, __func__, core->width, core->height, dev->fmt->depth, dev->fmt->fourcc, (unsigned long)buf->risc.dma); - return 0; + return ret; } 
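[Annotation] The cx88 buffer_prepare() changes stop discarding cx88_risc_buffer()'s status; when the helper call is the function's last step, returning its result directly both propagates the error and trims a line. Sketch (build_dma_table is a stand-in):

struct my_buf;					/* opaque, illustrative */

static int build_dma_table(struct my_buf *b)	/* stand-in helper */
{
	return 0;
}

static int my_buffer_prepare(struct my_buf *b)
{
	/* was: build_dma_table(b); return 0;  -- status silently dropped */
	return build_dma_table(b);
}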
static void buffer_finish(struct vb2_buffer *vb) diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c index 74ae4f0dcee7..8a25a0dac4ae 100644 --- a/drivers/media/pci/tw686x/tw686x-core.c +++ b/drivers/media/pci/tw686x/tw686x-core.c @@ -315,13 +315,6 @@ static int tw686x_probe(struct pci_dev *pci_dev, spin_lock_init(&dev->lock); - err = request_irq(pci_dev->irq, tw686x_irq, IRQF_SHARED, - dev->name, dev); - if (err < 0) { - dev_err(&pci_dev->dev, "unable to request interrupt\n"); - goto iounmap; - } - timer_setup(&dev->dma_delay_timer, tw686x_dma_delay, 0); /* @@ -333,18 +326,23 @@ static int tw686x_probe(struct pci_dev *pci_dev, err = tw686x_video_init(dev); if (err) { dev_err(&pci_dev->dev, "can't register video\n"); - goto free_irq; + goto iounmap; } err = tw686x_audio_init(dev); if (err) dev_warn(&pci_dev->dev, "can't register audio\n"); + err = request_irq(pci_dev->irq, tw686x_irq, IRQF_SHARED, + dev->name, dev); + if (err < 0) { + dev_err(&pci_dev->dev, "unable to request interrupt\n"); + goto iounmap; + } + pci_set_drvdata(pci_dev, dev); return 0; -free_irq: - free_irq(pci_dev->irq, dev); iounmap: pci_iounmap(pci_dev, dev->mmio); free_region: diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c index 1ced2b0ddb24..55ed8851256f 100644 --- a/drivers/media/pci/tw686x/tw686x-video.c +++ b/drivers/media/pci/tw686x/tw686x-video.c @@ -1283,8 +1283,10 @@ int tw686x_video_init(struct tw686x_dev *dev) video_set_drvdata(vdev, vc); err = video_register_device(vdev, VFL_TYPE_VIDEO, -1); - if (err < 0) + if (err < 0) { + video_device_release(vdev); goto error; + } vc->num = vdev->num; } diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c index 757a58829a51..9d9124308f6a 100644 --- a/drivers/media/platform/aspeed-video.c +++ b/drivers/media/platform/aspeed-video.c @@ -1723,6 +1723,7 @@ static int aspeed_video_probe(struct platform_device *pdev) rc = aspeed_video_setup_video(video); if (rc) { + aspeed_video_free_buf(video, &video->jpeg); clk_unprepare(video->vclk); clk_unprepare(video->eclk); return rc; @@ -1748,8 +1749,7 @@ static int aspeed_video_remove(struct platform_device *pdev) v4l2_device_unregister(v4l2_dev); - dma_free_coherent(video->dev, VE_JPEG_HEADER_SIZE, video->jpeg.virt, - video->jpeg.dma); + aspeed_video_free_buf(video, &video->jpeg); of_reserved_mem_device_release(dev); diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 2333079a83c7..14d4830d5db5 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -1318,7 +1318,8 @@ static int coda_enum_frameintervals(struct file *file, void *fh, struct v4l2_frmivalenum *f) { struct coda_ctx *ctx = fh_to_ctx(fh); - int i; + struct coda_q_data *q_data; + const struct coda_codec *codec; if (f->index) return -EINVAL; @@ -1327,12 +1328,19 @@ static int coda_enum_frameintervals(struct file *file, void *fh, if (!ctx->vdoa && f->pixel_format == V4L2_PIX_FMT_YUYV) return -EINVAL; - for (i = 0; i < CODA_MAX_FORMATS; i++) { - if (f->pixel_format == ctx->cvd->src_formats[i] || - f->pixel_format == ctx->cvd->dst_formats[i]) - break; + if (coda_format_normalize_yuv(f->pixel_format) == V4L2_PIX_FMT_YUV420) { + q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + codec = coda_find_codec(ctx->dev, f->pixel_format, + q_data->fourcc); + } else { + codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420, + f->pixel_format); } - if (i == 
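[Annotation] tw686x_probe() now requests its shared interrupt only after video and audio setup, so an ISR that fires immediately finds fully initialized state. A sketch of that probe ordering, all names illustrative:

#include <linux/interrupt.h>
#include <linux/pci.h>

struct my_dev;

static irqreturn_t my_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int init_video(struct my_dev *dev)	{ return 0; }	/* stand-in */
static void teardown_video(struct my_dev *dev)	{ }		/* stand-in */

static int my_probe(struct pci_dev *pdev, struct my_dev *dev)
{
	int err = init_video(dev);	/* everything the ISR touches... */

	if (err)
		return err;

	/* ...exists before a shared IRQ can fire */
	err = request_irq(pdev->irq, my_isr, IRQF_SHARED, "mydev", dev);
	if (err) {
		teardown_video(dev);
		return err;
	}
	return 0;
}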
CODA_MAX_FORMATS) + if (!codec) + return -EINVAL; + + if (f->width < MIN_W || f->width > codec->max_w || + f->height < MIN_H || f->height > codec->max_h) return -EINVAL; f->type = V4L2_FRMIVAL_TYPE_CONTINUOUS; @@ -2335,8 +2343,8 @@ static void coda_encode_ctrls(struct coda_ctx *ctx) V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET, -12, 12, 1, 0); v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE, - V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0, - V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE); + V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE, 0x0, + V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE); if (ctx->dev->devtype->product == CODA_HX4 || ctx->dev->devtype->product == CODA_7541) { v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, @@ -2350,12 +2358,15 @@ static void coda_encode_ctrls(struct coda_ctx *ctx) if (ctx->dev->devtype->product == CODA_960) { v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, - V4L2_MPEG_VIDEO_H264_LEVEL_4_0, - ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) | + V4L2_MPEG_VIDEO_H264_LEVEL_4_2, + ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) | (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) | (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) | (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) | - (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0)), + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) | + (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2)), V4L2_MPEG_VIDEO_H264_LEVEL_4_0); } v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, @@ -2417,7 +2428,7 @@ static void coda_decode_ctrls(struct coda_ctx *ctx) ctx->h264_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, - ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) | + ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) | (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) | (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)), V4L2_MPEG_VIDEO_H264_PROFILE_HIGH); diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index d26fa5967d82..b52d2203eac5 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c @@ -140,7 +140,7 @@ static int fimc_is_enable_clocks(struct fimc_is *is) dev_err(&is->pdev->dev, "clock %s enable failed\n", fimc_is_clocks[i]); for (--i; i >= 0; i--) - clk_disable(is->clocks[i]); + clk_disable_unprepare(is->clocks[i]); return ret; } pr_debug("enabled clock: %s\n", fimc_is_clocks[i]); @@ -213,6 +213,7 @@ static int fimc_is_register_subdevs(struct fimc_is *is) if (ret < 0 || index >= FIMC_IS_SENSORS_NUM) { of_node_put(child); + of_node_put(i2c_bus); return ret; } index++; @@ -830,7 +831,7 @@ static int fimc_is_probe(struct platform_device *pdev) ret = pm_runtime_resume_and_get(dev); if (ret < 0) - goto err_irq; + goto err_pm_disable; vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32)); @@ -864,6 +865,8 @@ err_pm: pm_runtime_put_noidle(dev); if (!pm_runtime_enabled(dev)) fimc_is_runtime_suspend(dev); +err_pm_disable: + pm_runtime_disable(dev); err_irq: free_irq(is->irq, is); err_clk: diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.h b/drivers/media/platform/exynos4-is/fimc-isp-video.h index edcb3a5e3cb9..2dd4ddbc748a 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp-video.h +++ b/drivers/media/platform/exynos4-is/fimc-isp-video.h @@ -32,7 +32,7 @@ static inline int fimc_isp_video_device_register(struct fimc_isp *isp, return 0; } -void 
fimc_isp_video_device_unregister(struct fimc_isp *isp, +static inline void fimc_isp_video_device_unregister(struct fimc_isp *isp, enum v4l2_buf_type type) { } diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h index 2cb8cecb3077..b810c96695c8 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h @@ -40,12 +40,14 @@ struct mdp_ipi_init { * @ipi_id : IPI_MDP * @ap_inst : AP mtk_mdp_vpu address * @vpu_inst_addr : VPU MDP instance address + * @padding : Alignment padding */ struct mdp_ipi_comm { uint32_t msg_id; uint32_t ipi_id; uint64_t ap_inst; uint32_t vpu_inst_addr; + uint32_t padding; }; /** diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c index a59022adb14c..966b4d9b57a9 100644 --- a/drivers/media/platform/qcom/venus/hfi.c +++ b/drivers/media/platform/qcom/venus/hfi.c @@ -104,6 +104,9 @@ int hfi_core_deinit(struct venus_core *core, bool blocking) mutex_lock(&core->lock); } + if (!core->ops) + goto unlock; + ret = core->ops->core_deinit(core); if (!ret) diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index ea13170a6a2c..de34a87d1130 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -158,6 +158,8 @@ vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) else return NULL; fmt = find_format(inst, pixmp->pixelformat, f->type); + if (!fmt) + return NULL; } pixmp->width = clamp(pixmp->width, frame_width_min(inst), diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c index 6759091b15e0..e3246344fb72 100644 --- a/drivers/media/platform/rockchip/rga/rga.c +++ b/drivers/media/platform/rockchip/rga/rga.c @@ -868,7 +868,7 @@ static int rga_probe(struct platform_device *pdev) ret = pm_runtime_resume_and_get(rga->dev); if (ret < 0) - goto rel_vdev; + goto rel_m2m; rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF; rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F; @@ -884,7 +884,7 @@ static int rga_probe(struct platform_device *pdev) DMA_ATTR_WRITE_COMBINE); if (!rga->cmdbuf_virt) { ret = -ENOMEM; - goto rel_vdev; + goto rel_m2m; } rga->src_mmu_pages = @@ -895,7 +895,7 @@ static int rga_probe(struct platform_device *pdev) } rga->dst_mmu_pages = (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3); - if (rga->dst_mmu_pages) { + if (!rga->dst_mmu_pages) { ret = -ENOMEM; goto free_src_pages; } @@ -921,6 +921,8 @@ free_src_pages: free_dma: dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt, rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE); +rel_m2m: + v4l2_m2m_release(rga->m2m_dev); rel_vdev: video_device_release(vfd); unreg_v4l2_dev: diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c index c691b3d81549..5da49a0ab70b 100644 --- a/drivers/media/platform/sti/delta/delta-v4l2.c +++ b/drivers/media/platform/sti/delta/delta-v4l2.c @@ -1862,7 +1862,7 @@ static int delta_probe(struct platform_device *pdev) if (ret) { dev_err(delta->dev, "%s failed to initialize firmware ipc channel\n", DELTA_PREFIX); - goto err; + goto err_pm_disable; } /* register all available decoders */ @@ -1876,7 +1876,7 @@ static int delta_probe(struct platform_device *pdev) if (ret) { dev_err(delta->dev, "%s failed to register V4L2 device\n", DELTA_PREFIX); - goto err; + goto err_pm_disable; } delta->work_queue = 
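[Annotation] The mdp_ipi_comm change makes the compiler's implicit tail padding explicit, so the structure has one fixed layout on both ends of the AP/VPU link even if the VPU toolchain aligns u64 to 4 bytes. A sketch of the idea with a compile-time size check (field names mirror the patch; 24 assumes 4-byte u32 and 8-byte u64):

#include <linux/build_bug.h>
#include <linux/types.h>

struct ipc_msg {
	u32 msg_id;
	u32 ipi_id;
	u64 ap_inst;
	u32 vpu_inst_addr;
	u32 padding;		/* explicit, instead of implicit tail pad */
};

static_assert(sizeof(struct ipc_msg) == 24);	/* same on both sides */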
create_workqueue(DELTA_NAME); @@ -1901,6 +1901,8 @@ err_work_queue: destroy_workqueue(delta->work_queue); err_v4l2: v4l2_device_unregister(&delta->v4l2_dev); +err_pm_disable: + pm_runtime_disable(dev); err: return ret; } diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c index 85587c1b6a37..75083cb234fe 100644 --- a/drivers/media/platform/vsp1/vsp1_rpf.c +++ b/drivers/media/platform/vsp1/vsp1_rpf.c @@ -291,11 +291,11 @@ static void rpf_configure_partition(struct vsp1_entity *entity, + crop.left * fmtinfo->bpp[0] / 8; if (format->num_planes > 1) { + unsigned int bpl = format->plane_fmt[1].bytesperline; unsigned int offset; - offset = crop.top * format->plane_fmt[1].bytesperline - + crop.left / fmtinfo->hsub - * fmtinfo->bpp[1] / 8; + offset = crop.top / fmtinfo->vsub * bpl + + crop.left / fmtinfo->hsub * fmtinfo->bpp[1] / 8; mem.addr[1] += offset; mem.addr[2] += offset; } diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c index cc2856efea59..f2b0c490187c 100644 --- a/drivers/media/platform/xilinx/xilinx-vipp.c +++ b/drivers/media/platform/xilinx/xilinx-vipp.c @@ -472,7 +472,7 @@ static int xvip_graph_dma_init(struct xvip_composite_device *xdev) { struct device_node *ports; struct device_node *port; - int ret; + int ret = 0; ports = of_get_child_by_name(xdev->dev->of_node, "ports"); if (ports == NULL) { @@ -482,13 +482,14 @@ static int xvip_graph_dma_init(struct xvip_composite_device *xdev) for_each_child_of_node(ports, port) { ret = xvip_graph_dma_init_one(xdev, port); - if (ret < 0) { + if (ret) { of_node_put(port); - return ret; + break; } } - return 0; + of_node_put(ports); + return ret; } static void xvip_graph_cleanup(struct xvip_composite_device *xdev) diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index a7962ca2ac8e..bc9ac6002e25 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -153,6 +153,24 @@ struct imon_context { const struct imon_usb_dev_descr *dev_descr; /* device description with key */ /* table for front panels */ + /* + * Fields for deferring free_imon_context(). + * + * Since reference to "struct imon_context" is stored into + * "struct file"->private_data, we need to remember + * how many file descriptors might access this "struct imon_context". + */ + refcount_t users; + /* + * Use a flag for telling display_open()/vfd_write()/lcd_write() that + * imon_disconnect() was already called. + */ + bool disconnected; + /* + * We need to wait for RCU grace period in order to allow + * display_open() to safely check ->disconnected and increment ->users. 
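[Annotation] The vsp1_rpf fix divides the vertical crop by the format's vertical subsampling before multiplying by the chroma stride, instead of using the full luma row count. A hedged sketch for a planar YUV format cropped at (left, top):

#include <linux/types.h>

/* e.g. NV12: hsub = vsub = 2; bpp is bits per chroma sample pair */
static u32 chroma_plane_offset(u32 top, u32 left, u32 bpl,
			       u32 hsub, u32 vsub, u32 bpp)
{
	/* one chroma line covers vsub luma lines, hence top / vsub */
	return top / vsub * bpl + left / hsub * bpp / 8;
}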
+ */ + struct rcu_head rcu; }; #define TOUCH_TIMEOUT (HZ/30) @@ -160,18 +178,18 @@ struct imon_context { /* vfd character device file operations */ static const struct file_operations vfd_fops = { .owner = THIS_MODULE, - .open = &display_open, - .write = &vfd_write, - .release = &display_close, + .open = display_open, + .write = vfd_write, + .release = display_close, .llseek = noop_llseek, }; /* lcd character device file operations */ static const struct file_operations lcd_fops = { .owner = THIS_MODULE, - .open = &display_open, - .write = &lcd_write, - .release = &display_close, + .open = display_open, + .write = lcd_write, + .release = display_close, .llseek = noop_llseek, }; @@ -439,9 +457,6 @@ static struct usb_driver imon_driver = { .id_table = imon_usb_id_table, }; -/* to prevent races between open() and disconnect(), probing, etc */ -static DEFINE_MUTEX(driver_lock); - /* Module bookkeeping bits */ MODULE_AUTHOR(MOD_AUTHOR); MODULE_DESCRIPTION(MOD_DESC); @@ -481,9 +496,11 @@ static void free_imon_context(struct imon_context *ictx) struct device *dev = ictx->dev; usb_free_urb(ictx->tx_urb); + WARN_ON(ictx->dev_present_intf0); usb_free_urb(ictx->rx_urb_intf0); + WARN_ON(ictx->dev_present_intf1); usb_free_urb(ictx->rx_urb_intf1); - kfree(ictx); + kfree_rcu(ictx, rcu); dev_dbg(dev, "%s: iMON context freed\n", __func__); } @@ -499,9 +516,6 @@ static int display_open(struct inode *inode, struct file *file) int subminor; int retval = 0; - /* prevent races with disconnect */ - mutex_lock(&driver_lock); - subminor = iminor(inode); interface = usb_find_interface(&imon_driver, subminor); if (!interface) { @@ -509,13 +523,16 @@ static int display_open(struct inode *inode, struct file *file) retval = -ENODEV; goto exit; } - ictx = usb_get_intfdata(interface); - if (!ictx) { + rcu_read_lock(); + ictx = usb_get_intfdata(interface); + if (!ictx || ictx->disconnected || !refcount_inc_not_zero(&ictx->users)) { + rcu_read_unlock(); pr_err("no context found for minor %d\n", subminor); retval = -ENODEV; goto exit; } + rcu_read_unlock(); mutex_lock(&ictx->lock); @@ -533,8 +550,10 @@ static int display_open(struct inode *inode, struct file *file) mutex_unlock(&ictx->lock); + if (retval && refcount_dec_and_test(&ictx->users)) + free_imon_context(ictx); + exit: - mutex_unlock(&driver_lock); return retval; } @@ -544,16 +563,9 @@ exit: */ static int display_close(struct inode *inode, struct file *file) { - struct imon_context *ictx = NULL; + struct imon_context *ictx = file->private_data; int retval = 0; - ictx = file->private_data; - - if (!ictx) { - pr_err("no context for device\n"); - return -ENODEV; - } - mutex_lock(&ictx->lock); if (!ictx->display_supported) { @@ -568,6 +580,8 @@ static int display_close(struct inode *inode, struct file *file) } mutex_unlock(&ictx->lock); + if (refcount_dec_and_test(&ictx->users)) + free_imon_context(ictx); return retval; } @@ -937,15 +951,12 @@ static ssize_t vfd_write(struct file *file, const char __user *buf, int offset; int seq; int retval = 0; - struct imon_context *ictx; + struct imon_context *ictx = file->private_data; static const unsigned char vfd_packet6[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF }; - ictx = file->private_data; - if (!ictx) { - pr_err_ratelimited("no context for device\n"); + if (ictx->disconnected) return -ENODEV; - } mutex_lock(&ictx->lock); @@ -1021,13 +1032,10 @@ static ssize_t lcd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos) { int retval = 0; - struct imon_context *ictx; + struct imon_context *ictx = 
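[Annotation] The new imon fields implement a classic lookup-under-RCU lifetime: openers take a reference only if the context is still live, and the final put frees it after a grace period, so a concurrent display_open() can still safely test ->disconnected. Minimal sketch of the pattern (ctx_get/ctx_put are illustrative, not the driver's API):

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/types.h>

struct ctx {
	refcount_t users;
	bool disconnected;
	struct rcu_head rcu;
};

static struct ctx *ctx_get(struct ctx *c)
{
	rcu_read_lock();
	if (!c || c->disconnected || !refcount_inc_not_zero(&c->users))
		c = NULL;		/* dying or gone; don't touch it */
	rcu_read_unlock();
	return c;
}

static void ctx_put(struct ctx *c)
{
	if (refcount_dec_and_test(&c->users))
		kfree_rcu(c, rcu);	/* free only after a grace period */
}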
file->private_data; - ictx = file->private_data; - if (!ictx) { - pr_err_ratelimited("no context for device\n"); + if (ictx->disconnected) return -ENODEV; - } mutex_lock(&ictx->lock); @@ -2405,7 +2413,6 @@ static int imon_probe(struct usb_interface *interface, int ifnum, sysfs_err; int ret = 0; struct imon_context *ictx = NULL; - struct imon_context *first_if_ctx = NULL; u16 vendor, product; usbdev = usb_get_dev(interface_to_usbdev(interface)); @@ -2417,17 +2424,12 @@ static int imon_probe(struct usb_interface *interface, dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n", __func__, vendor, product, ifnum); - /* prevent races probing devices w/multiple interfaces */ - mutex_lock(&driver_lock); - first_if = usb_ifnum_to_if(usbdev, 0); if (!first_if) { ret = -ENODEV; goto fail; } - first_if_ctx = usb_get_intfdata(first_if); - if (ifnum == 0) { ictx = imon_init_intf0(interface, id); if (!ictx) { @@ -2435,9 +2437,11 @@ static int imon_probe(struct usb_interface *interface, ret = -ENODEV; goto fail; } + refcount_set(&ictx->users, 1); } else { /* this is the secondary interface on the device */ + struct imon_context *first_if_ctx = usb_get_intfdata(first_if); /* fail early if first intf failed to register */ if (!first_if_ctx) { @@ -2451,14 +2455,13 @@ static int imon_probe(struct usb_interface *interface, ret = -ENODEV; goto fail; } + refcount_inc(&ictx->users); } usb_set_intfdata(interface, ictx); if (ifnum == 0) { - mutex_lock(&ictx->lock); - if (product == 0xffdc && ictx->rf_device) { sysfs_err = sysfs_create_group(&interface->dev.kobj, &imon_rf_attr_group); @@ -2469,21 +2472,17 @@ static int imon_probe(struct usb_interface *interface, if (ictx->display_supported) imon_init_display(ictx, interface); - - mutex_unlock(&ictx->lock); } dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n", vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum); - mutex_unlock(&driver_lock); usb_put_dev(usbdev); return 0; fail: - mutex_unlock(&driver_lock); usb_put_dev(usbdev); dev_err(dev, "unable to register, err %d\n", ret); @@ -2499,10 +2498,8 @@ static void imon_disconnect(struct usb_interface *interface) struct device *dev; int ifnum; - /* prevent races with multi-interface device probing and display_open */ - mutex_lock(&driver_lock); - ictx = usb_get_intfdata(interface); + ictx->disconnected = true; dev = ictx->dev; ifnum = interface->cur_altsetting->desc.bInterfaceNumber; @@ -2543,11 +2540,9 @@ static void imon_disconnect(struct usb_interface *interface) } } - if (!ictx->dev_present_intf0 && !ictx->dev_present_intf1) + if (refcount_dec_and_test(&ictx->users)) free_imon_context(ictx); - mutex_unlock(&driver_lock); - dev_dbg(dev, "%s: iMON device (intf%d) disconnected\n", __func__, ifnum); } diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index dbb5a4f44bda..142319c48405 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -1077,7 +1077,7 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout) struct mceusb_dev *ir = dev->priv; unsigned int units; - units = DIV_ROUND_CLOSEST(timeout, MCE_TIME_UNIT); + units = DIV_ROUND_UP(timeout, MCE_TIME_UNIT); cmdbuf[2] = units >> 8; cmdbuf[3] = units; @@ -1416,42 +1416,37 @@ static void mceusb_gen1_init(struct mceusb_dev *ir) { int ret; struct device *dev = ir->dev; - char *data; - - data = kzalloc(USB_CTRL_MSG_SZ, GFP_KERNEL); - if (!data) { - dev_err(dev, "%s: memory allocation failed!", __func__); - return; - } + char data[USB_CTRL_MSG_SZ]; /* * This is a strange one. 
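[Annotation] The mceusb timeout fix swaps DIV_ROUND_CLOSEST for DIV_ROUND_UP so the hardware never times out earlier than userspace asked for; rounding to the nearest unit could shorten the timeout by up to half a unit. Tiny sketch (the 50 µs unit matches MCE_TIME_UNIT, but treat it as illustrative):

#include <linux/math.h>

#define TIME_UNIT 50	/* microseconds per hardware unit, illustrative */

static unsigned int timeout_to_units(unsigned int timeout_us)
{
	/* 120 µs -> 3 units (150 µs); DIV_ROUND_CLOSEST gave 2 (100 µs) */
	return DIV_ROUND_UP(timeout_us, TIME_UNIT);
}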
Windows issues a set address to the device * on the receive control pipe and expect a certain value pair back */ - ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), - USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0, - data, USB_CTRL_MSG_SZ, 3000); + ret = usb_control_msg_recv(ir->usbdev, 0, USB_REQ_SET_ADDRESS, + USB_DIR_IN | USB_TYPE_VENDOR, + 0, 0, data, USB_CTRL_MSG_SZ, 3000, + GFP_KERNEL); dev_dbg(dev, "set address - ret = %d", ret); dev_dbg(dev, "set address - data[0] = %d, data[1] = %d", data[0], data[1]); /* set feature: bit rate 38400 bps */ - ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), - USB_REQ_SET_FEATURE, USB_TYPE_VENDOR, - 0xc04e, 0x0000, NULL, 0, 3000); + ret = usb_control_msg_send(ir->usbdev, 0, + USB_REQ_SET_FEATURE, USB_TYPE_VENDOR, + 0xc04e, 0x0000, NULL, 0, 3000, GFP_KERNEL); dev_dbg(dev, "set feature - ret = %d", ret); /* bRequest 4: set char length to 8 bits */ - ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), - 4, USB_TYPE_VENDOR, - 0x0808, 0x0000, NULL, 0, 3000); + ret = usb_control_msg_send(ir->usbdev, 0, + 4, USB_TYPE_VENDOR, + 0x0808, 0x0000, NULL, 0, 3000, GFP_KERNEL); dev_dbg(dev, "set char length - retB = %d", ret); /* bRequest 2: set handshaking to use DTR/DSR */ - ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), - 2, USB_TYPE_VENDOR, - 0x0000, 0x0100, NULL, 0, 3000); + ret = usb_control_msg_send(ir->usbdev, 0, + 2, USB_TYPE_VENDOR, + 0x0000, 0x0100, NULL, 0, 3000, GFP_KERNEL); dev_dbg(dev, "set handshake - retC = %d", ret); /* device resume */ @@ -1459,8 +1454,6 @@ static void mceusb_gen1_init(struct mceusb_dev *ir) /* get hw/sw revision? */ mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION)); - - kfree(data); } static void mceusb_gen2_init(struct mceusb_dev *ir) diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c index a776bb8e0e09..a24624353f9e 100644 --- a/drivers/media/test-drivers/vim2m.c +++ b/drivers/media/test-drivers/vim2m.c @@ -1325,12 +1325,6 @@ static int vim2m_probe(struct platform_device *pdev) vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; - ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); - if (ret) { - v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); - goto error_v4l2; - } - video_set_drvdata(vfd, dev); v4l2_info(&dev->v4l2_dev, "Device registered as /dev/video%d\n", vfd->num); @@ -1353,12 +1347,20 @@ static int vim2m_probe(struct platform_device *pdev) media_device_init(&dev->mdev); dev->mdev.ops = &m2m_media_ops; dev->v4l2_dev.mdev = &dev->mdev; +#endif + ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); + if (ret) { + v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); + goto error_m2m; + } + +#ifdef CONFIG_MEDIA_CONTROLLER ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd, MEDIA_ENT_F_PROC_VIDEO_SCALER); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n"); - goto error_dev; + goto error_v4l2; } ret = media_device_register(&dev->mdev); @@ -1373,11 +1375,13 @@ static int vim2m_probe(struct platform_device *pdev) error_m2m_mc: v4l2_m2m_unregister_media_controller(dev->m2m_dev); #endif -error_dev: +error_v4l2: video_unregister_device(&dev->vfd); /* vim2m_device_release called by video_unregister_device to release various objects */ return ret; -error_v4l2: +error_m2m: + v4l2_m2m_release(dev->m2m_dev); +error_dev: v4l2_device_unregister(&dev->v4l2_dev); error_free: kfree(dev); diff --git a/drivers/media/test-drivers/vivid/vivid-core.c 
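[Annotation] mceusb_gen1_init() moves to usb_control_msg_recv()/usb_control_msg_send(), which bounce-buffer internally; that is what lets the on-stack data[] replace the kzalloc'd buffer, and short transfers now surface as errors instead of short counts. A minimal read, with an assumed bRequest value:

#include <linux/usb.h>

static int read_two_bytes(struct usb_device *udev)
{
	u8 data[2];	/* on-stack is fine: the wrapper bounce-buffers */
	int ret;

	ret = usb_control_msg_recv(udev, 0, 0x01 /* bRequest, assumed */,
				   USB_DIR_IN | USB_TYPE_VENDOR, 0, 0,
				   data, sizeof(data), 3000, GFP_KERNEL);
	if (ret)
		return ret;	/* short transfers already mapped to errors */

	return data[0];
}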
b/drivers/media/test-drivers/vivid/vivid-core.c index 1e356dc65d31..761d2abd4006 100644 --- a/drivers/media/test-drivers/vivid/vivid-core.c +++ b/drivers/media/test-drivers/vivid/vivid-core.c @@ -330,6 +330,28 @@ static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a return vivid_vid_out_g_fbuf(file, fh, a); } +/* + * Only support the framebuffer of one of the vivid instances. + * Anything else is rejected. + */ +bool vivid_validate_fb(const struct v4l2_framebuffer *a) +{ + struct vivid_dev *dev; + int i; + + for (i = 0; i < n_devs; i++) { + dev = vivid_devs[i]; + if (!dev || !dev->video_pbase) + continue; + if ((unsigned long)a->base == dev->video_pbase && + a->fmt.width <= dev->display_width && + a->fmt.height <= dev->display_height && + a->fmt.bytesperline <= dev->display_byte_stride) + return true; + } + return false; +} + static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a) { struct video_device *vdev = video_devdata(file); @@ -850,8 +872,12 @@ static int vivid_detect_feature_set(struct vivid_dev *dev, int inst, /* how many inputs do we have and of what type? */ dev->num_inputs = num_inputs[inst]; - if (dev->num_inputs < 1) - dev->num_inputs = 1; + if (node_type & 0x20007) { + if (dev->num_inputs < 1) + dev->num_inputs = 1; + } else { + dev->num_inputs = 0; + } if (dev->num_inputs >= MAX_INPUTS) dev->num_inputs = MAX_INPUTS; for (i = 0; i < dev->num_inputs; i++) { @@ -868,8 +894,12 @@ static int vivid_detect_feature_set(struct vivid_dev *dev, int inst, /* how many outputs do we have and of what type? */ dev->num_outputs = num_outputs[inst]; - if (dev->num_outputs < 1) - dev->num_outputs = 1; + if (node_type & 0x40300) { + if (dev->num_outputs < 1) + dev->num_outputs = 1; + } else { + dev->num_outputs = 0; + } if (dev->num_outputs >= MAX_OUTPUTS) dev->num_outputs = MAX_OUTPUTS; for (i = 0; i < dev->num_outputs; i++) { diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h index 99e69b8f770f..6aa32c8e6fb5 100644 --- a/drivers/media/test-drivers/vivid/vivid-core.h +++ b/drivers/media/test-drivers/vivid/vivid-core.h @@ -609,4 +609,6 @@ static inline bool vivid_is_hdmi_out(const struct vivid_dev *dev) return dev->output_type[dev->output] == HDMI; } +bool vivid_validate_fb(const struct v4l2_framebuffer *a); + #endif diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c index eadf28ab1e39..d493bd17481b 100644 --- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c @@ -452,6 +452,12 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap); dev->crop_cap = dev->src_rect; dev->crop_bounds_cap = dev->src_rect; + if (dev->bitmap_cap && + (dev->compose_cap.width != dev->crop_cap.width || + dev->compose_cap.height != dev->crop_cap.height)) { + vfree(dev->bitmap_cap); + dev->bitmap_cap = NULL; + } dev->compose_cap = dev->crop_cap; if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap)) dev->compose_cap.height /= 2; @@ -909,6 +915,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection struct vivid_dev *dev = video_drvdata(file); struct v4l2_rect *crop = &dev->crop_cap; struct v4l2_rect *compose = &dev->compose_cap; + unsigned orig_compose_w = compose->width; + unsigned orig_compose_h = compose->height; unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) 
? 2 : 1; int ret; @@ -1025,17 +1033,17 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection s->r.height /= factor; } v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect); - if (dev->bitmap_cap && (compose->width != s->r.width || - compose->height != s->r.height)) { - vfree(dev->bitmap_cap); - dev->bitmap_cap = NULL; - } *compose = s->r; break; default: return -EINVAL; } + if (dev->bitmap_cap && (compose->width != orig_compose_w || + compose->height != orig_compose_h)) { + vfree(dev->bitmap_cap); + dev->bitmap_cap = NULL; + } tpg_s_crop_compose(&dev->tpg, crop, compose); return 0; } @@ -1276,7 +1284,14 @@ int vivid_vid_cap_s_fbuf(struct file *file, void *fh, return -EINVAL; if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8) return -EINVAL; - if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage) + if (a->fmt.bytesperline > a->fmt.sizeimage / a->fmt.height) + return -EINVAL; + + /* + * Only support the framebuffer of one of the vivid instances. + * Anything else is rejected. + */ + if (!vivid_validate_fb(a)) return -EINVAL; dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base); diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index a2563c254080..2299d5cca8ff 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -512,7 +512,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb) if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1) return -ENODEV; - if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc)) + if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc)) return -ENODEV; switch (fc_usb->udev->speed) { diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c index 60e57e0f1927..fd7d2a9d0449 100644 --- a/drivers/media/usb/hdpvr/hdpvr-video.c +++ b/drivers/media/usb/hdpvr/hdpvr-video.c @@ -409,7 +409,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count, struct hdpvr_device *dev = video_drvdata(file); struct hdpvr_buffer *buf = NULL; struct urb *urb; - unsigned int ret = 0; + int ret = 0; int rem, cnt; if (*pos) diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index 3915d551d59e..d22ce328a279 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -2569,6 +2569,11 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf, } while (0); mutex_unlock(&pvr2_unit_mtx); + INIT_WORK(&hdw->workpoll, pvr2_hdw_worker_poll); + + if (hdw->unit_number == -1) + goto fail; + cnt1 = 0; cnt2 = scnprintf(hdw->name+cnt1,sizeof(hdw->name)-cnt1,"pvrusb2"); cnt1 += cnt2; @@ -2580,8 +2585,6 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf, if (cnt1 >= sizeof(hdw->name)) cnt1 = sizeof(hdw->name)-1; hdw->name[cnt1] = 0; - INIT_WORK(&hdw->workpoll,pvr2_hdw_worker_poll); - pvr2_trace(PVR2_TRACE_INIT,"Driver unit number is %d, name is %s", hdw->unit_number,hdw->name); @@ -2607,6 +2610,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf, del_timer_sync(&hdw->encoder_run_timer); del_timer_sync(&hdw->encoder_wait_timer); flush_work(&hdw->workpoll); + v4l2_device_unregister(&hdw->v4l2_dev); usb_free_urb(hdw->ctl_read_urb); usb_free_urb(hdw->ctl_write_urb); kfree(hdw->ctl_read_buffer); diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 753b8a99e08f..b40a2b904ace 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ 
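[Annotation] The reworked vivid s_fbuf check is written as a division so a huge bytesperline cannot overflow the 32-bit multiplication that the old height * bytesperline form performed. Pattern:

#include <linux/types.h>

static bool fb_geometry_ok(u32 bytesperline, u32 height, u32 sizeimage)
{
	if (!height)
		return false;
	/* same meaning as bytesperline * height <= sizeimage, sans overflow */
	return bytesperline <= sizeimage / height;
}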
b/drivers/media/usb/uvc/uvc_v4l2.c @@ -863,29 +863,31 @@ static int uvc_ioctl_enum_input(struct file *file, void *fh, struct uvc_video_chain *chain = handle->chain; const struct uvc_entity *selector = chain->selector; struct uvc_entity *iterm = NULL; + struct uvc_entity *it; u32 index = input->index; - int pin = 0; if (selector == NULL || (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) { if (index != 0) return -EINVAL; - list_for_each_entry(iterm, &chain->entities, chain) { - if (UVC_ENTITY_IS_ITERM(iterm)) + list_for_each_entry(it, &chain->entities, chain) { + if (UVC_ENTITY_IS_ITERM(it)) { + iterm = it; break; + } } - pin = iterm->id; } else if (index < selector->bNrInPins) { - pin = selector->baSourceID[index]; - list_for_each_entry(iterm, &chain->entities, chain) { - if (!UVC_ENTITY_IS_ITERM(iterm)) + list_for_each_entry(it, &chain->entities, chain) { + if (!UVC_ENTITY_IS_ITERM(it)) continue; - if (iterm->id == pin) + if (it->id == selector->baSourceID[index]) { + iterm = it; break; + } } } - if (iterm == NULL || iterm->id != pin) + if (iterm == NULL) return -EINVAL; memset(input, 0, sizeof(*input)); diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index af48705c704f..942d0005c55e 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -145,6 +145,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, const struct v4l2_bt_timings *bt = &t->bt; const struct v4l2_bt_timings_cap *cap = &dvcap->bt; u32 caps = cap->capabilities; + const u32 max_vert = 10240; + u32 max_hor = 3 * bt->width; if (t->type != V4L2_DV_BT_656_1120) return false; @@ -161,6 +163,26 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) || (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE))) return false; + + /* sanity checks for the blanking timings */ + if (!bt->interlaced && + (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch)) + return false; + /* + * Some video receivers cannot properly separate the frontporch, + * backporch and sync values, and instead they only have the total + * blanking. That can be assigned to any of these three fields. + * So just check that none of these are way out of range. 
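[Annotation] The uvc_ioctl_enum_input() rewrite fixes a classic list_for_each_entry() misuse: after the loop the cursor never becomes NULL (on no match it points at the container embedding the list head), so a match must be recorded in a separate pointer. Generic shape:

#include <linux/list.h>

struct item {
	int id;
	struct list_head node;
};

static struct item *find_item(struct list_head *head, int id)
{
	struct item *found = NULL, *it;

	list_for_each_entry(it, head, node) {
		if (it->id == id) {
			found = it;	/* record the match... */
			break;
		}
	}
	/* ...testing 'it' here would never yield NULL */
	return found;
}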
+ */ + if (bt->hfrontporch > max_hor || + bt->hsync > max_hor || bt->hbackporch > max_hor) + return false; + if (bt->vfrontporch > max_vert || + bt->vsync > max_vert || bt->vbackporch > max_vert) + return false; + if (bt->interlaced && (bt->il_vfrontporch > max_vert || + bt->il_vsync > max_vert || bt->il_vbackporch > max_vert)) + return false; return fnc == NULL || fnc(t, fnc_handle); } EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings); diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index 73190652c267..ad14d5214106 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -927,7 +927,7 @@ static __poll_t v4l2_m2m_poll_for_data(struct file *file, if ((!src_q->streaming || src_q->error || list_empty(&src_q->queued_list)) && (!dst_q->streaming || dst_q->error || - list_empty(&dst_q->queued_list))) + (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued))) return EPOLLERR; spin_lock_irqsave(&src_q->done_lock, flags); diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c index c267283b01fd..e749dcb3ddea 100644 --- a/drivers/memory/atmel-ebi.c +++ b/drivers/memory/atmel-ebi.c @@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev) smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0); ebi->smc.regmap = syscon_node_to_regmap(smc_np); - if (IS_ERR(ebi->smc.regmap)) - return PTR_ERR(ebi->smc.regmap); + if (IS_ERR(ebi->smc.regmap)) { + ret = PTR_ERR(ebi->smc.regmap); + goto put_node; + } ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np); - if (IS_ERR(ebi->smc.layout)) - return PTR_ERR(ebi->smc.layout); + if (IS_ERR(ebi->smc.layout)) { + ret = PTR_ERR(ebi->smc.layout); + goto put_node; + } ebi->smc.clk = of_clk_get(smc_np, 0); if (IS_ERR(ebi->smc.clk)) { - if (PTR_ERR(ebi->smc.clk) != -ENOENT) - return PTR_ERR(ebi->smc.clk); + if (PTR_ERR(ebi->smc.clk) != -ENOENT) { + ret = PTR_ERR(ebi->smc.clk); + goto put_node; + } ebi->smc.clk = NULL; } + of_node_put(smc_np); ret = clk_prepare_enable(ebi->smc.clk); if (ret) return ret; @@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev) } return of_platform_populate(np, NULL, NULL, dev); + +put_node: + of_node_put(smc_np); + return ret; } static __maybe_unused int atmel_ebi_resume(struct device *dev) diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c index d9f5437d3bce..1791614f324b 100644 --- a/drivers/memory/of_memory.c +++ b/drivers/memory/of_memory.c @@ -134,6 +134,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr, for_each_child_of_node(np_ddr, np_tim) { if (of_device_is_compatible(np_tim, tim_compat)) { if (of_do_get_timings(np_tim, &timings[i])) { + of_node_put(np_tim); devm_kfree(dev, timings); goto default_timings; } @@ -282,6 +283,7 @@ const struct lpddr3_timings if (of_device_is_compatible(np_tim, tim_compat)) { if (of_lpddr3_do_get_timings(np_tim, &timings[i])) { devm_kfree(dev, timings); + of_node_put(np_tim); goto default_timings; } i++; diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c index b0b251bb207f..1a6964f1ba6a 100644 --- a/drivers/memory/pl353-smc.c +++ b/drivers/memory/pl353-smc.c @@ -416,6 +416,7 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id) if (init) init(adev, child); of_platform_device_create(child, NULL, &adev->dev); + of_node_put(child); return 0; diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c index 9019121a80f5..1dfb81dea961 100644 --- 
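[Annotation] Several hunks in this stretch plug device-node refcount leaks: for_each_child_of_node() holds a reference on the current child, so every early exit must drop it, and nodes obtained via of_parse_phandle() must be put on all error paths. Sketch with an assumed process() helper:

#include <linux/errno.h>
#include <linux/of.h>

static int process(struct device_node *child)	/* stand-in */
{
	return 0;
}

static int scan_children(struct device_node *parent)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (process(child)) {
			of_node_put(child);	/* iterator's ref, on early exit */
			return -EINVAL;
		}
	}
	return 0;
}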
a/drivers/memory/renesas-rpc-if.c +++ b/drivers/memory/renesas-rpc-if.c @@ -163,25 +163,39 @@ static const struct regmap_access_table rpcif_volatile_table = { /* - * Custom accessor functions to ensure SMRDR0 and SMWDR0 are always accessed - * with proper width. Requires SMENR_SPIDE to be correctly set before! + * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with + * proper width. Requires rpcif.xfer_size to be correctly set before! */ static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val) { struct rpcif *rpc = context; - if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) { - u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF); - - if (spide == 0x8) { + switch (reg) { + case RPCIF_SMRDR0: + case RPCIF_SMWDR0: + switch (rpc->xfer_size) { + case 1: *val = readb(rpc->base + reg); return 0; - } else if (spide == 0xC) { + + case 2: *val = readw(rpc->base + reg); return 0; - } else if (spide != 0xF) { + + case 4: + case 8: + *val = readl(rpc->base + reg); + return 0; + + default: return -EILSEQ; } + + case RPCIF_SMRDR1: + case RPCIF_SMWDR1: + if (rpc->xfer_size != 8) + return -EILSEQ; + break; } *val = readl(rpc->base + reg); @@ -193,18 +207,34 @@ static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val) { struct rpcif *rpc = context; - if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) { - u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF); - - if (spide == 0x8) { + switch (reg) { + case RPCIF_SMWDR0: + switch (rpc->xfer_size) { + case 1: writeb(val, rpc->base + reg); return 0; - } else if (spide == 0xC) { + + case 2: writew(val, rpc->base + reg); return 0; - } else if (spide != 0xF) { + + case 4: + case 8: + writel(val, rpc->base + reg); + return 0; + + default: return -EILSEQ; } + + case RPCIF_SMWDR1: + if (rpc->xfer_size != 8) + return -EILSEQ; + break; + + case RPCIF_SMRDR0: + case RPCIF_SMRDR1: + return -EPERM; } writel(val, rpc->base + reg); @@ -455,6 +485,7 @@ int rpcif_manual_xfer(struct rpcif *rpc) smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes)); regmap_write(rpc->regmap, RPCIF_SMENR, smenr); + rpc->xfer_size = nbytes; memcpy(data, rpc->buffer + pos, nbytes); if (nbytes == 8) { @@ -519,6 +550,7 @@ int rpcif_manual_xfer(struct rpcif *rpc) regmap_write(rpc->regmap, RPCIF_SMENR, smenr); regmap_write(rpc->regmap, RPCIF_SMCR, rpc->smcr | RPCIF_SMCR_SPIE); + rpc->xfer_size = nbytes; ret = wait_msg_xfer_end(rpc); if (ret) goto err_out; @@ -592,6 +624,7 @@ static int rpcif_probe(struct platform_device *pdev) struct platform_device *vdev; struct device_node *flash; const char *name; + int ret; flash = of_get_next_child(pdev->dev.of_node, NULL); if (!flash) { @@ -615,7 +648,14 @@ static int rpcif_probe(struct platform_device *pdev) return -ENOMEM; vdev->dev.parent = &pdev->dev; platform_set_drvdata(pdev, vdev); - return platform_device_add(vdev); + + ret = platform_device_add(vdev); + if (ret) { + platform_device_put(vdev); + return ret; + } + + return 0; } static int rpcif_remove(struct platform_device *pdev) diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c index 3d230f07eaf2..5cec30d2dc7d 100644 --- a/drivers/memory/samsung/exynos5422-dmc.c +++ b/drivers/memory/samsung/exynos5422-dmc.c @@ -1192,33 +1192,39 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc) dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT, sizeof(u32), GFP_KERNEL); - if (!dmc->timing_row) - return -ENOMEM; + if (!dmc->timing_row) { + ret = -ENOMEM; + goto 
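[Annotation] rpcif_probe() now disposes of the child device when registration fails. After platform_device_add() errors out, the caller still owns the reference from platform_device_alloc() and must drop it with platform_device_put(), never kfree(), so the release callback runs:

#include <linux/platform_device.h>

static int register_child(struct platform_device *vdev)
{
	int ret = platform_device_add(vdev);

	if (ret) {
		/* drops the last ref; the release callback does the free */
		platform_device_put(vdev);
		return ret;
	}
	return 0;
}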
put_node; + } dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT, sizeof(u32), GFP_KERNEL); - if (!dmc->timing_data) - return -ENOMEM; + if (!dmc->timing_data) { + ret = -ENOMEM; + goto put_node; + } dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT, sizeof(u32), GFP_KERNEL); - if (!dmc->timing_power) - return -ENOMEM; + if (!dmc->timing_power) { + ret = -ENOMEM; + goto put_node; + } dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev, DDR_TYPE_LPDDR3, &dmc->timings_arr_size); if (!dmc->timings) { - of_node_put(np_ddr); dev_warn(dmc->dev, "could not get timings from DT\n"); - return -EINVAL; + ret = -EINVAL; + goto put_node; } dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev); if (!dmc->min_tck) { - of_node_put(np_ddr); dev_warn(dmc->dev, "could not get tck from DT\n"); - return -EINVAL; + ret = -EINVAL; + goto put_node; } /* Sorted array of OPPs with frequency ascending */ @@ -1232,13 +1238,14 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc) clk_period_ps); } - of_node_put(np_ddr); /* Take the highest frequency's timings as 'bypass' */ dmc->bypass_timing_row = dmc->timing_row[idx - 1]; dmc->bypass_timing_data = dmc->timing_data[idx - 1]; dmc->bypass_timing_power = dmc->timing_power[idx - 1]; +put_node: + of_node_put(np_ddr); return ret; } @@ -1327,7 +1334,6 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc) */ static int exynos5_performance_counters_init(struct exynos5_dmc *dmc) { - int counters_size; int ret, i; dmc->num_counters = devfreq_event_get_edev_count(dmc->dev, @@ -1337,8 +1343,8 @@ static int exynos5_performance_counters_init(struct exynos5_dmc *dmc) return dmc->num_counters; } - counters_size = sizeof(struct devfreq_event_dev) * dmc->num_counters; - dmc->counter = devm_kzalloc(dmc->dev, counters_size, GFP_KERNEL); + dmc->counter = devm_kcalloc(dmc->dev, dmc->num_counters, + sizeof(*dmc->counter), GFP_KERNEL); if (!dmc->counter) return -ENOMEM; diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index bc1f484f50f1..6df98c0e5622 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c @@ -1335,17 +1335,17 @@ static int msb_ftl_initialize(struct msb_data *msb) msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE; msb->logical_block_count = msb->zone_count * 496 - 2; - msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL); - msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL); + msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL); + msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL); msb->lba_to_pba_table = kmalloc_array(msb->logical_block_count, sizeof(u16), GFP_KERNEL); if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table || !msb->erased_blocks_bitmap) { - kfree(msb->used_blocks_bitmap); + bitmap_free(msb->used_blocks_bitmap); + bitmap_free(msb->erased_blocks_bitmap); kfree(msb->lba_to_pba_table); - kfree(msb->erased_blocks_bitmap); return -ENOMEM; } @@ -1953,7 +1953,8 @@ static int msb_bd_open(struct block_device *bdev, fmode_t mode) static void msb_data_clear(struct msb_data *msb) { kfree(msb->boot_page); - kfree(msb->used_blocks_bitmap); + bitmap_free(msb->used_blocks_bitmap); + bitmap_free(msb->erased_blocks_bitmap); kfree(msb->lba_to_pba_table); kfree(msb->cache); msb->card = NULL; diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c index e5c8bc998eb4..965820481f1e 100644 --- a/drivers/mfd/davinci_voicecodec.c +++ b/drivers/mfd/davinci_voicecodec.c @@ -46,14 
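[Annotation] ms_block moves to the bitmap allocators, which size the buffer in whole unsigned longs from a bit count; the old kzalloc(block_count / 8) under-allocated whenever the count was not a multiple of BITS_PER_LONG, and bitmap_free() being NULL-safe simplifies the error path. Sketch:

#include <linux/bitmap.h>
#include <linux/gfp.h>

static unsigned long *alloc_block_map(unsigned int nblocks)
{
	/* rounds nblocks up to whole unsigned longs internally */
	return bitmap_zalloc(nblocks, GFP_KERNEL);
}

static void free_block_map(unsigned long *map)
{
	bitmap_free(map);	/* NULL-safe, like kfree() */
}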
+46,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev) } clk_enable(davinci_vc->clk); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - fifo_base = (dma_addr_t)res->start; - davinci_vc->base = devm_ioremap_resource(&pdev->dev, res); + davinci_vc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(davinci_vc->base)) { ret = PTR_ERR(davinci_vc->base); goto fail; } + fifo_base = (dma_addr_t)res->start; davinci_vc->regmap = devm_regmap_init_mmio(&pdev->dev, davinci_vc->base, diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c index a016b39fe9b0..5f1f6f3a0696 100644 --- a/drivers/mfd/fsl-imx25-tsadc.c +++ b/drivers/mfd/fsl-imx25-tsadc.c @@ -69,7 +69,7 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev, int irq; irq = platform_get_irq(pdev, 0); - if (irq <= 0) + if (irq < 0) return irq; tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops, @@ -84,6 +84,19 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev, return 0; } +static int mx25_tsadc_unset_irq(struct platform_device *pdev) +{ + struct mx25_tsadc *tsadc = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + + if (irq >= 0) { + irq_set_chained_handler_and_data(irq, NULL, NULL); + irq_domain_remove(tsadc->domain); + } + + return 0; +} + static void mx25_tsadc_setup_clk(struct platform_device *pdev, struct mx25_tsadc *tsadc) { @@ -171,18 +184,21 @@ static int mx25_tsadc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tsadc); - return devm_of_platform_populate(dev); + ret = devm_of_platform_populate(dev); + if (ret) + goto err_irq; + + return 0; + +err_irq: + mx25_tsadc_unset_irq(pdev); + + return ret; } static int mx25_tsadc_remove(struct platform_device *pdev) { - struct mx25_tsadc *tsadc = platform_get_drvdata(pdev); - int irq = platform_get_irq(pdev, 0); - - if (irq) { - irq_set_chained_handler_and_data(irq, NULL, NULL); - irq_domain_remove(tsadc->domain); - } + mx25_tsadc_unset_irq(pdev); return 0; } diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c index ddd64f9e3341..926653e1f603 100644 --- a/drivers/mfd/intel_soc_pmic_core.c +++ b/drivers/mfd/intel_soc_pmic_core.c @@ -95,6 +95,7 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c, return 0; err_del_irq_chip: + pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup)); regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data); return ret; } diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c index e92eeeb67a98..4cd5ecc72211 100644 --- a/drivers/mfd/ipaq-micro.c +++ b/drivers/mfd/ipaq-micro.c @@ -403,7 +403,7 @@ static int __init micro_probe(struct platform_device *pdev) micro_reset_comm(micro); irq = platform_get_irq(pdev, 0); - if (!irq) + if (irq < 0) return -EINVAL; ret = devm_request_irq(&pdev->dev, irq, micro_serial_isr, IRQF_SHARED, "ipaq-micro", diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c index 348439a3fbbd..39006297f3d2 100644 --- a/drivers/mfd/lp8788-irq.c +++ b/drivers/mfd/lp8788-irq.c @@ -175,6 +175,7 @@ int lp8788_irq_init(struct lp8788 *lp, int irq) IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "lp8788-irq", irqd); if (ret) { + irq_domain_remove(lp->irqdm); dev_err(lp->dev, "failed to create a thread for IRQ_N\n"); return ret; } @@ -188,4 +189,6 @@ void lp8788_irq_exit(struct lp8788 *lp) { if (lp->irq) free_irq(lp->irq, lp->irqdm); + if (lp->irqdm) + irq_domain_remove(lp->irqdm); } diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c index 
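[Annotation] Two fixes here (fsl-imx25-tsadc, ipaq-micro; atmel-ssc gets the same treatment later) correct the platform_get_irq() test: it returns a negative errno on failure and never 0, so the check is irq < 0 and the errno should be propagated. Typical use, names illustrative:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t my_isr(int irq, void *data)	/* illustrative */
{
	return IRQ_HANDLED;
}

static int my_setup_irq(struct platform_device *pdev, void *data)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)		/* not "!irq": 0 is never returned */
		return irq;	/* propagate the errno */

	return devm_request_irq(&pdev->dev, irq, my_isr, 0, "mydev", data);
}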
768d556b3fe9..5c3d642c8e3a 100644 --- a/drivers/mfd/lp8788.c +++ b/drivers/mfd/lp8788.c @@ -195,8 +195,16 @@ static int lp8788_probe(struct i2c_client *cl, const struct i2c_device_id *id) if (ret) return ret; - return mfd_add_devices(lp->dev, -1, lp8788_devs, - ARRAY_SIZE(lp8788_devs), NULL, 0, NULL); + ret = mfd_add_devices(lp->dev, -1, lp8788_devs, + ARRAY_SIZE(lp8788_devs), NULL, 0, NULL); + if (ret) + goto err_exit_irq; + + return 0; + +err_exit_irq: + lp8788_irq_exit(lp); + return ret; } static int lp8788_remove(struct i2c_client *cl) diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c index 3bbb29a7e7a5..2411b7a2e6f4 100644 --- a/drivers/mfd/lpc_ich.c +++ b/drivers/mfd/lpc_ich.c @@ -63,6 +63,8 @@ #define SPIBASE_BYT 0x54 #define SPIBASE_BYT_SZ 512 #define SPIBASE_BYT_EN BIT(1) +#define BYT_BCR 0xfc +#define BYT_BCR_WPD BIT(0) #define SPIBASE_LPT 0x3800 #define SPIBASE_LPT_SZ 512 @@ -1083,12 +1085,57 @@ wdt_done: return ret; } +static bool lpc_ich_byt_set_writeable(void __iomem *base, void *data) +{ + u32 val; + + val = readl(base + BYT_BCR); + if (!(val & BYT_BCR_WPD)) { + val |= BYT_BCR_WPD; + writel(val, base + BYT_BCR); + val = readl(base + BYT_BCR); + } + + return val & BYT_BCR_WPD; +} + +static bool lpc_ich_lpt_set_writeable(void __iomem *base, void *data) +{ + struct pci_dev *pdev = data; + u32 bcr; + + pci_read_config_dword(pdev, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_write_config_dword(pdev, BCR, bcr); + pci_read_config_dword(pdev, BCR, &bcr); + } + + return bcr & BCR_WPD; +} + +static bool lpc_ich_bxt_set_writeable(void __iomem *base, void *data) +{ + unsigned int spi = PCI_DEVFN(13, 2); + struct pci_bus *bus = data; + u32 bcr; + + pci_bus_read_config_dword(bus, spi, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_bus_write_config_dword(bus, spi, BCR, bcr); + pci_bus_read_config_dword(bus, spi, BCR, &bcr); + } + + return bcr & BCR_WPD; +} + static int lpc_ich_init_spi(struct pci_dev *dev) { struct lpc_ich_priv *priv = pci_get_drvdata(dev); struct resource *res = &intel_spi_res[0]; struct intel_spi_boardinfo *info; - u32 spi_base, rcba, bcr; + u32 spi_base, rcba; info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL); if (!info) @@ -1102,6 +1149,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev) if (spi_base & SPIBASE_BYT_EN) { res->start = spi_base & ~(SPIBASE_BYT_SZ - 1); res->end = res->start + SPIBASE_BYT_SZ - 1; + + info->set_writeable = lpc_ich_byt_set_writeable; } break; @@ -1112,8 +1161,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev) res->start = spi_base + SPIBASE_LPT; res->end = res->start + SPIBASE_LPT_SZ - 1; - pci_read_config_dword(dev, BCR, &bcr); - info->writeable = !!(bcr & BCR_WPD); + info->set_writeable = lpc_ich_lpt_set_writeable; + info->data = dev; } break; @@ -1134,8 +1183,8 @@ static int lpc_ich_init_spi(struct pci_dev *dev) res->start = spi_base & 0xfffffff0; res->end = res->start + SPIBASE_APL_SZ - 1; - pci_bus_read_config_dword(bus, spi, BCR, &bcr); - info->writeable = !!(bcr & BCR_WPD); + info->set_writeable = lpc_ich_bxt_set_writeable; + info->data = bus; } pci_bus_write_config_byte(bus, p2sb, 0xe1, 0x1); diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c index fec2096474ad..a6661e07035b 100644 --- a/drivers/mfd/max77620.c +++ b/drivers/mfd/max77620.c @@ -419,9 +419,11 @@ static int max77620_initialise_fps(struct max77620_chip *chip) ret = max77620_config_fps(chip, fps_child); if (ret < 0) { of_node_put(fps_child); + of_node_put(fps_np); return ret; } } + of_node_put(fps_np); config 
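[Annotation] All three new lpc_ich set_writeable callbacks share one read/modify/write/re-read shape: set the write-protect-disable bit, then read it back, because firmware may have locked the register and silently dropped the write. Sketch of the MMIO variant (offset and bit are illustrative):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define MY_BCR		0xfc	/* register offset, illustrative */
#define MY_BCR_WPD	BIT(0)	/* write-protect disable */

static bool my_set_writeable(void __iomem *base)
{
	u32 val = readl(base + MY_BCR);

	if (!(val & MY_BCR_WPD)) {
		writel(val | MY_BCR_WPD, base + MY_BCR);
		val = readl(base + MY_BCR);	/* firmware may veto: verify */
	}
	return val & MY_BCR_WPD;
}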
= chip->enable_global_lpm ? MAX77620_ONOFFCNFG2_SLP_LPM_MSK : 0; ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2, diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index 6d2f4a0a901d..37ad72d8cde2 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -1720,7 +1720,12 @@ static struct platform_driver sm501_plat_driver = { static int __init sm501_base_init(void) { - platform_driver_register(&sm501_plat_driver); + int ret; + + ret = platform_driver_register(&sm501_plat_driver); + if (ret < 0) + return ret; + return pci_register_driver(&sm501_pci_driver); } diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c index 70da0c4ae457..58811c5ab564 100644 --- a/drivers/mfd/t7l66xb.c +++ b/drivers/mfd/t7l66xb.c @@ -405,11 +405,8 @@ err_noirq: static int t7l66xb_remove(struct platform_device *dev) { - struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev); struct t7l66xb *t7l66xb = platform_get_drvdata(dev); - int ret; - ret = pdata->disable(dev); clk_disable_unprepare(t7l66xb->clk48m); clk_put(t7l66xb->clk48m); clk_disable_unprepare(t7l66xb->clk32k); @@ -420,8 +417,7 @@ static int t7l66xb_remove(struct platform_device *dev) mfd_remove_devices(&dev->dev); kfree(t7l66xb); - return ret; - + return 0; } static struct platform_driver t7l66xb_platform_driver = { diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index d6cd5537126c..69f9b0336410 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -232,9 +232,9 @@ static int ssc_probe(struct platform_device *pdev) clk_disable_unprepare(ssc->clk); ssc->irq = platform_get_irq(pdev, 0); - if (!ssc->irq) { + if (ssc->irq < 0) { dev_dbg(&pdev->dev, "could not get irq\n"); - return -ENXIO; + return ssc->irq; } mutex_lock(&user_lock); diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 5d15607027e9..358b000b3a55 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1529,7 +1529,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, pcr->remap_addr = ioremap(base, len); if (!pcr->remap_addr) { ret = -ENOMEM; - goto free_handle; + goto free_idr; } pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev), @@ -1591,6 +1591,10 @@ disable_msi: pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr); unmap: iounmap(pcr->remap_addr); +free_idr: + spin_lock(&rtsx_pci_lock); + idr_remove(&rtsx_pci_idr, pcr->id); + spin_unlock(&rtsx_pci_lock); free_handle: kfree(handle); free_pcr: diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c index 59eda55d92a3..f150d8769f19 100644 --- a/drivers/misc/cardreader/rtsx_usb.c +++ b/drivers/misc/cardreader/rtsx_usb.c @@ -631,16 +631,20 @@ static int rtsx_usb_probe(struct usb_interface *intf, ucr->pusb_dev = usb_dev; - ucr->iobuf = usb_alloc_coherent(ucr->pusb_dev, IOBUF_SIZE, - GFP_KERNEL, &ucr->iobuf_dma); - if (!ucr->iobuf) + ucr->cmd_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL); + if (!ucr->cmd_buf) return -ENOMEM; + ucr->rsp_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL); + if (!ucr->rsp_buf) { + ret = -ENOMEM; + goto out_free_cmd_buf; + } + usb_set_intfdata(intf, ucr); ucr->vendor_id = id->idVendor; ucr->product_id = id->idProduct; - ucr->cmd_buf = ucr->rsp_buf = ucr->iobuf; mutex_init(&ucr->dev_mutex); @@ -667,8 +671,12 @@ static int rtsx_usb_probe(struct usb_interface *intf, return 0; out_init_fail: - usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf, - ucr->iobuf_dma); + usb_set_intfdata(ucr->pusb_intf, NULL); + kfree(ucr->rsp_buf); + ucr->rsp_buf = NULL; 
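The rtsx_usb hunk above swaps the single shared DMA-coherent iobuf for two independently allocated buffers, which forces the error path to unwind in reverse allocation order. A minimal userspace sketch of that goto-unwind pattern, with hypothetical names and plain malloc() standing in for kmalloc():

    #include <stdlib.h>

    struct ucr { void *cmd_buf; void *rsp_buf; };

    /* Allocate both buffers; on failure free only what was obtained. */
    static int ucr_alloc_bufs(struct ucr *u, size_t len)
    {
        u->cmd_buf = malloc(len);
        if (!u->cmd_buf)
            return -1;

        u->rsp_buf = malloc(len);
        if (!u->rsp_buf)
            goto out_free_cmd_buf;  /* unwind in reverse order */

        return 0;

    out_free_cmd_buf:
        free(u->cmd_buf);
        u->cmd_buf = NULL;
        return -1;
    }

NULLing the freed pointers, as the patch does, keeps a later disconnect path from double-freeing.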
+out_free_cmd_buf: + kfree(ucr->cmd_buf); + ucr->cmd_buf = NULL; return ret; } @@ -681,8 +689,12 @@ static void rtsx_usb_disconnect(struct usb_interface *intf) mfd_remove_devices(&intf->dev); usb_set_intfdata(ucr->pusb_intf, NULL); - usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf, - ucr->iobuf_dma); + + kfree(ucr->cmd_buf); + ucr->cmd_buf = NULL; + + kfree(ucr->rsp_buf); + ucr->rsp_buf = NULL; } #ifdef CONFIG_PM diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c index 4cb829d5d873..2e4dcfebf19a 100644 --- a/drivers/misc/cxl/irq.c +++ b/drivers/misc/cxl/irq.c @@ -349,6 +349,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count) out: cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); + bitmap_free(ctx->irq_bitmap); afu_irq_name_free(ctx); return -ENOMEM; } diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c index 3e4a594c110b..6a456645efb0 100644 --- a/drivers/misc/eeprom/idt_89hpesx.c +++ b/drivers/misc/eeprom/idt_89hpesx.c @@ -940,14 +940,18 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf, u32 csraddr, csrval; char *buf; + if (*offp) + return 0; + /* Copy data from User-space */ buf = kmalloc(count + 1, GFP_KERNEL); if (!buf) return -ENOMEM; - ret = simple_write_to_buffer(buf, count, offp, ubuf, count); - if (ret < 0) + if (copy_from_user(buf, ubuf, count)) { + ret = -EFAULT; goto free_buf; + } buf[count] = 0; /* Find position of colon in the buffer */ diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index d0471fec37fb..2c3142b4b5dd 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -1349,17 +1349,18 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_req_munmap *req) { struct fastrpc_invoke_args args[1] = { [0] = { 0 } }; - struct fastrpc_buf *buf, *b; + struct fastrpc_buf *buf = NULL, *iter, *b; struct fastrpc_munmap_req_msg req_msg; struct device *dev = fl->sctx->dev; int err; u32 sc; spin_lock(&fl->lock); - list_for_each_entry_safe(buf, b, &fl->mmaps, node) { - if ((buf->raddr == req->vaddrout) && (buf->size == req->size)) + list_for_each_entry_safe(iter, b, &fl->mmaps, node) { + if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) { + buf = iter; break; - buf = NULL; + } } spin_unlock(&fl->lock); @@ -1547,7 +1548,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev) of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions); spin_lock_irqsave(&cctx->lock, flags); - sess = &cctx->session[cctx->sesscount]; + if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) { + dev_err(&pdev->dev, "too many sessions\n"); + spin_unlock_irqrestore(&cctx->lock, flags); + return -ENOSPC; + } + sess = &cctx->session[cctx->sesscount++]; sess->used = false; sess->valid = true; sess->dev = dev; @@ -1560,13 +1566,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev) struct fastrpc_session_ctx *dup_sess; for (i = 1; i < sessions; i++) { - if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS) + if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) break; - dup_sess = &cctx->session[cctx->sesscount]; + dup_sess = &cctx->session[cctx->sesscount++]; memcpy(dup_sess, sess, sizeof(*dup_sess)); } } - cctx->sesscount++; spin_unlock_irqrestore(&cctx->lock, flags); rc = dma_set_mask(dev, DMA_BIT_MASK(32)); if (rc) { diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index a337f97b30e2..d39b8139b096 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -231,6 +231,11 @@ void lkdtm_ARRAY_BOUNDS(void) not_checked = 
kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL); checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL); + if (!not_checked || !checked) { + kfree(not_checked); + kfree(checked); + return; + } pr_info("Array access within bounds ...\n"); /* For both, touch all bytes in the actual member size. */ diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c index d173d6175c87..8e01c256a9cb 100644 --- a/drivers/misc/lkdtm/usercopy.c +++ b/drivers/misc/lkdtm/usercopy.c @@ -30,12 +30,12 @@ static const unsigned char test_text[] = "This is a test.\n"; */ static noinline unsigned char *trick_compiler(unsigned char *stack) { - return stack + 0; + return stack + unconst; } static noinline unsigned char *do_usercopy_stack_callee(int value) { - unsigned char buf[32]; + unsigned char buf[128]; int i; /* Exercise stack to avoid everything living in registers. */ @@ -43,7 +43,12 @@ static noinline unsigned char *do_usercopy_stack_callee(int value) buf[i] = value & 0xff; } - return trick_compiler(buf); + /* + * Put the target buffer in the middle of stack allocation + * so that we don't step on future stack users regardless + * of stack growth direction. + */ + return trick_compiler(&buf[(128/2)-32]); } static noinline void do_usercopy_stack(bool to_user, bool bad_frame) @@ -66,6 +71,12 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame) bad_stack -= sizeof(unsigned long); } +#ifdef ARCH_HAS_CURRENT_STACK_POINTER + pr_info("stack : %px\n", (void *)current_stack_pointer); +#endif + pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack)); + pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack)); + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, 0); diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index d81d75a20b8f..afb2e78df4d6 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -109,6 +109,8 @@ #define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */ #define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */ +#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index a738253dbd05..5324b65d0d29 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -115,6 +115,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index 4d1b44de1492..e094809b54ff 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -259,6 +259,8 @@ static long afu_ioctl(struct file *file, unsigned int cmd, if (IS_ERR(ev_ctx)) return PTR_ERR(ev_ctx); rc = ocxl_irq_set_handler(ctx, irq_id, irq_handler, irq_free, ev_ctx); + if (rc) + eventfd_ctx_put(ev_ctx); break; case OCXL_IOCTL_GET_METADATA: @@ -558,7 +560,9 @@ int ocxl_file_register_afu(struct ocxl_afu *afu) err_unregister: ocxl_sysfs_unregister_afu(info); // safe to call even if register failed + free_minor(info); device_unregister(&info->dev); + return rc; err_put: ocxl_afu_put(afu); free_minor(info); diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index d3844730eaca..48eec5fe7397 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -331,6 
+331,22 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test, return false; } +static int pci_endpoint_test_validate_xfer_params(struct device *dev, + struct pci_endpoint_test_xfer_param *param, size_t alignment) +{ + if (!param->size) { + dev_dbg(dev, "Data size is zero\n"); + return -EINVAL; + } + + if (param->size > SIZE_MAX - alignment) { + dev_dbg(dev, "Maximum transfer data size exceeded\n"); + return -EINVAL; + } + + return 0; +} + static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, unsigned long arg) { @@ -362,9 +378,11 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, return false; } + err = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment); + if (err) + return false; + size = param.size; - if (size > SIZE_MAX - alignment) - goto err; use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA); if (use_dma) @@ -496,9 +514,11 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, return false; } + err = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment); + if (err) + return false; + size = param.size; - if (size > SIZE_MAX - alignment) - goto err; use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA); if (use_dma) @@ -594,9 +614,11 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, return false; } + err = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment); + if (err) + return false; + size = param.size; - if (size > SIZE_MAX - alignment) - goto err; use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA); if (use_dma) diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c index 56dd98ab5a81..95e56eb2cdd0 100644 --- a/drivers/misc/uacce/uacce.c +++ b/drivers/misc/uacce/uacce.c @@ -9,43 +9,38 @@ static struct class *uacce_class; static dev_t uacce_devt; -static DEFINE_MUTEX(uacce_mutex); static DEFINE_XARRAY_ALLOC(uacce_xa); +/* + * If the parent driver or the device disappears, the queue state is invalid and + * ops are not usable anymore. + */ +static bool uacce_queue_is_valid(struct uacce_queue *q) +{ + return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED; +} + static int uacce_start_queue(struct uacce_queue *q) { - int ret = 0; + int ret; - mutex_lock(&uacce_mutex); - - if (q->state != UACCE_Q_INIT) { - ret = -EINVAL; - goto out_with_lock; - } + if (q->state != UACCE_Q_INIT) + return -EINVAL; if (q->uacce->ops->start_queue) { ret = q->uacce->ops->start_queue(q); if (ret < 0) - goto out_with_lock; + return ret; } q->state = UACCE_Q_STARTED; - -out_with_lock: - mutex_unlock(&uacce_mutex); - - return ret; + return 0; } static int uacce_put_queue(struct uacce_queue *q) { struct uacce_device *uacce = q->uacce; - mutex_lock(&uacce_mutex); - - if (q->state == UACCE_Q_ZOMBIE) - goto out; - if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue) uacce->ops->stop_queue(q); @@ -54,8 +49,6 @@ static int uacce_put_queue(struct uacce_queue *q) uacce->ops->put_queue(q); q->state = UACCE_Q_ZOMBIE; -out: - mutex_unlock(&uacce_mutex); return 0; } @@ -65,20 +58,36 @@ static long uacce_fops_unl_ioctl(struct file *filep, { struct uacce_queue *q = filep->private_data; struct uacce_device *uacce = q->uacce; + long ret = -ENXIO; + + /* + * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from + * user. Avoid a circular lock dependency with uacce_fops_mmap(), which + * gets called with mmap_lock held, by taking uacce->mutex instead of + * q->mutex. 
Doing this in uacce_fops_mmap() is not possible because + * uacce_fops_open() calls iommu_sva_bind_device(), which takes + * mmap_lock, while holding uacce->mutex. + */ + mutex_lock(&uacce->mutex); + if (!uacce_queue_is_valid(q)) + goto out_unlock; switch (cmd) { case UACCE_CMD_START_Q: - return uacce_start_queue(q); - + ret = uacce_start_queue(q); + break; case UACCE_CMD_PUT_Q: - return uacce_put_queue(q); - + ret = uacce_put_queue(q); + break; default: - if (!uacce->ops->ioctl) - return -EINVAL; - - return uacce->ops->ioctl(q, cmd, arg); + if (uacce->ops->ioctl) + ret = uacce->ops->ioctl(q, cmd, arg); + else + ret = -EINVAL; } +out_unlock: + mutex_unlock(&uacce->mutex); + return ret; } #ifdef CONFIG_COMPAT @@ -136,6 +145,13 @@ static int uacce_fops_open(struct inode *inode, struct file *filep) if (!q) return -ENOMEM; + mutex_lock(&uacce->mutex); + + if (!uacce->parent) { + ret = -EINVAL; + goto out_with_mem; + } + ret = uacce_bind_queue(uacce, q); if (ret) goto out_with_mem; @@ -152,10 +168,9 @@ static int uacce_fops_open(struct inode *inode, struct file *filep) filep->private_data = q; uacce->inode = inode; q->state = UACCE_Q_INIT; - - mutex_lock(&uacce->queues_lock); + mutex_init(&q->mutex); list_add(&q->list, &uacce->queues); - mutex_unlock(&uacce->queues_lock); + mutex_unlock(&uacce->mutex); return 0; @@ -163,18 +178,20 @@ out_with_bond: uacce_unbind_queue(q); out_with_mem: kfree(q); + mutex_unlock(&uacce->mutex); return ret; } static int uacce_fops_release(struct inode *inode, struct file *filep) { struct uacce_queue *q = filep->private_data; + struct uacce_device *uacce = q->uacce; - mutex_lock(&q->uacce->queues_lock); - list_del(&q->list); - mutex_unlock(&q->uacce->queues_lock); + mutex_lock(&uacce->mutex); uacce_put_queue(q); uacce_unbind_queue(q); + list_del(&q->list); + mutex_unlock(&uacce->mutex); kfree(q); return 0; @@ -217,10 +234,9 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma) vma->vm_private_data = q; qfr->type = type; - mutex_lock(&uacce_mutex); - - if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) { - ret = -EINVAL; + mutex_lock(&q->mutex); + if (!uacce_queue_is_valid(q)) { + ret = -ENXIO; goto out_with_lock; } @@ -259,12 +275,12 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma) } q->qfrs[type] = qfr; - mutex_unlock(&uacce_mutex); + mutex_unlock(&q->mutex); return ret; out_with_lock: - mutex_unlock(&uacce_mutex); + mutex_unlock(&q->mutex); kfree(qfr); return ret; } @@ -273,12 +289,20 @@ static __poll_t uacce_fops_poll(struct file *file, poll_table *wait) { struct uacce_queue *q = file->private_data; struct uacce_device *uacce = q->uacce; + __poll_t ret = 0; + + mutex_lock(&q->mutex); + if (!uacce_queue_is_valid(q)) + goto out_unlock; poll_wait(file, &q->wait, wait); - if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q)) - return EPOLLIN | EPOLLRDNORM; - return 0; + if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q)) + ret = EPOLLIN | EPOLLRDNORM; + +out_unlock: + mutex_unlock(&q->mutex); + return ret; } static const struct file_operations uacce_fops = { @@ -431,7 +455,7 @@ struct uacce_device *uacce_alloc(struct device *parent, goto err_with_uacce; INIT_LIST_HEAD(&uacce->queues); - mutex_init(&uacce->queues_lock); + mutex_init(&uacce->mutex); device_initialize(&uacce->dev); uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id); uacce->dev.class = uacce_class; @@ -489,13 +513,23 @@ void uacce_remove(struct uacce_device *uacce) if (uacce->inode) 
unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1); + /* + * uacce_fops_open() may be running concurrently, even after we remove + * the cdev. Holding uacce->mutex ensures that open() does not obtain a + * removed uacce device. + */ + mutex_lock(&uacce->mutex); /* ensure no open queue remains */ - mutex_lock(&uacce->queues_lock); list_for_each_entry_safe(q, next_q, &uacce->queues, list) { + /* + * Taking q->mutex ensures that fops do not use the defunct + * uacce->ops after the queue is disabled. + */ + mutex_lock(&q->mutex); uacce_put_queue(q); + mutex_unlock(&q->mutex); uacce_unbind_queue(q); } - mutex_unlock(&uacce->queues_lock); /* disable sva now since no opened queues */ if (uacce->flags & UACCE_DEV_SVA) @@ -504,6 +538,13 @@ void uacce_remove(struct uacce_device *uacce) if (uacce->cdev) cdev_device_del(uacce->cdev, &uacce->dev); xa_erase(&uacce_xa, uacce->dev_id); + /* + * uacce exists as long as there are open fds, but ops will be freed + * now. Ensure that bugs cause NULL deref rather than use-after-free. + */ + uacce->ops = NULL; + uacce->parent = NULL; + mutex_unlock(&uacce->mutex); put_device(&uacce->dev); } EXPORT_SYMBOL_GPL(uacce_remove); diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index a49782dd903c..d4d388f021cc 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -852,6 +852,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle) u32 context_id = vmci_get_context_id(); struct vmci_event_qp ev; + memset(&ev, 0, sizeof(ev)); ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER); ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID); @@ -1465,6 +1466,7 @@ static int qp_notify_peer(bool attach, * kernel. 
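The pair of vmci hunks here zero the on-stack event with memset() before filling in its fields; field-by-field assignment alone would leave compiler-inserted padding bytes holding stale stack data that is then handed to the hypervisor. A tiny illustration of the hazard, with a hypothetical struct (the padding layout is compiler-dependent):

    #include <string.h>

    struct msg {
        char type;      /* typically followed by 3 padding bytes */
        int payload;
    };

    static void msg_prepare(struct msg *m)
    {
        /* Zero the whole object so padding carries no stale data. */
        memset(m, 0, sizeof(*m));
        m->type = 1;
        m->payload = 42;
    }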
*/ + memset(&ev, 0, sizeof(ev)); ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER); ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 8365979197ed..5b3d6dff11ee 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -172,7 +172,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card, unsigned int part_type); static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, - int disable_multi, + int recovery_mode, struct mmc_queue *mq); static void mmc_blk_hsq_req_done(struct mmc_request *mrq); @@ -1085,6 +1085,11 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) nr = blk_rq_sectors(req); do { + unsigned int erase_arg = card->erase_arg; + + if (mmc_card_broken_sd_discard(card)) + erase_arg = SD_ERASE_ARG; + err = 0; if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, @@ -1095,7 +1100,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) card->ext_csd.generic_cmd6_time); } if (!err) - err = mmc_erase(card, from, nr, card->erase_arg); + err = mmc_erase(card, from, nr, erase_arg); } while (err == -EIO && !mmc_blk_reset(md, card->host, type)); if (err) status = BLK_STS_IOERR; @@ -1263,7 +1268,7 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) } static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, - int disable_multi, bool *do_rel_wr_p, + int recovery_mode, bool *do_rel_wr_p, bool *do_data_tag_p) { struct mmc_blk_data *md = mq->blkdata; @@ -1329,12 +1334,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, brq->data.blocks--; /* - * After a read error, we redo the request one sector + * After a read error, we redo the request one (native) sector * at a time in order to accurately determine which * sectors can be read successfully. 
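These block-layer hunks replace the hard-coded 512-byte retry step with the queue's physical block size, so a card with 4K native sectors is retried one native sector per attempt. The recovery loop reduces to a sketch like the following, where read_one() is a hypothetical stand-in for issuing a single-sector request:

    #include <stdbool.h>
    #include <stddef.h>

    /* Retry a failed read one native sector at a time to find the bad one. */
    static size_t recover_read(size_t start, size_t len, size_t phys_bs,
                               bool (*read_one)(size_t off, size_t n))
    {
        size_t done = 0;

        while (done < len && read_one(start + done, phys_bs))
            done += phys_bs;

        return done;    /* bytes successfully recovered */
    }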
*/ - if (disable_multi) - brq->data.blocks = 1; + if (recovery_mode) + brq->data.blocks = queue_physical_block_size(mq->queue) >> 9; /* * Some controllers have HW issues while operating @@ -1460,8 +1465,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq) err = mmc_cqe_recovery(host); if (err) mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); - else - mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); + mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); } @@ -1552,7 +1556,7 @@ static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req) static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, - int disable_multi, + int recovery_mode, struct mmc_queue *mq) { u32 readcmd, writecmd; @@ -1561,7 +1565,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_blk_data *md = mq->blkdata; bool do_rel_wr, do_data_tag; - mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag); + mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag); brq->mrq.cmd = &brq->cmd; @@ -1652,7 +1656,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req) #define MMC_READ_SINGLE_RETRIES 2 -/* Single sector read during recovery */ +/* Single (native) sector read during recovery */ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) { struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); @@ -1660,6 +1664,7 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) struct mmc_card *card = mq->card; struct mmc_host *host = card->host; blk_status_t error = BLK_STS_OK; + size_t bytes_per_read = queue_physical_block_size(mq->queue); do { u32 status; @@ -1694,13 +1699,13 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) else error = BLK_STS_OK; - } while (blk_update_request(req, error, 512)); + } while (blk_update_request(req, error, bytes_per_read)); return; error_exit: mrq->data->bytes_xfered = 0; - blk_update_request(req, BLK_STS_IOERR, 512); + blk_update_request(req, BLK_STS_IOERR, bytes_per_read); /* Let it try the remaining request again */ if (mqrq->retries > MMC_MAX_RETRIES - 1) mqrq->retries = MMC_MAX_RETRIES - 1; @@ -1842,10 +1847,9 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) return; } - /* FIXME: Missing single sector read for large sector size */ - if (!mmc_large_sector(card) && rq_data_dir(req) == READ && - brq->data.blocks > 1) { - /* Read one sector at a time */ + if (rq_data_dir(req) == READ && brq->data.blocks > + queue_physical_block_size(mq->queue) >> 9) { + /* Read one (native) sector at a time */ mmc_blk_read_single(mq, req); return; } diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h index d07478b182c7..193df83a3b18 100644 --- a/drivers/mmc/core/card.h +++ b/drivers/mmc/core/card.h @@ -70,6 +70,7 @@ struct mmc_fixup { #define EXT_CSD_REV_ANY (-1u) #define CID_MANFID_SANDISK 0x2 +#define CID_MANFID_SANDISK_SD 0x3 #define CID_MANFID_ATP 0x9 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 @@ -223,4 +224,9 @@ static inline int mmc_card_broken_hpi(const struct mmc_card *c) return c->quirks & MMC_QUIRK_BROKEN_HPI; } +static inline int mmc_card_broken_sd_discard(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD; +} + #endif diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index cada7e9b83fd..fd482d06de72 100644 --- a/drivers/mmc/core/core.c +++ 
b/drivers/mmc/core/core.c @@ -1131,7 +1131,13 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) mmc_power_cycle(host, ocr); } else { bit = fls(ocr) - 1; - ocr &= 3 << bit; + /* + * The bit variable represents the highest voltage bit set in + * the OCR register. + * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V), + * we must shift the mask '3' with (bit - 1). + */ + ocr &= 3 << (bit - 1); if (bit != host->ios.vdd) dev_warn(mmc_dev(host), "exceeding card's volts\n"); } @@ -1537,6 +1543,11 @@ void mmc_init_erase(struct mmc_card *card) card->pref_erase = 0; } +static bool is_trim_arg(unsigned int arg) +{ + return (arg & MMC_TRIM_OR_DISCARD_ARGS) && arg != MMC_DISCARD_ARG; +} + static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, unsigned int arg, unsigned int qty) { @@ -1835,7 +1846,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)) return -EOPNOTSUPP; - if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) && + if (mmc_card_mmc(card) && is_trim_arg(arg) && !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)) return -EOPNOTSUPP; @@ -1865,7 +1876,7 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, * identified by the card->eg_boundary flag. */ rem = card->erase_size - (from % card->erase_size); - if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) { + if ((arg & MMC_TRIM_OR_DISCARD_ARGS) && card->eg_boundary && nr > rem) { err = mmc_do_erase(card, from, from + rem - 1, arg); from += rem; if ((err) || (to <= from)) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index b33d2c98a11f..17ece18fadf3 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1462,9 +1462,13 @@ static int mmc_select_hs400es(struct mmc_card *card) goto out_err; } + /* + * Bump to HS timing and frequency. Some cards don't handle + * SEND_STATUS reliably at the initial frequency. + */ mmc_set_timing(host, MMC_TIMING_MMC_HS); /* Set clock immediately after changing timing */ - mmc_set_clock(host, card->ext_csd.hs_max_dtr); + mmc_set_bus_speed(card); err = mmc_switch_status(card, true); if (err) @@ -1527,7 +1531,7 @@ out_err: static int mmc_select_hs200(struct mmc_card *card) { struct mmc_host *host = card->host; - unsigned int old_timing, old_signal_voltage; + unsigned int old_timing, old_signal_voltage, old_clock; int err = -EINVAL; u8 val; @@ -1558,8 +1562,17 @@ static int mmc_select_hs200(struct mmc_card *card) false, true); if (err) goto err; + + /* + * Bump to HS timing and frequency. Some cards don't handle + * SEND_STATUS reliably at the initial frequency. + * NB: We can't move to full (HS200) speeds until after we've + * successfully switched over. + */ old_timing = host->ios.timing; + old_clock = host->ios.clock; mmc_set_timing(host, MMC_TIMING_MMC_HS200); + mmc_set_clock(card->host, card->ext_csd.hs_max_dtr); /* * For HS200, CRC errors are not a reliable way to know the @@ -1572,8 +1585,10 @@ static int mmc_select_hs200(struct mmc_card *card) * mmc_select_timing() assumes timing has not changed if * it is a switch error. 
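The core.c hunk above narrows the OCR mask: with bit = fls(ocr) - 1 pointing at the highest supported voltage bit, `3 << (bit - 1)` keeps that bit plus the one below it, i.e. a window of two adjacent voltage ranges, where the old `3 << bit` reached one bit above the supported range. A worked example in userspace, with a GCC builtin standing in for the kernel's fls():

    #include <stdio.h>

    int main(void)
    {
        unsigned int ocr = 0x00ff8000;          /* bits 15..23 set */
        int bit = 31 - __builtin_clz(ocr);      /* fls(ocr) - 1 = 23 */

        /* Keep only the two highest supported voltage ranges. */
        unsigned int mask = 3u << (bit - 1);

        printf("bit=%d mask=0x%08x ocr&mask=0x%08x\n",
               bit, mask, ocr & mask);          /* mask=0x00c00000 */
        return 0;
    }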
*/ - if (err == -EBADMSG) + if (err == -EBADMSG) { + mmc_set_clock(host, old_clock); mmc_set_timing(host, old_timing); + } } err: if (err) { diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index 152e7525ed33..b9b6f000154b 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -3195,7 +3195,8 @@ static int __mmc_test_register_dbgfs_file(struct mmc_card *card, struct mmc_test_dbgfs_file *df; if (card->debugfs_root) - debugfs_create_file(name, mode, card->debugfs_root, card, fops); + file = debugfs_create_file(name, mode, card->debugfs_root, + card, fops); df = kmalloc(sizeof(*df), GFP_KERNEL); if (!df) { diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index f5c6dfc1bd65..88b654a15554 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -101,6 +101,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = { MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_TRIM_BROKEN), + /* + * Some SD cards report discard support even though they don't + */ + MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd, + MMC_QUIRK_BROKEN_SD_DISCARD), + END_FIXUP }; diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 3d1d13729239..e7f2eebd8bfb 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -857,7 +857,8 @@ try_again: * the CCS bit is set as well. We deliberately deviate from the spec in * regards to this, which allows UHS-I to be supported for SDSC cards. */ - if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) { + if (!mmc_host_is_spi(host) && (ocr & SD_OCR_S18R) && + rocr && (*rocr & SD_ROCR_S18A)) { err = mmc_set_uhs_voltage(host, pocr); if (err == -EAGAIN) { retries--; @@ -936,15 +937,16 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, /* Erase init depends on CSD and SSR */ mmc_init_erase(card); - - /* - * Fetch switch information from card. - */ - err = mmc_read_switch(card); - if (err) - return err; } + /* + * Fetch switch information from card. Note that sd3_bus_mode can change + * if the voltage switch outcome changes, so do this always. + */ + err = mmc_read_switch(card); + if (err) + return err; + /* * For SPI, enable CRC as appropriate. * This CRC enable is located AFTER the reading of the @@ -1093,26 +1095,15 @@ retry: if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) && mmc_sd_card_using_v18(card) && host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) { - /* - * Re-read switch information in case it has changed since - * oldcard was initialized.
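The quirks.h hunk above adds a table entry keyed on CID fields (manufacturer id 0x3, OEM id 0x5344, which is ASCII "SD"). The table-driven quirk lookup amounts to the following sketch; the struct layout and helper are simplified and hypothetical:

    #include <stdint.h>

    #define QUIRK_BROKEN_SD_DISCARD (1u << 0)

    struct fixup { uint8_t manfid; uint16_t oemid; unsigned int quirk; };

    static const struct fixup fixups[] = {
        { 0x03 /* SanDisk SD */, 0x5344, QUIRK_BROKEN_SD_DISCARD },
        { 0 }   /* sentinel */
    };

    /* Collect all quirks whose CID keys match this card. */
    static unsigned int quirks_for(uint8_t manfid, uint16_t oemid)
    {
        unsigned int q = 0;

        for (const struct fixup *f = fixups; f->manfid; f++)
            if (f->manfid == manfid && f->oemid == oemid)
                q |= f->quirk;
        return q;
    }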
- */ - if (oldcard) { - err = mmc_read_switch(card); - if (err) - goto free_card; - } - if (mmc_sd_card_using_v18(card)) { - if (mmc_host_set_uhs_voltage(host) || - mmc_sd_init_uhs_card(card)) { - v18_fixup_failed = true; - mmc_power_cycle(host, ocr); - if (!oldcard) - mmc_remove_card(card); - goto retry; - } - goto done; + if (mmc_host_set_uhs_voltage(host) || + mmc_sd_init_uhs_card(card)) { + v18_fixup_failed = true; + mmc_power_cycle(host, ocr); + if (!oldcard) + mmc_remove_card(card); + goto retry; } + goto cont; } /* Initialization sequence for UHS-I cards */ @@ -1147,7 +1138,7 @@ retry: mmc_set_bus_width(host, MMC_BUS_WIDTH_4); } } - +cont: if (host->cqe_ops && !host->cqe_enabled) { err = host->cqe_ops->cqe_enable(host, card); if (!err) { @@ -1165,7 +1156,7 @@ retry: err = -EINVAL; goto free_card; } -done: + host->card = card; return 0; diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 3d709029e07c..a448535c1265 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -292,7 +292,8 @@ static void sdio_release_func(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); - sdio_free_func_cis(func); + if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO)) + sdio_free_func_cis(func); kfree(func->info); kfree(func->tmpbuf); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 3c308ba3aaeb..c40a1d494bc1 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -1081,9 +1081,10 @@ config MMC_SDHCI_OMAP config MMC_SDHCI_AM654 tristate "Support for the SDHCI Controller in TI's AM654 SOCs" - depends on MMC_SDHCI_PLTFM && OF && REGMAP_MMIO + depends on MMC_SDHCI_PLTFM && OF select MMC_SDHCI_IO_ACCESSORS select MMC_CQHCI + select REGMAP_MMIO help This selects the Secure Digital Host Controller Interface (SDHCI) support present in TI's AM654 SOCs. 
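Back in the sd.c hunk above, the v18 fallback is preserved: if switching to 1.8V signaling or the UHS init fails, the card is power-cycled and initialization is retried once with the UHS path suppressed. The control flow reduces to a sketch with hypothetical init_uhs()/init_legacy() helpers:

    #include <stdbool.h>

    static int init_card(bool (*init_uhs)(void), bool (*init_legacy)(void))
    {
        bool v18_fixup_failed = false;

    retry:
        if (!v18_fixup_failed && !init_uhs()) {
            v18_fixup_failed = true;    /* power cycle happens here */
            goto retry;                 /* retry without the UHS path */
        }

        return init_legacy() ? 0 : -1;  /* common init always runs */
    }

The flag guarantees at most one retry, so a card that genuinely cannot do UHS still completes legacy initialization.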
The controller supports diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index bd00515fbaba..56a3bf51d446 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -1097,8 +1097,9 @@ out5: if (host->platdata && host->platdata->cd_setup && !(mmc->caps & MMC_CAP_NEEDS_POLL)) host->platdata->cd_setup(mmc, 0); -out_clk: + clk_disable_unprepare(host->clk); +out_clk: clk_put(host->clk); out_irq: free_irq(host->irq, host); diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c index 2c4b2df52adb..12dca91a8ef6 100644 --- a/drivers/mmc/host/cavium-octeon.c +++ b/drivers/mmc/host/cavium-octeon.c @@ -277,6 +277,7 @@ static int octeon_mmc_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Error populating slots\n"); octeon_mmc_set_shared_power(host, 0); + of_node_put(cn); goto error; } i++; diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c index 76013bbbcff3..202b1d6da678 100644 --- a/drivers/mmc/host/cavium-thunderx.c +++ b/drivers/mmc/host/cavium-thunderx.c @@ -142,8 +142,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev, continue; ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host); - if (ret) + if (ret) { + of_node_put(child_node); goto error; + } } i++; } diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index a1f92fed2a55..aa3dfb9c1071 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -236,6 +236,26 @@ static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host) return PTR_ERR(host->dma_rx); } + /* + * Limit the maximum segment size in any SG entry according to + * the parameters of the DMA engine device. + */ + if (host->dma_tx) { + struct device *dev = host->dma_tx->device->dev; + unsigned int max_seg_size = dma_get_max_seg_size(dev); + + if (max_seg_size < host->mmc->max_seg_size) + host->mmc->max_seg_size = max_seg_size; + } + + if (host->dma_rx) { + struct device *dev = host->dma_rx->device->dev; + unsigned int max_seg_size = dma_get_max_seg_size(dev); + + if (max_seg_size < host->mmc->max_seg_size) + host->mmc->max_seg_size = max_seg_size; + } + return 0; } diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 091e0e051d10..bccc85b3fc50 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1161,8 +1161,10 @@ static int meson_mmc_probe(struct platform_device *pdev) } ret = device_reset_optional(&pdev->dev); - if (ret) - return dev_err_probe(&pdev->dev, ret, "device reset failed\n"); + if (ret) { + dev_err_probe(&pdev->dev, ret, "device reset failed\n"); + goto free_host; + } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); host->regs = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c index a75d3dd34d18..4cceb9bab036 100644 --- a/drivers/mmc/host/mmci_stm32_sdmmc.c +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c @@ -62,8 +62,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host, * excepted the last element which has no constraint on idmasize */ for_each_sg(data->sg, sg, data->sg_len - 1, i) { - if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) || - !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) { + if (!IS_ALIGNED(sg->offset, sizeof(u32)) || + !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) { dev_err(mmc_dev(host->mmc), "unaligned scatterlist: ofst:%x length:%d\n", data->sg->offset, data->sg->length); @@ -71,7 +71,7 @@ static int 
sdmmc_idma_validate_data(struct mmci_host *host, } } - if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) { + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { dev_err(mmc_dev(host->mmc), "unaligned last scatterlist: ofst:%x length:%d\n", data->sg->offset, data->sg->length); diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index ea67a7ef2390..c16300b92139 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -111,8 +111,8 @@ #define CLK_DIV_MASK 0x7f /* REG_BUS_WIDTH */ -#define BUS_WIDTH_8 BIT(2) -#define BUS_WIDTH_4 BIT(1) +#define BUS_WIDTH_4_SUPPORT BIT(3) +#define BUS_WIDTH_4 BIT(2) #define BUS_WIDTH_1 BIT(0) #define MMC_VDD_360 23 @@ -527,9 +527,6 @@ static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) case MMC_BUS_WIDTH_4: writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH); break; - case MMC_BUS_WIDTH_8: - writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH); - break; default: writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH); break; @@ -654,16 +651,8 @@ static int moxart_probe(struct platform_device *pdev) dmaengine_slave_config(host->dma_chan_rx, &cfg); } - switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) { - case 1: + if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT) mmc->caps |= MMC_CAP_4_BIT_DATA; - break; - case 2: - mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; - break; - default: - break; - } writel(0, host->base + REG_INTERRUPT_MASK); diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index f5c965da9501..d71c113f428f 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -2293,6 +2293,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery) /* disable busy check */ sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL); + val = readl(host->base + MSDC_INT); + writel(val, host->base + MSDC_INT); + if (recovery) { sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1); @@ -2693,11 +2696,14 @@ static int __maybe_unused msdc_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); int ret; + u32 val; if (mmc->caps2 & MMC_CAP2_CQE) { ret = cqhci_suspend(mmc); if (ret) return ret; + val = readl(((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT); + writel(val, ((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT); } return pm_runtime_force_suspend(dev); diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 316393c694d7..55868b6b8658 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -648,7 +648,7 @@ static int pxamci_probe(struct platform_device *pdev) ret = pxamci_of_init(pdev, mmc); if (ret) - return ret; + goto out; host = mmc_priv(mmc); host->mmc = mmc; @@ -672,7 +672,7 @@ static int pxamci_probe(struct platform_device *pdev) ret = pxamci_init_ocr(host); if (ret < 0) - return ret; + goto out; mmc->caps = 0; host->cmdat = 0; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 782879d46ff4..ac01fb518386 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -390,10 +390,10 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc) SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) | sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2)); - /* Set the sampling clock selection range of HS400 mode */ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL, SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN | - 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT); + sd_scc_read32(host, priv, + SH_MOBILE_SDHI_SCC_DTCNTL)); /* 
Avoid bad TAP */ if (bad_taps & BIT(priv->tap_set)) { diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index e00167bcfaf6..b5cb83bb9b85 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -37,10 +37,7 @@ struct realtek_pci_sdmmc { bool double_clk; bool eject; bool initial_mode; - int power_state; -#define SDMMC_POWER_ON 1 -#define SDMMC_POWER_OFF 0 - + int prev_power_state; int sg_count; s32 cookie; int cookie_sg_count; @@ -902,14 +899,21 @@ static int sd_set_bus_width(struct realtek_pci_sdmmc *host, return err; } -static int sd_power_on(struct realtek_pci_sdmmc *host) +static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode) { struct rtsx_pcr *pcr = host->pcr; int err; - if (host->power_state == SDMMC_POWER_ON) + if (host->prev_power_state == MMC_POWER_ON) return 0; + if (host->prev_power_state == MMC_POWER_UP) { + rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0); + goto finish; + } + + msleep(100); + rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE, @@ -928,11 +932,17 @@ static int sd_power_on(struct realtek_pci_sdmmc *host) if (err < 0) return err; + mdelay(1); + err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN); if (err < 0) return err; - host->power_state = SDMMC_POWER_ON; + /* send at least 74 clocks */ + rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN); + +finish: + host->prev_power_state = power_mode; return 0; } @@ -941,7 +951,7 @@ static int sd_power_off(struct realtek_pci_sdmmc *host) struct rtsx_pcr *pcr = host->pcr; int err; - host->power_state = SDMMC_POWER_OFF; + host->prev_power_state = MMC_POWER_OFF; rtsx_pci_init_cmd(pcr); @@ -967,7 +977,7 @@ static int sd_set_power_mode(struct realtek_pci_sdmmc *host, if (power_mode == MMC_POWER_OFF) err = sd_power_off(host); else - err = sd_power_on(host); + err = sd_power_on(host, power_mode); return err; } @@ -1404,10 +1414,11 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) host = mmc_priv(mmc); host->pcr = pcr; + mmc->ios.power_delay_ms = 5; host->mmc = mmc; host->pdev = pdev; host->cookie = -1; - host->power_state = SDMMC_POWER_OFF; + host->prev_power_state = MMC_POWER_OFF; INIT_WORK(&host->work, sd_request); platform_set_drvdata(pdev, host); pcr->slots[RTSX_SD_CARD].p_dev = pdev; diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index f24623aac2db..4d42b1810ace 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -12,28 +12,55 @@ #include #include +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" #include "cqhci.h" #define SDHCI_VENDOR 0x78 #define SDHCI_VENDOR_ENHANCED_STRB 0x1 +#define SDHCI_VENDOR_GATE_SDCLK_EN 0x2 -#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0) -#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1) +#define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0) +#define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1) +#define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE BIT(2) + +#define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0) +#define BRCMSTB_PRIV_FLAGS_GATE_CLOCK BIT(1) #define SDHCI_ARASAN_CQE_BASE_ADDR 0x200 struct sdhci_brcmstb_priv { void __iomem *cfg_regs; - bool has_cqe; + unsigned int flags; }; struct brcmstb_match_priv { void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios); struct sdhci_ops *ops; - unsigned int flags; + const unsigned int flags; }; +static inline void enable_clock_gating(struct sdhci_host 
*host) +{ + u32 reg; + + reg = sdhci_readl(host, SDHCI_VENDOR); + reg |= SDHCI_VENDOR_GATE_SDCLK_EN; + sdhci_writel(host, reg, SDHCI_VENDOR); +} + +void brcmstb_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); + + sdhci_and_cqhci_reset(host, mask); + + /* Reset will clear this, so re-enable it */ + if (priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK) + enable_clock_gating(host); +} + static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios) { struct sdhci_host *host = mmc_priv(mmc); @@ -129,22 +156,23 @@ static struct sdhci_ops sdhci_brcmstb_ops = { static struct sdhci_ops sdhci_brcmstb_ops_7216 = { .set_clock = sdhci_brcmstb_set_clock, .set_bus_width = sdhci_set_bus_width, - .reset = sdhci_reset, + .reset = brcmstb_reset, .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, }; static struct brcmstb_match_priv match_priv_7425 = { - .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT | - BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT, + .flags = BRCMSTB_MATCH_FLAGS_NO_64BIT | + BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT, .ops = &sdhci_brcmstb_ops, }; static struct brcmstb_match_priv match_priv_7445 = { - .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT, + .flags = BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT, .ops = &sdhci_brcmstb_ops, }; static const struct brcmstb_match_priv match_priv_7216 = { + .flags = BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE, .hs400es = sdhci_brcmstb_hs400es, .ops = &sdhci_brcmstb_ops_7216, }; @@ -176,7 +204,7 @@ static int sdhci_brcmstb_add_host(struct sdhci_host *host, bool dma64; int ret; - if (!priv->has_cqe) + if ((priv->flags & BRCMSTB_PRIV_FLAGS_HAS_CQE) == 0) return sdhci_add_host(host); dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n"); @@ -225,7 +253,6 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) struct sdhci_brcmstb_priv *priv; struct sdhci_host *host; struct resource *iomem; - bool has_cqe = false; struct clk *clk; int res; @@ -244,10 +271,6 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) return res; memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata)); - if (device_property_read_bool(&pdev->dev, "supports-cqe")) { - has_cqe = true; - match_priv->ops->irq = sdhci_brcmstb_cqhci_irq; - } brcmstb_pdata.ops = match_priv->ops; host = sdhci_pltfm_init(pdev, &brcmstb_pdata, sizeof(struct sdhci_brcmstb_priv)); @@ -258,7 +281,10 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) pltfm_host = sdhci_priv(host); priv = sdhci_pltfm_priv(pltfm_host); - priv->has_cqe = has_cqe; + if (device_property_read_bool(&pdev->dev, "supports-cqe")) { + priv->flags |= BRCMSTB_PRIV_FLAGS_HAS_CQE; + match_priv->ops->irq = sdhci_brcmstb_cqhci_irq; + } /* Map in the non-standard CFG registers */ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -273,6 +299,14 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) if (res) goto err; + /* + * Automatic clock gating does not work for SD cards that may + * voltage switch so only enable it for non-removable devices. + */ + if ((match_priv->flags & BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE) && + (host->mmc->caps & MMC_CAP_NONREMOVABLE)) + priv->flags |= BRCMSTB_PRIV_FLAGS_GATE_CLOCK; + /* * If the chip has enhanced strobe and it's enabled, add * callback @@ -287,14 +321,14 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) * properties through mmc_of_parse(). 
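brcmstb_reset() above exists because a controller reset clears the vendor clock-gate enable, so the bit has to be set again by read-modify-write after every reset. The generic pattern, sketched over hypothetical MMIO accessors:

    #include <stdint.h>

    #define VENDOR_GATE_SDCLK_EN (1u << 1)

    /* Re-assert a vendor bit that a controller reset clears. */
    static void reset_and_regate(volatile uint32_t *vendor_reg,
                                 void (*do_reset)(void))
    {
        do_reset();                     /* clears VENDOR_GATE_SDCLK_EN */

        uint32_t reg = *vendor_reg;     /* read... */
        reg |= VENDOR_GATE_SDCLK_EN;    /* ...modify... */
        *vendor_reg = reg;              /* ...write back */
    }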
*/ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); - if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT) + if (match_priv->flags & BRCMSTB_MATCH_FLAGS_NO_64BIT) host->caps &= ~SDHCI_CAN_64BIT; host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_DDR50); host->quirks |= SDHCI_QUIRK_MISSING_CAPS; - if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT) + if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT) host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; res = sdhci_brcmstb_add_host(host, priv); diff --git a/drivers/mmc/host/sdhci-cqhci.h b/drivers/mmc/host/sdhci-cqhci.h new file mode 100644 index 000000000000..cf8e7ba71bbd --- /dev/null +++ b/drivers/mmc/host/sdhci-cqhci.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2022 The Chromium OS Authors + * + * Support that applies to the combination of SDHCI and CQHCI, while not + * expressing a dependency between the two modules. + */ + +#ifndef __MMC_HOST_SDHCI_CQHCI_H__ +#define __MMC_HOST_SDHCI_CQHCI_H__ + +#include "cqhci.h" +#include "sdhci.h" + +static inline void sdhci_and_cqhci_reset(struct sdhci_host *host, u8 mask) +{ + if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) && + host->mmc->cqe_private) + cqhci_deactivate(host->mmc); + + sdhci_reset(host, mask); +} + +#endif /* __MMC_HOST_SDHCI_CQHCI_H__ */ diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index a4bd85b200a3..9e827bfe19ff 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -26,6 +26,7 @@ #include #include #include +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" #include "cqhci.h" @@ -294,22 +295,6 @@ struct pltfm_imx_data { struct pm_qos_request pm_qos_req; }; -static const struct platform_device_id imx_esdhc_devtype[] = { - { - .name = "sdhci-esdhc-imx25", - .driver_data = (kernel_ulong_t) &esdhc_imx25_data, - }, { - .name = "sdhci-esdhc-imx35", - .driver_data = (kernel_ulong_t) &esdhc_imx35_data, - }, { - .name = "sdhci-esdhc-imx51", - .driver_data = (kernel_ulong_t) &esdhc_imx51_data, - }, { - /* sentinel */ - } -}; -MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype); - static const struct of_device_id imx_esdhc_dt_ids[] = { { .compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, }, { .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, }, @@ -1243,7 +1228,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) static void esdhc_reset(struct sdhci_host *host, u8 mask) { - sdhci_reset(host, mask); + sdhci_and_cqhci_reset(host, mask); sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); @@ -1464,7 +1449,7 @@ static void esdhc_cqe_enable(struct mmc_host *mmc) * system resume back. 
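The esdhc hunk just below corrects a classic operator slip: `cqhci_readl(...) && CQHCI_HALT` is a boolean test that is true whenever the register is non-zero, while the intended `& CQHCI_HALT` isolates the halt bit. In miniature:

    #include <stdio.h>

    #define CQHCI_HALT (1u << 0)

    int main(void)
    {
        unsigned int ctl = 0x2;     /* HALT bit clear, another bit set */

        printf("&& : %d\n", ctl && CQHCI_HALT);    /* 1 -- wrong: any bit */
        printf("&  : %d\n", !!(ctl & CQHCI_HALT)); /* 0 -- tests HALT only */
        return 0;
    }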
*/ cqhci_writel(cq_host, 0, CQHCI_CTL); - if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) + if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) dev_err(mmc_dev(host->mmc), "failed to exit halt state when enable CQE\n"); @@ -1545,72 +1530,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, } #endif -static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, - struct sdhci_host *host, - struct pltfm_imx_data *imx_data) -{ - struct esdhc_platform_data *boarddata = &imx_data->boarddata; - int err; - - if (!host->mmc->parent->platform_data) { - dev_err(mmc_dev(host->mmc), "no board data!\n"); - return -EINVAL; - } - - imx_data->boarddata = *((struct esdhc_platform_data *) - host->mmc->parent->platform_data); - /* write_protect */ - if (boarddata->wp_type == ESDHC_WP_GPIO) { - host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - - err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0); - if (err) { - dev_err(mmc_dev(host->mmc), - "failed to request write-protect gpio!\n"); - return err; - } - } - - /* card_detect */ - switch (boarddata->cd_type) { - case ESDHC_CD_GPIO: - err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); - if (err) { - dev_err(mmc_dev(host->mmc), - "failed to request card-detect gpio!\n"); - return err; - } - fallthrough; - - case ESDHC_CD_CONTROLLER: - /* we have a working card_detect back */ - host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; - break; - - case ESDHC_CD_PERMANENT: - host->mmc->caps |= MMC_CAP_NONREMOVABLE; - break; - - case ESDHC_CD_NONE: - break; - } - - switch (boarddata->max_bus_width) { - case 8: - host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; - break; - case 4: - host->mmc->caps |= MMC_CAP_4_BIT_DATA; - break; - case 1: - default: - host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; - break; - } - - return 0; -} - static int sdhci_esdhc_imx_probe(struct platform_device *pdev) { const struct of_device_id *of_id = @@ -1630,8 +1549,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) imx_data = sdhci_pltfm_priv(pltfm_host); - imx_data->socdata = of_id ? 
of_id->data : (struct esdhc_soc_data *) - pdev->id_entry->driver_data; + imx_data->socdata = of_id->data; if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS) cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0); @@ -1692,6 +1610,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) host->mmc_host_ops.execute_tuning = usdhc_execute_tuning; } + err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); + if (err) + goto disable_ahb_clk; + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) sdhci_esdhc_ops.platform_execute_tuning = esdhc_executing_tuning; @@ -1699,13 +1621,15 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; - if (imx_data->socdata->flags & ESDHC_FLAG_HS400) + if (host->mmc->caps & MMC_CAP_8_BIT_DATA && + imx_data->socdata->flags & ESDHC_FLAG_HS400) host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400; if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23) host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN; - if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { + if (host->mmc->caps & MMC_CAP_8_BIT_DATA && + imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { host->mmc->caps2 |= MMC_CAP2_HS400_ES; host->mmc_host_ops.hs400_enhanced_strobe = esdhc_hs400_enhanced_strobe; @@ -1727,13 +1651,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) goto disable_ahb_clk; } - if (of_id) - err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); - else - err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data); - if (err) - goto disable_ahb_clk; - sdhci_esdhc_imx_hwinit(host); err = sdhci_add_host(host); @@ -1944,7 +1861,6 @@ static struct platform_driver sdhci_esdhc_imx_driver = { .of_match_table = imx_esdhc_dt_ids, .pm = &sdhci_esdhc_pmops, }, - .id_table = imx_esdhc_devtype, .probe = sdhci_esdhc_imx_probe, .remove = sdhci_esdhc_imx_remove, }; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index bef081d35305..559795530c70 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "sdhci-pltfm.h" #include "cqhci.h" @@ -2441,6 +2442,7 @@ static const struct sdhci_msm_variant_info sm8250_sdhci_var = { static const struct of_device_id sdhci_msm_dt_match[] = { {.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var}, {.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var}, + {.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var}, {.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var}, {.compatible = "qcom,sm8250-sdhci", .data = &sm8250_sdhci_var}, {.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var}, @@ -2488,6 +2490,43 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev, of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config); } +static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host) +{ + struct reset_control *reset; + int ret = 0; + + reset = reset_control_get_optional_exclusive(dev, NULL); + if (IS_ERR(reset)) + return dev_err_probe(dev, PTR_ERR(reset), + "unable to acquire core_reset\n"); + + if (!reset) + return ret; + + ret = reset_control_assert(reset); + if (ret) { + reset_control_put(reset); + return dev_err_probe(dev, ret, "core_reset assert failed\n"); + } + + /* + * The hardware requirement for delay between assert/deassert + * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to + * ~125us (4/32768). To be on the safe side add 200us delay. 
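Spelling out the arithmetic in that comment: one cycle of the 32.768 kHz sleep clock is 1/32768 s, so four cycles come to about 122 µs, which the patch rounds up to a 200–210 µs usleep_range() for margin:

    #include <stdio.h>

    int main(void)
    {
        double cycle_us = 1e6 / 32768.0;    /* one sleep-clock cycle */

        printf("4 cycles = %.1f us\n", 4 * cycle_us);   /* ~122.1 us */
        return 0;
    }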
+ */ + usleep_range(200, 210); + + ret = reset_control_deassert(reset); + if (ret) { + reset_control_put(reset); + return dev_err_probe(dev, ret, "core_reset deassert failed\n"); + } + + usleep_range(200, 210); + reset_control_put(reset); + + return ret; +} static int sdhci_msm_probe(struct platform_device *pdev) { @@ -2536,6 +2575,10 @@ static int sdhci_msm_probe(struct platform_device *pdev) msm_host->saved_tuning_phase = INVALID_TUNING_PHASE; + ret = sdhci_msm_gcc_reset(&pdev->dev, host); + if (ret) + goto pltfm_free; + /* Setup SDCC bus voter clock. */ msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus"); if (!IS_ERR(msm_host->bus_clk)) { diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index 6a7e49246a4d..d37d57c7aa18 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -25,6 +25,7 @@ #include #include "cqhci.h" +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" #define SDHCI_ARASAN_VENDOR_REGISTER 0x78 @@ -359,7 +360,7 @@ static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); - sdhci_reset(host, mask); + sdhci_and_cqhci_reset(host, mask); if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_FORCE_CDTEST) { ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index d1a1c548c515..0452c312b65e 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -100,8 +100,13 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock) static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) { - if (timing == MMC_TIMING_MMC_DDR52) - sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R); + u8 mc1r; + + if (timing == MMC_TIMING_MMC_DDR52) { + mc1r = sdhci_readb(host, SDMMC_MC1R); + mc1r |= SDMMC_MC1R_DDR; + sdhci_writeb(host, mc1r, SDMMC_MC1R); + } sdhci_set_uhs_signaling(host, timing); } diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 343648fcbc31..d53374991e13 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -904,6 +904,7 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc, scfg_node = of_find_matching_node(NULL, scfg_device_ids); if (scfg_node) scfg_base = of_iomap(scfg_node, 0); + of_node_put(scfg_node); if (scfg_base) { sdhciovselcr = SDHCIOVSELCR_TGLEN | SDHCIOVSELCR_VSELVAL; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index a78b060ce847..8b02fe3916d1 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -967,6 +967,12 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) dmi_match(DMI_SYS_VENDOR, "IRBIS")); } +static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot) +{ + return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC && + dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC."); +} + static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) { int ret = byt_emmc_probe_slot(slot); @@ -975,9 +981,11 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) slot->host->mmc->caps2 |= MMC_CAP2_CQE; if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) { - slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES, - slot->host->mmc_host_ops.hs400_enhanced_strobe = - intel_hs400_enhanced_strobe; + if (!jsl_broken_hs400es(slot)) { + slot->host->mmc->caps2 |= 
MMC_CAP2_HS400_ES; + slot->host->mmc_host_ops.hs400_enhanced_strobe = + intel_hs400_enhanced_strobe; + } slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD; } @@ -1791,6 +1799,8 @@ static int amd_probe(struct sdhci_pci_chip *chip) } } + pci_dev_put(smbus_dev); + if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ) chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD; diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 94e3f72f6405..72234790a310 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -31,6 +31,7 @@ #define O2_SD_CAPS 0xE0 #define O2_SD_ADMA1 0xE2 #define O2_SD_ADMA2 0xE7 +#define O2_SD_MISC_CTRL2 0xF0 #define O2_SD_INF_MOD 0xF1 #define O2_SD_MISC_CTRL4 0xFC #define O2_SD_MISC_CTRL 0x1C0 @@ -147,6 +148,8 @@ static int sdhci_o2_get_cd(struct mmc_host *mmc) if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS)) sdhci_o2_enable_internal_clock(host); + else + sdhci_o2_wait_card_detect_stable(host); return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); } @@ -820,6 +823,12 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) /* Set Tuning Windows to 5 */ pci_write_config_byte(chip->pdev, O2_SD_TUNING_CTRL, 0x55); + //Adjust 1st and 2nd CD debounce time + pci_read_config_dword(chip->pdev, O2_SD_MISC_CTRL2, &scratch_32); + scratch_32 &= 0xFFE7FFFF; + scratch_32 |= 0x00180000; + pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL2, scratch_32); + pci_write_config_dword(chip->pdev, O2_SD_DETECT_SETTING, 1); /* Lock WP */ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index 9cd8862e6cbd..110ee0c804c8 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -296,7 +296,7 @@ static unsigned int sdhci_sprd_get_max_clock(struct sdhci_host *host) static unsigned int sdhci_sprd_get_min_clock(struct sdhci_host *host) { - return 400000; + return 100000; } static void sdhci_sprd_set_uhs_signaling(struct sdhci_host *host, @@ -457,7 +457,7 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) } if (IS_ERR(sprd_host->pinctrl)) - return 0; + goto reset; switch (ios->signal_voltage) { case MMC_SIGNAL_VOLTAGE_180: @@ -485,6 +485,8 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) /* Wait for 300 ~ 500 us for pin state stable */ usleep_range(300, 500); + +reset: sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); return 0; diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index d50b691f6c44..d8fd2b5efd38 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -24,6 +24,7 @@ #include #include +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" #include "cqhci.h" @@ -361,7 +362,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; u32 misc_ctrl, clk_ctrl, pad_ctrl; - sdhci_reset(host, mask); + sdhci_and_cqhci_reset(host, mask); if (!(mask & SDHCI_RESET_ALL)) return; @@ -760,7 +761,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) */ host_clk = tegra_host->ddr_signaling ? 
clock * 2 : clock; clk_set_rate(pltfm_host->clk, host_clk); - tegra_host->curr_clk_rate = host_clk; + tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk); if (tegra_host->ddr_signaling) host->max_clk = host_clk; else diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 0e5234a5ca22..d509198c00c8 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -240,16 +240,6 @@ static void xenon_voltage_switch(struct sdhci_host *host) { /* Wait for 5ms after set 1.8V signal enable bit */ usleep_range(5000, 5500); - - /* - * For some reason the controller's Host Control2 register reports - * the bit representing 1.8V signaling as 0 when read after it was - * written as 1. Subsequent read reports 1. - * - * Since this may cause some issues, do an empty read of the Host - * Control2 register here to circumvent this. - */ - sdhci_readw(host, SDHCI_HOST_CONTROL2); } static const struct sdhci_ops sdhci_xenon_ops = { diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index a64ea143d185..24cd6d3dc647 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -15,6 +15,7 @@ #include #include "cqhci.h" +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" /* CTL_CFG Registers */ @@ -147,6 +148,9 @@ struct sdhci_am654_data { int drv_strength; int strb_sel; u32 flags; + u32 quirks; + +#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0) }; struct sdhci_am654_driver_data { @@ -369,6 +373,21 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg) } } +static void sdhci_am654_reset(struct sdhci_host *host, u8 mask) +{ + u8 ctrl; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); + + sdhci_and_cqhci_reset(host, mask); + + if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_FORCE_CDTEST) { + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + ctrl |= SDHCI_CTRL_CDTEST_INS | SDHCI_CTRL_CDTEST_EN; + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + } +} + static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); @@ -446,7 +465,7 @@ static struct sdhci_ops sdhci_am654_ops = { .set_clock = sdhci_am654_set_clock, .write_b = sdhci_am654_write_b, .irq = sdhci_am654_cqhci_irq, - .reset = sdhci_reset, + .reset = sdhci_and_cqhci_reset, }; static const struct sdhci_pltfm_data sdhci_am654_pdata = { @@ -476,7 +495,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = { .set_clock = sdhci_am654_set_clock, .write_b = sdhci_am654_write_b, .irq = sdhci_am654_cqhci_irq, - .reset = sdhci_reset, + .reset = sdhci_and_cqhci_reset, }; static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = { @@ -500,7 +519,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = { .set_clock = sdhci_j721e_4bit_set_clock, .write_b = sdhci_am654_write_b, .irq = sdhci_am654_cqhci_irq, - .reset = sdhci_reset, + .reset = sdhci_am654_reset, }; static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = { @@ -719,6 +738,9 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev, device_property_read_u32(dev, "ti,clkbuf-sel", &sdhci_am654->clkbuf_sel); + if (device_property_read_bool(dev, "ti,fails-without-test-cd")) + sdhci_am654->quirks |= SDHCI_AM654_QUIRK_FORCE_CDTEST; + sdhci_get_of_property(pdev); return 0; diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c index cf10949fb0ac..8df722ec57ed 100644 --- a/drivers/mmc/host/wmt-sdmmc.c +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -849,7 
+849,7 @@ static int wmt_mci_probe(struct platform_device *pdev) if (IS_ERR(priv->clk_sdmmc)) { dev_err(&pdev->dev, "Error getting clock\n"); ret = PTR_ERR(priv->clk_sdmmc); - goto fail5; + goto fail5_and_a_half; } ret = clk_prepare_enable(priv->clk_sdmmc); @@ -866,6 +866,9 @@ static int wmt_mci_probe(struct platform_device *pdev) return 0; fail6: clk_put(priv->clk_sdmmc); +fail5_and_a_half: + dma_free_coherent(&pdev->dev, mmc->max_blk_count * 16, + priv->dma_desc_buffer, priv->dma_desc_device_addr); fail5: free_irq(dma_irq, priv); fail4: diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 96a27e06401f..9bd65f3f805c 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -59,6 +59,10 @@ #define CFI_SR_WBASB BIT(3) #define CFI_SR_SLSB BIT(1) +enum cfi_quirks { + CFI_QUIRK_DQ_TRUE_DATA = BIT(0), +}; + static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); #if !FORCE_WORD_WRITE @@ -432,6 +436,15 @@ static void fixup_s29ns512p_sectors(struct mtd_info *mtd) mtd->name); } +static void fixup_quirks(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + + if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x0c01) + cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA; +} + /* Used to fix CFI-Tables of chips without Extended Query Tables */ static struct cfi_fixup cfi_nopri_fixup_table[] = { { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ @@ -470,6 +483,7 @@ static struct cfi_fixup cfi_fixup_table[] = { #if !FORCE_WORD_WRITE { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers }, #endif + { CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks }, { 0, 0, NULL } }; static struct cfi_fixup jedec_fixup_table[] = { @@ -797,47 +811,11 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) return NULL; } -/* - * Return true if the chip is ready. - * - * Ready is one of: read mode, query mode, erase-suspend-read mode (in any - * non-suspended sector) and is indicated by no toggle bits toggling. - * - * Note that anything more complicated than checking if no bits are toggling - * (including checking DQ5 for an error status) is tricky to get working - * correctly and is therefore not done (particularly with interleaved chips - * as each chip must be checked independently of the others). - */ -static int __xipram chip_ready(struct map_info *map, struct flchip *chip, - unsigned long addr) -{ - struct cfi_private *cfi = map->fldrv_priv; - map_word d, t; - - if (cfi_use_status_reg(cfi)) { - map_word ready = CMD(CFI_SR_DRB); - /* - * For chips that support status register, check device - * ready bit - */ - cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, - cfi->device_type, NULL); - d = map_read(map, addr); - - return map_word_andequal(map, d, ready, ready); - } - - d = map_read(map, addr); - t = map_read(map, addr); - - return map_word_equal(map, d, t); -} - /* * Return true if the chip is ready and has the correct value. * * Ready is one of: read mode, query mode, erase-suspend-read mode (in any - * non-suspended sector) and it is indicated by no bits toggling. + * non-suspended sector) and is indicated by no toggle bits toggling. * * Error are indicated by toggling bits or bits held with the wrong value, * or with bits toggling. 
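A note on the cfi_cmdset_0002 rework above and below: AMD/Fujitsu-style NOR flash signals a busy program or erase operation by toggling status bits (notably DQ6) on every read, so two back-to-back reads of the same address differ until the operation completes. The consolidated chip_ready() in this patch folds the old chip_good() data comparison into an optional "expected" argument. A minimal standalone sketch of that idea, using a hypothetical flash_read8() accessor in place of the kernel's map_read():

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical raw accessor standing in for the kernel's map_read(). */
extern uint8_t flash_read8(uintptr_t addr);

static bool flash_ready(uintptr_t addr, const uint8_t *expected)
{
	uint8_t d = flash_read8(addr);	/* first sample */
	uint8_t t = flash_read8(addr);	/* second sample */

	if (d != t)
		return false;	/* bits still toggling: program/erase busy */

	/* Settled; optionally require the datum the caller expects. */
	return !expected || t == *expected;
}

Passing NULL reproduces the plain readiness poll; passing a pointer to the written datum reproduces the stricter post-write check.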
@@ -846,33 +824,48 @@ static int __xipram chip_ready(struct map_info *map, struct flchip *chip, * (including checking DQ5 for an error status) is tricky to get working * correctly and is therefore not done (particularly with interleaved chips * as each chip must be checked independently of the others). - * */ -static int __xipram chip_good(struct map_info *map, struct flchip *chip, - unsigned long addr, map_word expected) +static int __xipram chip_ready(struct map_info *map, struct flchip *chip, + unsigned long addr, map_word *expected) { struct cfi_private *cfi = map->fldrv_priv; - map_word oldd, curd; + map_word d, t; + int ret; if (cfi_use_status_reg(cfi)) { map_word ready = CMD(CFI_SR_DRB); - /* * For chips that support status register, check device * ready bit */ cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); - curd = map_read(map, addr); + t = map_read(map, addr); - return map_word_andequal(map, curd, ready, ready); + return map_word_andequal(map, t, ready, ready); } - oldd = map_read(map, addr); - curd = map_read(map, addr); + d = map_read(map, addr); + t = map_read(map, addr); - return map_word_equal(map, oldd, curd) && - map_word_equal(map, curd, expected); + ret = map_word_equal(map, d, t); + + if (!ret || !expected) + return ret; + + return map_word_equal(map, t, *expected); +} + +static int __xipram chip_good(struct map_info *map, struct flchip *chip, + unsigned long addr, map_word *expected) +{ + struct cfi_private *cfi = map->fldrv_priv; + map_word *datum = expected; + + if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA) + datum = NULL; + + return chip_ready(map, chip, addr, datum); } static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) @@ -889,7 +882,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr case FL_STATUS: for (;;) { - if (chip_ready(map, chip, adr)) + if (chip_ready(map, chip, adr, NULL)) break; if (time_after(jiffies, timeo)) { @@ -927,7 +920,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr chip->state = FL_ERASE_SUSPENDING; chip->erase_suspended = 1; for (;;) { - if (chip_ready(map, chip, adr)) + if (chip_ready(map, chip, adr, NULL)) break; if (time_after(jiffies, timeo)) { @@ -1458,7 +1451,7 @@ static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr, /* wait for chip to become ready */ timeo = jiffies + msecs_to_jiffies(2); for (;;) { - if (chip_ready(map, chip, adr)) + if (chip_ready(map, chip, adr, NULL)) break; if (time_after(jiffies, timeo)) { @@ -1694,7 +1687,7 @@ static int __xipram do_write_oneword_once(struct map_info *map, * "chip_good" to avoid the failure due to scheduling. */ if (time_after(jiffies, timeo) && - !chip_good(map, chip, adr, datum)) { + !chip_good(map, chip, adr, &datum)) { xip_enable(map, chip, adr); printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); xip_disable(map, chip, adr); @@ -1702,7 +1695,7 @@ static int __xipram do_write_oneword_once(struct map_info *map, break; } - if (chip_good(map, chip, adr, datum)) { + if (chip_good(map, chip, adr, &datum)) { if (cfi_check_err_status(map, chip, adr)) ret = -EIO; break; @@ -1974,14 +1967,14 @@ static int __xipram do_write_buffer_wait(struct map_info *map, * "chip_good" to avoid the failure due to scheduling. 
*/ if (time_after(jiffies, timeo) && - !chip_good(map, chip, adr, datum)) { + !chip_good(map, chip, adr, &datum)) { pr_err("MTD %s(): software timeout, address:0x%.8lx.\n", __func__, adr); ret = -EIO; break; } - if (chip_good(map, chip, adr, datum)) { + if (chip_good(map, chip, adr, &datum)) { if (cfi_check_err_status(map, chip, adr)) ret = -EIO; break; @@ -2190,7 +2183,7 @@ static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, * If the driver thinks the chip is idle, and no toggle bits * are changing, then the chip is actually idle for sure. */ - if (chip->state == FL_READY && chip_ready(map, chip, adr)) + if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL)) return 0; /* @@ -2207,7 +2200,7 @@ static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, /* wait for the chip to become ready */ for (i = 0; i < jiffies_to_usecs(timeo); i++) { - if (chip_ready(map, chip, adr)) + if (chip_ready(map, chip, adr, NULL)) return 0; udelay(1); @@ -2271,13 +2264,13 @@ retry: map_write(map, datum, adr); for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { - if (chip_ready(map, chip, adr)) + if (chip_ready(map, chip, adr, NULL)) break; udelay(1); } - if (!chip_good(map, chip, adr, datum) || + if (!chip_ready(map, chip, adr, &datum) || cfi_check_err_status(map, chip, adr)) { /* reset on all failures. */ map_write(map, CMD(0xF0), chip->start); @@ -2419,6 +2412,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) DECLARE_WAITQUEUE(wait, current); int ret; int retry_cnt = 0; + map_word datum = map_word_ff(map); adr = cfi->addr_unlock1; @@ -2473,7 +2467,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) chip->erase_suspended = 0; } - if (chip_good(map, chip, adr, map_word_ff(map))) { + if (chip_ready(map, chip, adr, &datum)) { if (cfi_check_err_status(map, chip, adr)) ret = -EIO; break; @@ -2518,6 +2512,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, DECLARE_WAITQUEUE(wait, current); int ret; int retry_cnt = 0; + map_word datum = map_word_ff(map); adr += chip->start; @@ -2572,7 +2567,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, chip->erase_suspended = 0; } - if (chip_good(map, chip, adr, map_word_ff(map))) { + if (chip_ready(map, chip, adr, &datum)) { if (cfi_check_err_status(map, chip, adr)) ret = -EIO; break; @@ -2766,7 +2761,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, */ timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ for (;;) { - if (chip_ready(map, chip, adr)) + if (chip_ready(map, chip, adr, NULL)) break; if (time_after(jiffies, timeo)) { diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index a030792115bc..fa42473d04c1 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -1975,9 +1975,14 @@ static int __init docg3_probe(struct platform_device *pdev) dev_err(dev, "No I/O memory resource defined\n"); return ret; } - base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE); ret = -ENOMEM; + base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE); + if (!base) { + dev_err(dev, "devm_ioremap dev failed\n"); + return ret; + } + cascade = devm_kcalloc(dev, DOC_MAX_NBFLOORS, sizeof(*cascade), GFP_KERNEL); if (!cascade) diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c index 1888523d9745..9bee99f07af0 100644 --- a/drivers/mtd/devices/st_spi_fsm.c +++ b/drivers/mtd/devices/st_spi_fsm.c @@ -2115,10 +2115,12 
@@ static int stfsm_probe(struct platform_device *pdev) (long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20), fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10)); - return mtd_device_register(&fsm->mtd, NULL, 0); - + ret = mtd_device_register(&fsm->mtd, NULL, 0); + if (ret) { err_clk_unprepare: - clk_disable_unprepare(fsm->clk); + clk_disable_unprepare(fsm->clk); + } + return ret; } diff --git a/drivers/mtd/maps/physmap-versatile.c b/drivers/mtd/maps/physmap-versatile.c index ad7cd9cfaee0..a1b8b7b25f88 100644 --- a/drivers/mtd/maps/physmap-versatile.c +++ b/drivers/mtd/maps/physmap-versatile.c @@ -93,6 +93,7 @@ static int ap_flash_init(struct platform_device *pdev) return -ENODEV; } ebi_base = of_iomap(ebi, 0); + of_node_put(ebi); if (!ebi_base) return -ENODEV; @@ -207,6 +208,7 @@ int of_flash_probe_versatile(struct platform_device *pdev, versatile_flashprot = (enum versatile_flashprot)devid->data; rmap = syscon_node_to_regmap(sysnp); + of_node_put(sysnp); if (IS_ERR(rmap)) return PTR_ERR(rmap); diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index 0ee3192916d9..6a0d48c42cfa 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -91,7 +91,7 @@ #define DATA_INTERFACE_REG 0x6C #define DIFACE_SDR_MODE(x) FIELD_PREP(GENMASK(2, 0), (x)) -#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (X)) +#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (x)) #define DIFACE_SDR 0 #define DIFACE_NVDDR BIT(9) @@ -283,17 +283,17 @@ static int anfc_select_target(struct nand_chip *chip, int target) /* Update clock frequency */ if (nfc->cur_clk != anand->clk) { - clk_disable_unprepare(nfc->controller_clk); - ret = clk_set_rate(nfc->controller_clk, anand->clk); + clk_disable_unprepare(nfc->bus_clk); + ret = clk_set_rate(nfc->bus_clk, anand->clk); if (ret) { dev_err(nfc->dev, "Failed to change clock rate\n"); return ret; } - ret = clk_prepare_enable(nfc->controller_clk); + ret = clk_prepare_enable(nfc->bus_clk); if (ret) { dev_err(nfc->dev, - "Failed to re-enable the controller clock\n"); + "Failed to re-enable the bus clock\n"); return ret; } @@ -884,21 +884,60 @@ static int anfc_setup_interface(struct nand_chip *chip, int target, struct anand *anand = to_anand(chip); struct arasan_nfc *nfc = to_anfc(chip->controller); struct device_node *np = nfc->dev->of_node; + const struct nand_sdr_timings *sdr; + const struct nand_nvddr_timings *nvddr; + + if (nand_interface_is_nvddr(conf)) { + nvddr = nand_get_nvddr_timings(conf); + if (IS_ERR(nvddr)) + return PTR_ERR(nvddr); + + /* + * The controller only supports data payload requests which are + * a multiple of 4. In practice, most data accesses are 4-byte + * aligned and this is not an issue. However, rounding up will + * simply be refused by the controller if we reached the end of + * the device *and* we are using the NV-DDR interface(!). In + * this situation, unaligned data requests ending at the device + * boundary will confuse the controller and cannot be performed. + * + * This is something that happens in nand_read_subpage() when + * selecting software ECC support and must be avoided. 
+ */ + if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) + return -ENOTSUPP; + } else { + sdr = nand_get_sdr_timings(conf); + if (IS_ERR(sdr)) + return PTR_ERR(sdr); + } if (target < 0) return 0; - anand->timings = DIFACE_SDR | DIFACE_SDR_MODE(conf->timings.mode); - anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK; + if (nand_interface_is_sdr(conf)) + anand->timings = DIFACE_SDR | + DIFACE_SDR_MODE(conf->timings.mode); + else + anand->timings = DIFACE_NVDDR | + DIFACE_DDR_MODE(conf->timings.mode); + + if (nand_interface_is_sdr(conf)) { + anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK; + } else { + /* ONFI timings are defined in picoseconds */ + anand->clk = div_u64((u64)NSEC_PER_SEC * 1000, + conf->timings.nvddr.tCK_min); + } /* * Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work * with f > 90MHz (default clock is 100MHz) but signals are unstable * with higher modes. Hence we decrease a little bit the clock rate to - * 80MHz when using modes 2-5 with this SoC. + * 80MHz when using SDR modes 2-5 with this SoC. */ if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") && - conf->timings.mode >= 2) + nand_interface_is_sdr(conf) && conf->timings.mode >= 2) anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK; return 0; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index c048e826746a..0d84f8156d8e 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -405,6 +405,7 @@ static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc, dma_async_issue_pending(nc->dmac); wait_for_completion(&finished); + dma_unmap_single(nc->dev, buf_dma, len, dir); return 0; @@ -1246,7 +1247,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, nc = to_nand_controller(nand->base.controller); /* DDR interface not supported. 
*/ - if (conf->type != NAND_SDR_IFACE) + if (!nand_interface_is_sdr(conf)) return -ENOTSUPP; /* diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index b46786cd53e0..4fdb39214a12 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -2983,11 +2983,10 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev) if (IS_ERR(cdns_ctrl->reg)) return PTR_ERR(cdns_ctrl->reg); - res = platform_get_resource(ofdev, IORESOURCE_MEM, 1); - cdns_ctrl->io.dma = res->start; - cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res); + cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res); if (IS_ERR(cdns_ctrl->io.virt)) return PTR_ERR(cdns_ctrl->io.virt); + cdns_ctrl->io.dma = res->start; dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk"); if (IS_ERR(dt->clk)) diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c index 20c085a30adc..de7e722d3826 100644 --- a/drivers/mtd/nand/raw/denali_pci.c +++ b/drivers/mtd/nand/raw/denali_pci.c @@ -74,22 +74,21 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) return ret; } - denali->reg = ioremap(csr_base, csr_len); + denali->reg = devm_ioremap(denali->dev, csr_base, csr_len); if (!denali->reg) { dev_err(&dev->dev, "Spectra: Unable to remap memory region\n"); return -ENOMEM; } - denali->host = ioremap(mem_base, mem_len); + denali->host = devm_ioremap(denali->dev, mem_base, mem_len); if (!denali->host) { dev_err(&dev->dev, "Spectra: ioremap failed!"); - ret = -ENOMEM; - goto out_unmap_reg; + return -ENOMEM; } ret = denali_init(denali); if (ret) - goto out_unmap_host; + return ret; nsels = denali->nbanks; @@ -117,10 +116,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) out_remove_denali: denali_remove(denali); -out_unmap_host: - iounmap(denali->host); -out_unmap_reg: - iounmap(denali->reg); return ret; } @@ -129,8 +124,6 @@ static void denali_pci_remove(struct pci_dev *dev) struct denali_controller *denali = pci_get_drvdata(dev); denali_remove(denali); - iounmap(denali->reg); - iounmap(denali->host); } static struct pci_driver denali_pci_driver = { diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c index b2af7f81fdf8..c174b6dc3c6b 100644 --- a/drivers/mtd/nand/raw/fsl_elbc_nand.c +++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c @@ -727,36 +727,40 @@ static int fsl_elbc_attach_chip(struct nand_chip *chip) struct fsl_lbc_regs __iomem *lbc = ctrl->regs; unsigned int al; - switch (chip->ecc.engine_type) { /* * if ECC was not chosen in DT, decide whether to use HW or SW ECC from * CS Base Register */ - case NAND_ECC_ENGINE_TYPE_NONE: + if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID) { /* If CS Base Register selects full hardware ECC then use it */ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == BR_DECC_CHK_GEN) { - chip->ecc.read_page = fsl_elbc_read_page; - chip->ecc.write_page = fsl_elbc_write_page; - chip->ecc.write_subpage = fsl_elbc_write_subpage; - chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; - mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops); - chip->ecc.size = 512; - chip->ecc.bytes = 3; - chip->ecc.strength = 1; } else { /* otherwise fall back to default software ECC */ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; chip->ecc.algo = NAND_ECC_ALGO_HAMMING; } + } + + switch (chip->ecc.engine_type) { + /* if HW ECC was chosen, setup ecc and oob layout */ + case 
NAND_ECC_ENGINE_TYPE_ON_HOST: + chip->ecc.read_page = fsl_elbc_read_page; + chip->ecc.write_page = fsl_elbc_write_page; + chip->ecc.write_subpage = fsl_elbc_write_subpage; + mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops); + chip->ecc.size = 512; + chip->ecc.bytes = 3; + chip->ecc.strength = 1; break; - /* if SW ECC was chosen in DT, we do not need to set anything here */ + /* if none or SW ECC was chosen, we do not need to set anything here */ + case NAND_ECC_ENGINE_TYPE_NONE: case NAND_ECC_ENGINE_TYPE_SOFT: + case NAND_ECC_ENGINE_TYPE_ON_DIE: break; - /* should we also implement *_ECC_ENGINE_CONTROLLER to do as above? */ default: return -EINVAL; } diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 92e8ca56f566..200d3ab343b0 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -653,8 +653,9 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, unsigned int tRP_ps; bool use_half_period; int sample_delay_ps, sample_delay_factor; - u16 busy_timeout_cycles; + unsigned int busy_timeout_cycles; u8 wrn_dly_sel; + u64 busy_timeout_ps; if (sdr->tRC_min >= 30000) { /* ONFI non-EDO modes [0-3] */ @@ -678,7 +679,8 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps); data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps); data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps); - busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps); + busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max); + busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps); hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) | BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) | diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index d00c916f133b..dce35f81e0a5 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2672,7 +2672,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, chip->controller = &nfc->controller; nand_set_flash_node(chip, np); - if (!of_property_read_bool(np, "marvell,nand-keep-config")) + if (of_property_read_bool(np, "marvell,nand-keep-config")) chip->options |= NAND_KEEP_TIMINGS; mtd = nand_to_mtd(chip); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 817bddccb775..38f490088d76 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -454,7 +454,7 @@ static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips, if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) { mtd->ecc_stats.corrected += ECC_ERR_CNT(*info); *bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info)); - *correct_bitmap |= 1 >> i; + *correct_bitmap |= BIT_ULL(i); continue; } if ((nand->options & NAND_NEED_SCRAMBLING) && @@ -800,7 +800,7 @@ static int meson_nfc_read_page_hwecc(struct nand_chip *nand, u8 *buf, u8 *data = buf + i * ecc->size; u8 *oob = nand->oob_poi + i * (ecc->bytes + 2); - if (correct_bitmap & (1 << i)) + if (correct_bitmap & BIT_ULL(i)) continue; ret = nand_check_erased_ecc_chunk(data, ecc->size, oob, ecc->bytes + 2, @@ -1307,7 +1307,6 @@ static int meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc) if (ret) return ret; - meson_nfc_free_buffer(&meson_chip->nand); nand_cleanup(&meson_chip->nand); list_del(&meson_chip->node); } diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c index 
75f1fa3d4d35..c115e03ede88 100644 --- a/drivers/mtd/nand/raw/mtk_ecc.c +++ b/drivers/mtd/nand/raw/mtk_ecc.c @@ -43,6 +43,7 @@ struct mtk_ecc_caps { u32 err_mask; + u32 err_shift; const u8 *ecc_strength; const u32 *ecc_regs; u8 num_ecc_strength; @@ -76,7 +77,7 @@ static const u8 ecc_strength_mt2712[] = { }; static const u8 ecc_strength_mt7622[] = { - 4, 6, 8, 10, 12, 14, 16 + 4, 6, 8, 10, 12 }; enum mtk_ecc_regs { @@ -221,7 +222,7 @@ void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, for (i = 0; i < sectors; i++) { offset = (i >> 2) << 2; err = readl(ecc->regs + ECC_DECENUM0 + offset); - err = err >> ((i % 4) * 8); + err = err >> ((i % 4) * ecc->caps->err_shift); err &= ecc->caps->err_mask; if (err == ecc->caps->err_mask) { /* uncorrectable errors */ @@ -449,6 +450,7 @@ EXPORT_SYMBOL(mtk_ecc_get_parity_bits); static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { .err_mask = 0x3f, + .err_shift = 8, .ecc_strength = ecc_strength_mt2701, .ecc_regs = mt2701_ecc_regs, .num_ecc_strength = 20, @@ -459,6 +461,7 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { .err_mask = 0x7f, + .err_shift = 8, .ecc_strength = ecc_strength_mt2712, .ecc_regs = mt2712_ecc_regs, .num_ecc_strength = 23, @@ -468,10 +471,11 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { }; static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = { - .err_mask = 0x3f, + .err_mask = 0x1f, + .err_shift = 5, .ecc_strength = ecc_strength_mt7622, .ecc_regs = mt7622_ecc_regs, - .num_ecc_strength = 7, + .num_ecc_strength = 5, .ecc_mode_shift = 4, .parity_bits = 13, .pg_irq_sel = 0, diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index 94d832646487..481b56d5f60d 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -292,6 +292,261 @@ static const struct nand_interface_config onfi_sdr_timings[] = { }, }; +static const struct nand_interface_config onfi_nvddr_timings[] = { + /* Mode 0 */ + { + .type = NAND_NVDDR_IFACE, + .timings.mode = 0, + .timings.nvddr = { + .tCCS_min = 500000, + .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tAC_min = 3000, + .tAC_max = 25000, + .tADL_min = 400000, + .tCAD_min = 45000, + .tCAH_min = 10000, + .tCALH_min = 10000, + .tCALS_min = 10000, + .tCAS_min = 10000, + .tCEH_min = 20000, + .tCH_min = 10000, + .tCK_min = 50000, + .tCS_min = 35000, + .tDH_min = 5000, + .tDQSCK_min = 3000, + .tDQSCK_max = 25000, + .tDQSD_min = 0, + .tDQSD_max = 18000, + .tDQSHZ_max = 20000, + .tDQSQ_max = 5000, + .tDS_min = 5000, + .tDSC_min = 50000, + .tFEAT_max = 1000000, + .tITC_max = 1000000, + .tQHS_max = 6000, + .tRHW_min = 100000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWHR_min = 80000, + .tWRCK_min = 20000, + .tWW_min = 100000, + }, + }, + /* Mode 1 */ + { + .type = NAND_NVDDR_IFACE, + .timings.mode = 1, + .timings.nvddr = { + .tCCS_min = 500000, + .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tAC_min = 3000, + .tAC_max = 25000, + .tADL_min = 400000, + .tCAD_min = 45000, + .tCAH_min = 5000, + .tCALH_min = 5000, + .tCALS_min = 5000, + .tCAS_min = 5000, + .tCEH_min = 20000, + .tCH_min = 5000, + .tCK_min = 30000, + .tCS_min = 25000, + .tDH_min = 2500, + .tDQSCK_min = 3000, + .tDQSCK_max = 25000, + .tDQSD_min = 0, + .tDQSD_max = 18000, + .tDQSHZ_max = 20000, + .tDQSQ_max = 2500, + 
.tDS_min = 3000, + .tDSC_min = 30000, + .tFEAT_max = 1000000, + .tITC_max = 1000000, + .tQHS_max = 3000, + .tRHW_min = 100000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWHR_min = 80000, + .tWRCK_min = 20000, + .tWW_min = 100000, + }, + }, + /* Mode 2 */ + { + .type = NAND_NVDDR_IFACE, + .timings.mode = 2, + .timings.nvddr = { + .tCCS_min = 500000, + .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tAC_min = 3000, + .tAC_max = 25000, + .tADL_min = 400000, + .tCAD_min = 45000, + .tCAH_min = 4000, + .tCALH_min = 4000, + .tCALS_min = 4000, + .tCAS_min = 4000, + .tCEH_min = 20000, + .tCH_min = 4000, + .tCK_min = 20000, + .tCS_min = 15000, + .tDH_min = 1700, + .tDQSCK_min = 3000, + .tDQSCK_max = 25000, + .tDQSD_min = 0, + .tDQSD_max = 18000, + .tDQSHZ_max = 20000, + .tDQSQ_max = 1700, + .tDS_min = 2000, + .tDSC_min = 20000, + .tFEAT_max = 1000000, + .tITC_max = 1000000, + .tQHS_max = 2000, + .tRHW_min = 100000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWHR_min = 80000, + .tWRCK_min = 20000, + .tWW_min = 100000, + }, + }, + /* Mode 3 */ + { + .type = NAND_NVDDR_IFACE, + .timings.mode = 3, + .timings.nvddr = { + .tCCS_min = 500000, + .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tAC_min = 3000, + .tAC_max = 25000, + .tADL_min = 400000, + .tCAD_min = 45000, + .tCAH_min = 3000, + .tCALH_min = 3000, + .tCALS_min = 3000, + .tCAS_min = 3000, + .tCEH_min = 20000, + .tCH_min = 3000, + .tCK_min = 15000, + .tCS_min = 15000, + .tDH_min = 1300, + .tDQSCK_min = 3000, + .tDQSCK_max = 25000, + .tDQSD_min = 0, + .tDQSD_max = 18000, + .tDQSHZ_max = 20000, + .tDQSQ_max = 1300, + .tDS_min = 1500, + .tDSC_min = 15000, + .tFEAT_max = 1000000, + .tITC_max = 1000000, + .tQHS_max = 1500, + .tRHW_min = 100000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWHR_min = 80000, + .tWRCK_min = 20000, + .tWW_min = 100000, + }, + }, + /* Mode 4 */ + { + .type = NAND_NVDDR_IFACE, + .timings.mode = 4, + .timings.nvddr = { + .tCCS_min = 500000, + .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tAC_min = 3000, + .tAC_max = 25000, + .tADL_min = 400000, + .tCAD_min = 45000, + .tCAH_min = 2500, + .tCALH_min = 2500, + .tCALS_min = 2500, + .tCAS_min = 2500, + .tCEH_min = 20000, + .tCH_min = 2500, + .tCK_min = 12000, + .tCS_min = 15000, + .tDH_min = 1100, + .tDQSCK_min = 3000, + .tDQSCK_max = 25000, + .tDQSD_min = 0, + .tDQSD_max = 18000, + .tDQSHZ_max = 20000, + .tDQSQ_max = 1000, + .tDS_min = 1100, + .tDSC_min = 12000, + .tFEAT_max = 1000000, + .tITC_max = 1000000, + .tQHS_max = 1200, + .tRHW_min = 100000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWHR_min = 80000, + .tWRCK_min = 20000, + .tWW_min = 100000, + }, + }, + /* Mode 5 */ + { + .type = NAND_NVDDR_IFACE, + .timings.mode = 5, + .timings.nvddr = { + .tCCS_min = 500000, + .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tAC_min = 3000, + .tAC_max = 25000, + .tADL_min = 400000, + .tCAD_min = 45000, + .tCAH_min = 2000, + .tCALH_min = 2000, + .tCALS_min = 2000, + .tCAS_min = 2000, + .tCEH_min = 20000, + .tCH_min = 2000, + .tCK_min = 10000, + .tCS_min = 15000, + .tDH_min = 900, + .tDQSCK_min = 3000, + .tDQSCK_max = 25000, + .tDQSD_min = 0, + .tDQSD_max = 18000, + .tDQSHZ_max = 20000, + 
.tDQSQ_max = 850, + .tDS_min = 900, + .tDSC_min = 10000, + .tFEAT_max = 1000000, + .tITC_max = 1000000, + .tQHS_max = 1000, + .tRHW_min = 100000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWHR_min = 80000, + .tWRCK_min = 20000, + .tWW_min = 100000, + }, + }, +}; + /* All NAND chips share the same reset data interface: SDR mode 0 */ const struct nand_interface_config *nand_get_reset_interface_config(void) { diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c index 13df4bdf792a..8f89e2d3d817 100644 --- a/drivers/mtd/nand/raw/sh_flctl.c +++ b/drivers/mtd/nand/raw/sh_flctl.c @@ -384,7 +384,8 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf, dma_addr_t dma_addr; dma_cookie_t cookie; uint32_t reg; - int ret; + int ret = 0; + unsigned long time_left; if (dir == DMA_FROM_DEVICE) { chan = flctl->chan_fifo0_rx; @@ -425,13 +426,14 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf, goto out; } - ret = + time_left = wait_for_completion_timeout(&flctl->dma_complete, msecs_to_jiffies(3000)); - if (ret <= 0) { + if (time_left == 0) { dmaengine_terminate_all(chan); dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n"); + ret = -ETIMEDOUT; } out: @@ -441,7 +443,7 @@ out: dma_unmap_single(chan->device->dev, dma_addr, len, dir); - /* ret > 0 is success */ + /* ret == 0 is success */ return ret; } @@ -465,7 +467,7 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset) /* initiate DMA transfer */ if (flctl->chan_fifo0_rx && rlen >= 32 && - flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0) + !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE)) goto convert; /* DMA success */ /* do polling transfer */ @@ -524,7 +526,7 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, /* initiate DMA transfer */ if (flctl->chan_fifo0_tx && rlen >= 32 && - flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0) + !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE)) return; /* DMA success */ /* do polling transfer */ diff --git a/drivers/mtd/parsers/bcm47xxpart.c b/drivers/mtd/parsers/bcm47xxpart.c index 6012a10f10c8..13daf9bffd08 100644 --- a/drivers/mtd/parsers/bcm47xxpart.c +++ b/drivers/mtd/parsers/bcm47xxpart.c @@ -233,11 +233,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, } /* Read middle of the block */ - err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read, + err = mtd_read(master, offset + (blocksize / 2), 0x4, &bytes_read, (uint8_t *)buf); if (err && !mtd_is_bitflip(err)) { pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", - offset, err); + offset + (blocksize / 2), err); continue; } diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c index 3ccd6363ee8c..4f3bcc59a638 100644 --- a/drivers/mtd/parsers/redboot.c +++ b/drivers/mtd/parsers/redboot.c @@ -58,6 +58,7 @@ static void parse_redboot_of(struct mtd_info *master) return; ret = of_property_read_u32(npart, "fis-index-block", &dirblock); + of_node_put(npart); if (ret) return; diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index b9f272408c4d..2fedae67c07c 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -1098,9 +1098,9 @@ static void sm_release(struct mtd_blktrans_dev *dev) { struct sm_ftl *ftl = dev->priv; - mutex_lock(&ftl->mutex); del_timer_sync(&ftl->timer); cancel_work_sync(&ftl->flush_work); + mutex_lock(&ftl->mutex); sm_cache_flush(ftl); mutex_unlock(&ftl->mutex); } diff --git 
a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c index 555fe55d14ae..8a3c1f3c2d2e 100644 --- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c +++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c @@ -16,12 +16,30 @@ #define BCR 0xdc #define BCR_WPD BIT(0) +static bool intel_spi_pci_set_writeable(void __iomem *base, void *data) +{ + struct pci_dev *pdev = data; + u32 bcr; + + /* Try to make the chip read/write */ + pci_read_config_dword(pdev, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_write_config_dword(pdev, BCR, bcr); + pci_read_config_dword(pdev, BCR, &bcr); + } + + return bcr & BCR_WPD; +} + static const struct intel_spi_boardinfo bxt_info = { .type = INTEL_SPI_BXT, + .set_writeable = intel_spi_pci_set_writeable, }; static const struct intel_spi_boardinfo cnl_info = { .type = INTEL_SPI_CNL, + .set_writeable = intel_spi_pci_set_writeable, }; static int intel_spi_pci_probe(struct pci_dev *pdev, @@ -29,7 +47,6 @@ static int intel_spi_pci_probe(struct pci_dev *pdev, { struct intel_spi_boardinfo *info; struct intel_spi *ispi; - u32 bcr; int ret; ret = pcim_enable_device(pdev); @@ -41,15 +58,7 @@ static int intel_spi_pci_probe(struct pci_dev *pdev, if (!info) return -ENOMEM; - /* Try to make the chip read/write */ - pci_read_config_dword(pdev, BCR, &bcr); - if (!(bcr & BCR_WPD)) { - bcr |= BCR_WPD; - pci_write_config_dword(pdev, BCR, bcr); - pci_read_config_dword(pdev, BCR, &bcr); - } - info->writeable = !!(bcr & BCR_WPD); - + info->data = pdev; ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info); if (IS_ERR(ispi)) return PTR_ERR(ispi); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c index b54a56a68100..6c802db6b4af 100644 --- a/drivers/mtd/spi-nor/controllers/intel-spi.c +++ b/drivers/mtd/spi-nor/controllers/intel-spi.c @@ -53,17 +53,17 @@ #define FRACC 0x50 #define FREG(n) (0x54 + ((n) * 4)) -#define FREG_BASE_MASK 0x3fff +#define FREG_BASE_MASK GENMASK(14, 0) #define FREG_LIMIT_SHIFT 16 -#define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT) +#define FREG_LIMIT_MASK GENMASK(30, 16) /* Offset is from @ispi->pregs */ #define PR(n) ((n) * 4) #define PR_WPE BIT(31) #define PR_LIMIT_SHIFT 16 -#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT) +#define PR_LIMIT_MASK GENMASK(30, 16) #define PR_RPE BIT(15) -#define PR_BASE_MASK 0x3fff +#define PR_BASE_MASK GENMASK(14, 0) /* Offsets are from @ispi->sregs */ #define SSFSTS_CTL 0x00 @@ -117,7 +117,7 @@ #define ERASE_OPCODE_SHIFT 8 #define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) #define ERASE_64K_OPCODE_SHIFT 16 -#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) +#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT) #define INTEL_SPI_TIMEOUT 5000 /* ms */ #define INTEL_SPI_FIFO_SZ 64 @@ -132,7 +132,6 @@ * @sregs: Start of software sequencer registers * @nregions: Maximum number of regions * @pr_num: Maximum number of protected range registers - * @writeable: Is the chip writeable * @locked: Is SPI setting locked * @swseq_reg: Use SW sequencer in register reads/writes * @swseq_erase: Use SW sequencer in erase operation @@ -150,7 +149,6 @@ struct intel_spi { void __iomem *sregs; size_t nregions; size_t pr_num; - bool writeable; bool locked; bool swseq_reg; bool swseq_erase; @@ -305,6 +303,14 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi) INTEL_SPI_TIMEOUT * 1000); } +static bool intel_spi_set_writeable(struct intel_spi *ispi) +{ + if (!ispi->info->set_writeable) + return false; + + 
return ispi->info->set_writeable(ispi->base, ispi->info->data); +} + static int intel_spi_init(struct intel_spi *ispi) { u32 opmenu0, opmenu1, lvscc, uvscc, val; @@ -317,19 +323,6 @@ static int intel_spi_init(struct intel_spi *ispi) ispi->nregions = BYT_FREG_NUM; ispi->pr_num = BYT_PR_NUM; ispi->swseq_reg = true; - - if (writeable) { - /* Disable write protection */ - val = readl(ispi->base + BYT_BCR); - if (!(val & BYT_BCR_WPD)) { - val |= BYT_BCR_WPD; - writel(val, ispi->base + BYT_BCR); - val = readl(ispi->base + BYT_BCR); - } - - ispi->writeable = !!(val & BYT_BCR_WPD); - } - break; case INTEL_SPI_LPT: @@ -359,6 +352,12 @@ static int intel_spi_init(struct intel_spi *ispi) return -EINVAL; } + /* Try to disable write protection if user asked to do so */ + if (writeable && !intel_spi_set_writeable(ispi)) { + dev_warn(ispi->dev, "can't disable chip write protection\n"); + writeable = false; + } + /* Disable #SMI generation from HW sequencer */ val = readl(ispi->base + HSFSTS_CTL); val &= ~HSFSTS_CTL_FSMIE; @@ -885,9 +884,12 @@ static void intel_spi_fill_partition(struct intel_spi *ispi, /* * If any of the regions have protection bits set, make the * whole partition read-only to be on the safe side. + * + * Also if the user did not ask the chip to be writeable + * mask the bit too. */ - if (intel_spi_is_protected(ispi, base, limit)) - ispi->writeable = false; + if (!writeable || intel_spi_is_protected(ispi, base, limit)) + part->mask_flags |= MTD_WRITEABLE; end = (limit << 12) + 4096; if (end > part->size) @@ -928,7 +930,6 @@ struct intel_spi *intel_spi_probe(struct device *dev, ispi->dev = dev; ispi->info = info; - ispi->writeable = info->writeable; ret = intel_spi_init(ispi); if (ret) @@ -946,10 +947,6 @@ struct intel_spi *intel_spi_probe(struct device *dev, intel_spi_fill_partition(ispi, &part); - /* Prevent writes if not explicitly enabled */ - if (!ispi->writeable || !writeable) - ispi->nor.mtd.flags &= ~MTD_WRITEABLE; - ret = mtd_device_register(&ispi->nor.mtd, &part, 1); if (ret) return ERR_PTR(ret); diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index fdec7c815937..5c29cbf84916 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -882,6 +882,15 @@ static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1) if (ret) return ret; + ret = spi_nor_read_sr(nor, sr_cr); + if (ret) + return ret; + + if (sr1 != sr_cr[0]) { + dev_dbg(nor->dev, "SR: Read back test failed\n"); + return -EIO; + } + if (nor->flags & SNOR_F_NO_READ_CR) return 0; diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 28f55f9cf715..053ab52668e8 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -97,6 +97,33 @@ out: return e; } +/* + * has_enough_free_count - whether ubi has enough free pebs to fill fm pools + * @ubi: UBI device description object + * @is_wl_pool: whether UBI is filling wear leveling pool + * + * This helper checks whether there are enough free PEBs (with fastmap PEBs + * deducted) to fill fm_pool and fm_wl_pool; the rule applies once at least + * one free PEB has been filled into fm_wl_pool. + * For the wear-leveling pool, UBI must also reserve free PEBs for bad-PEB + * handling, because producing new bad PEBs may otherwise leave too few + * free PEBs for user volumes. + */ +static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool) +{ + int fm_used = 0; // fastmap non-anchor PEBs.
+ int beb_rsvd_pebs; + + if (!ubi->free.rb_node) + return false; + + beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0; + if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled)) + fm_used = ubi->fm_size / ubi->leb_size - 1; + + return ubi->free_count - beb_rsvd_pebs > fm_used; +} + /** * ubi_refill_pools - refills all fastmap PEB pools. * @ubi: UBI device description object @@ -120,21 +147,17 @@ void ubi_refill_pools(struct ubi_device *ubi) wl_tree_add(ubi->fm_anchor, &ubi->free); ubi->free_count++; } - if (ubi->fm_next_anchor) { - wl_tree_add(ubi->fm_next_anchor, &ubi->free); - ubi->free_count++; - } - /* All available PEBs are in ubi->free, now is the time to get + /* + * All available PEBs are in ubi->free, now is the time to get * the best anchor PEBs. */ ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1); - ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1); for (;;) { enough = 0; if (pool->size < pool->max_size) { - if (!ubi->free.rb_node) + if (!has_enough_free_count(ubi, false)) break; e = wl_get_wle(ubi); @@ -147,8 +170,7 @@ void ubi_refill_pools(struct ubi_device *ubi) enough++; if (wl_pool->size < wl_pool->max_size) { - if (!ubi->free.rb_node || - (ubi->free_count - ubi->beb_rsvd_pebs < 5)) + if (!has_enough_free_count(ubi, true)) break; e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); @@ -286,20 +308,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) int ubi_ensure_anchor_pebs(struct ubi_device *ubi) { struct ubi_work *wrk; + struct ubi_wl_entry *anchor; spin_lock(&ubi->wl_lock); - /* Do we have a next anchor? */ - if (!ubi->fm_next_anchor) { - ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1); - if (!ubi->fm_next_anchor) - /* Tell wear leveling to produce a new anchor PEB */ - ubi->fm_do_produce_anchor = 1; + /* Do we already have an anchor? */ + if (ubi->fm_anchor) { + spin_unlock(&ubi->wl_lock); + return 0; } - /* Do wear leveling to get a new anchor PEB or check the - * existing next anchor candidate. - */ + /* See if we can find an anchor PEB on the list of free PEBs */ + anchor = ubi_wl_get_fm_peb(ubi, 1); + if (anchor) { + ubi->fm_anchor = anchor; + spin_unlock(&ubi->wl_lock); + return 0; + } + + ubi->fm_do_produce_anchor = 1; + /* No luck, trigger wear leveling to produce a new anchor PEB. 
*/ if (ubi->wl_scheduled) { spin_unlock(&ubi->wl_lock); return 0; @@ -381,11 +409,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi) ubi->fm_anchor = NULL; } - if (ubi->fm_next_anchor) { - return_unused_peb(ubi, ubi->fm_next_anchor); - ubi->fm_next_anchor = NULL; - } - if (ubi->fm) { for (i = 0; i < ubi->fm->used_blocks; i++) kfree(ubi->fm->e[i]); diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 6b5f1ffd961b..6e95c4b1473e 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -1230,17 +1230,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi, fm_pos += sizeof(*fec); ubi_assert(fm_pos <= ubi->fm_size); } - if (ubi->fm_next_anchor) { - fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); - - fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum); - set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs); - fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec); - - free_peb_count++; - fm_pos += sizeof(*fec); - ubi_assert(fm_pos <= ubi->fm_size); - } fmh->free_peb_count = cpu_to_be32(free_peb_count); ubi_for_each_used_peb(ubi, wl_e, tmp_rb) { diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index c2da77163f94..da0bee13fe7f 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -491,8 +491,7 @@ struct ubi_debug_info { * @fm_work: fastmap work queue * @fm_work_scheduled: non-zero if fastmap work was scheduled * @fast_attach: non-zero if UBI was attached by fastmap - * @fm_anchor: The new anchor PEB used during fastmap update - * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated + * @fm_anchor: The next anchor PEB to use for fastmap * @fm_do_produce_anchor: If true produce an anchor PEB in wl * * @used: RB-tree of used physical eraseblocks @@ -603,7 +602,6 @@ struct ubi_device { int fm_work_scheduled; int fast_attach; struct ubi_wl_entry *fm_anchor; - struct ubi_wl_entry *fm_next_anchor; int fm_do_produce_anchor; /* Wear-leveling sub-system's stuff */ diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 1bc7b3a05604..6ea95ade4ca6 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -309,7 +309,6 @@ out_mapping: ubi->volumes[vol_id] = NULL; ubi->vol_count -= 1; spin_unlock(&ubi->volumes_lock); - ubi_eba_destroy_table(eba_tbl); out_acc: spin_lock(&ubi->volumes_lock); ubi->rsvd_pebs -= vol->reserved_pebs; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 7847de75a74c..820b5c1c8e8e 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -688,16 +688,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, #ifdef CONFIG_MTD_UBI_FASTMAP e1 = find_anchor_wl_entry(&ubi->used); - if (e1 && ubi->fm_next_anchor && - (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) { + if (e1 && ubi->fm_anchor && + (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) { ubi->fm_do_produce_anchor = 1; - /* fm_next_anchor is no longer considered a good anchor - * candidate. + /* + * fm_anchor is no longer considered a good anchor. * NULL assignment also prevents multiple wear level checks * of this PEB. 
*/ - wl_tree_add(ubi->fm_next_anchor, &ubi->free); - ubi->fm_next_anchor = NULL; + wl_tree_add(ubi->fm_anchor, &ubi->free); + ubi->fm_anchor = NULL; ubi->free_count++; } @@ -1086,12 +1086,13 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) if (!err) { spin_lock(&ubi->wl_lock); - if (!ubi->fm_disabled && !ubi->fm_next_anchor && + if (!ubi->fm_disabled && !ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) { - /* Abort anchor production, if needed it will be + /* + * Abort anchor production, if needed it will be * enabled again in the wear leveling started below. */ - ubi->fm_next_anchor = e; + ubi->fm_anchor = e; ubi->fm_do_produce_anchor = 0; } else { wl_tree_add(e, &ubi->free); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 5be850effd29..72d85214e1b5 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -80,7 +80,6 @@ config WIREGUARD select CRYPTO select CRYPTO_LIB_CURVE25519 select CRYPTO_LIB_CHACHA20POLY1305 - select CRYPTO_LIB_BLAKE2S select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT select CRYPTO_POLY1305_X86_64 if X86 && 64BIT select CRYPTO_BLAKE2S_X86 if X86 && 64BIT diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 98df38fe553c..12d085405bd0 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -332,7 +332,7 @@ static int __init arc_rimi_init(void) dev->irq = 9; if (arcrimi_probe(dev)) { - free_netdev(dev); + free_arcdev(dev); return -EIO; } @@ -349,7 +349,7 @@ static void __exit arc_rimi_exit(void) iounmap(lp->mem_start); release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); free_irq(dev->irq, dev); - free_netdev(dev); + free_arcdev(dev); } #ifndef MODULE diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h index 22a49c6d7ae6..5d4a4c7efbbf 100644 --- a/drivers/net/arcnet/arcdevice.h +++ b/drivers/net/arcnet/arcdevice.h @@ -298,6 +298,10 @@ struct arcnet_local { int excnak_pending; /* We just got an excesive nak interrupt */ + /* RESET flag handling */ + int reset_in_progress; + struct work_struct reset_work; + struct { uint16_t sequence; /* sequence number (incs with each packet) */ __be16 aborted_seq; @@ -350,7 +354,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc) void arcnet_unregister_proto(struct ArcProto *proto); irqreturn_t arcnet_interrupt(int irq, void *dev_id); + struct net_device *alloc_arcdev(const char *name); +void free_arcdev(struct net_device *dev); int arcnet_open(struct net_device *dev); int arcnet_close(struct net_device *dev); diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index e04efc0a5c97..d76dd7d14299 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -387,10 +387,44 @@ static void arcnet_timer(struct timer_list *t) struct arcnet_local *lp = from_timer(lp, t, timer); struct net_device *dev = lp->dev; - if (!netif_carrier_ok(dev)) { + spin_lock_irq(&lp->lock); + + if (!lp->reset_in_progress && !netif_carrier_ok(dev)) { netif_carrier_on(dev); netdev_info(dev, "link up\n"); } + + spin_unlock_irq(&lp->lock); +} + +static void reset_device_work(struct work_struct *work) +{ + struct arcnet_local *lp; + struct net_device *dev; + + lp = container_of(work, struct arcnet_local, reset_work); + dev = lp->dev; + + /* Do not bring the network interface back up if an ifdown + * was already done. 
+ */ + if (!netif_running(dev) || !lp->reset_in_progress) + return; + + rtnl_lock(); + + /* Do another check, in case of an ifdown that was triggered in + * the small race window between the exit condition above and + * acquiring RTNL. + */ + if (!netif_running(dev) || !lp->reset_in_progress) + goto out; + + dev_close(dev); + dev_open(dev, NULL); + +out: + rtnl_unlock(); } static void arcnet_reply_tasklet(unsigned long data) @@ -452,12 +486,25 @@ struct net_device *alloc_arcdev(const char *name) lp->dev = dev; spin_lock_init(&lp->lock); timer_setup(&lp->timer, arcnet_timer, 0); + INIT_WORK(&lp->reset_work, reset_device_work); } return dev; } EXPORT_SYMBOL(alloc_arcdev); +void free_arcdev(struct net_device *dev) +{ + struct arcnet_local *lp = netdev_priv(dev); + + /* Do not cancel this at ->ndo_close(), as the workqueue itself + * indirectly calls the ifdown path through dev_close(). + */ + cancel_work_sync(&lp->reset_work); + free_netdev(dev); +} +EXPORT_SYMBOL(free_arcdev); + /* Open/initialize the board. This is called sometime after booting when * the 'ifconfig' program is run. * @@ -587,6 +634,10 @@ int arcnet_close(struct net_device *dev) /* shut down the card */ lp->hw.close(dev); + + /* reset counters */ + lp->reset_in_progress = 0; + module_put(lp->hw.owner); return 0; } @@ -820,6 +871,9 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) spin_lock_irqsave(&lp->lock, flags); + if (lp->reset_in_progress) + goto out; + /* RESET flag was enabled - if device is not running, we must * clear it right away (but nothing else). */ @@ -852,11 +906,14 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) if (status & RESETflag) { arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n", status); - arcnet_close(dev); - arcnet_open(dev); + + lp->reset_in_progress = 1; + netif_stop_queue(dev); + netif_carrier_off(dev); + schedule_work(&lp->reset_work); /* get out of the interrupt handler! */ - break; + goto out; } /* RX is inhibited - we must have received something. * Prepare to receive into the next buffer. 
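The arcnet rework above is a standard instance of deferring heavy work out of interrupt context: dev_close()/dev_open() can sleep and take RTNL, neither of which is allowed in a hard IRQ handler, so the handler only records the reset and schedules a work item that re-checks the flag in process context. A condensed sketch of the pattern, with hypothetical names rather than the driver's:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_priv {
	spinlock_t lock;
	int reset_in_progress;
	struct work_struct reset_work;
};

/* Process context: sleeping and rtnl_lock() are allowed here. */
static void my_reset_work(struct work_struct *work)
{
	struct my_priv *p = container_of(work, struct my_priv, reset_work);

	/* re-check p->reset_in_progress, then dev_close()/dev_open() */
}

/* Hard IRQ context: record the event and defer everything else. */
static irqreturn_t my_irq(int irq, void *dev_id)
{
	struct my_priv *p = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	p->reset_in_progress = 1;
	schedule_work(&p->reset_work);
	spin_unlock_irqrestore(&p->lock, flags);

	return IRQ_HANDLED;
}

The flag doubles as the guard the worker and the close path check, which is why the driver above clears it in arcnet_close() and bails out of the worker if the interface was already downed.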
@@ -1052,6 +1109,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) udelay(1); lp->hw.intmask(dev, lp->intmask); +out: spin_unlock_irqrestore(&lp->lock, flags); return retval; } diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index f983c4ce6b07..be618e4b9ed5 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -169,7 +169,7 @@ static int __init com20020_init(void) dev->irq = 9; if (com20020isa_probe(dev)) { - free_netdev(dev); + free_arcdev(dev); return -EIO; } @@ -182,7 +182,7 @@ static void __exit com20020_exit(void) unregister_netdev(my_dev); free_irq(my_dev->irq, my_dev); release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE); - free_netdev(my_dev); + free_arcdev(my_dev); } #ifndef MODULE diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 9f44e2e458df..b4f8798d8c50 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -294,7 +294,7 @@ static void com20020pci_remove(struct pci_dev *pdev) unregister_netdev(dev); free_irq(dev->irq, dev); - free_netdev(dev); + free_arcdev(dev); } } diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c index cf607ffcf358..e0c7720bd5da 100644 --- a/drivers/net/arcnet/com20020_cs.c +++ b/drivers/net/arcnet/com20020_cs.c @@ -113,6 +113,7 @@ static int com20020_probe(struct pcmcia_device *p_dev) struct com20020_dev *info; struct net_device *dev; struct arcnet_local *lp; + int ret = -ENOMEM; dev_dbg(&p_dev->dev, "com20020_attach()\n"); @@ -142,12 +143,18 @@ static int com20020_probe(struct pcmcia_device *p_dev) info->dev = dev; p_dev->priv = info; - return com20020_config(p_dev); + ret = com20020_config(p_dev); + if (ret) + goto fail_config; + return 0; + +fail_config: + free_arcdev(dev); fail_alloc_dev: kfree(info); fail_alloc_info: - return -ENOMEM; + return ret; } /* com20020_attach */ static void com20020_detach(struct pcmcia_device *link) @@ -177,7 +184,7 @@ static void com20020_detach(struct pcmcia_device *link) dev = info->dev; if (dev) { dev_dbg(&link->dev, "kfree...\n"); - free_netdev(dev); + free_arcdev(dev); } dev_dbg(&link->dev, "kfree2...\n"); kfree(info); diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index cf214b730671..3856b447d38e 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -396,7 +396,7 @@ static int __init com90io_init(void) err = com90io_probe(dev); if (err) { - free_netdev(dev); + free_arcdev(dev); return err; } @@ -419,7 +419,7 @@ static void __exit com90io_exit(void) free_irq(dev->irq, dev); release_region(dev->base_addr, ARCNET_TOTAL_SIZE); - free_netdev(dev); + free_arcdev(dev); } module_init(com90io_init) diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index 3dc3d533cb19..d8dfb9ea0de8 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -554,7 +554,7 @@ err_free_irq: err_release_mem: release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); err_free_dev: - free_netdev(dev); + free_arcdev(dev); return -EIO; } @@ -672,7 +672,7 @@ static void __exit com90xx_exit(void) release_region(dev->base_addr, ARCNET_TOTAL_SIZE); release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); - free_netdev(dev); + free_arcdev(dev); } } diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index c2cef7ba2671..acb6ff0be5ff 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -85,8 +85,9 @@ 
static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = { static u16 ad_ticks_per_sec; static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; -static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = - MULTICAST_LACPDU_ADDR; +const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = { + 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 +}; /* ================= main 802.3ad protocol functions ================== */ static int ad_lacpdu_send(struct port *port); @@ -1988,30 +1989,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) */ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution) { - /* check that the bond is not initialized yet */ - if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr), - bond->dev->dev_addr)) { + BOND_AD_INFO(bond).aggregator_identifier = 0; + BOND_AD_INFO(bond).system.sys_priority = + bond->params.ad_actor_sys_prio; + if (is_zero_ether_addr(bond->params.ad_actor_system)) + BOND_AD_INFO(bond).system.sys_mac_addr = + *((struct mac_addr *)bond->dev->dev_addr); + else + BOND_AD_INFO(bond).system.sys_mac_addr = + *((struct mac_addr *)bond->params.ad_actor_system); - BOND_AD_INFO(bond).aggregator_identifier = 0; + /* initialize how many times this module is called in one + * second (should be about every 100ms) + */ + ad_ticks_per_sec = tick_resolution; - BOND_AD_INFO(bond).system.sys_priority = - bond->params.ad_actor_sys_prio; - if (is_zero_ether_addr(bond->params.ad_actor_system)) - BOND_AD_INFO(bond).system.sys_mac_addr = - *((struct mac_addr *)bond->dev->dev_addr); - else - BOND_AD_INFO(bond).system.sys_mac_addr = - *((struct mac_addr *)bond->params.ad_actor_system); - - /* initialize how many times this module is called in one - * second (should be about every 100ms) - */ - ad_ticks_per_sec = tick_resolution; - - bond_3ad_initiate_agg_selection(bond, - AD_AGGREGATOR_SELECTION_TIMER * - ad_ticks_per_sec); - } + bond_3ad_initiate_agg_selection(bond, + AD_AGGREGATOR_SELECTION_TIMER * + ad_ticks_per_sec); } /** @@ -2209,7 +2204,8 @@ void bond_3ad_unbind_slave(struct slave *slave) temp_aggregator->num_of_ports--; if (__agg_active_ports(temp_aggregator) == 0) { select_new_active_agg = temp_aggregator->is_active; - ad_clear_agg(temp_aggregator); + if (temp_aggregator->num_of_ports == 0) + ad_clear_agg(temp_aggregator); if (select_new_active_agg) { slave_info(bond->dev, slave->dev, "Removing an active aggregator\n"); /* select new active aggregator */ diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 0436aef9c9ef..152f76f86927 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1279,12 +1279,12 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled) return res; if (rlb_enabled) { - bond->alb_info.rlb_enabled = 1; res = rlb_initialize(bond); if (res) { tlb_deinitialize(bond); return res; } + bond->alb_info.rlb_enabled = 1; } else { bond->alb_info.rlb_enabled = 0; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index cbeb69bca0bb..f38a6ce5749b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -827,12 +827,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev, dev_uc_unsync(slave_dev, bond_dev); dev_mc_unsync(slave_dev, bond_dev); - if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* del lacpdu mc addr from mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_del(slave_dev, lacpdu_multicast); - } + if (BOND_MODE(bond) == BOND_MODE_8023AD) + 
dev_mc_del(slave_dev, lacpdu_mcast_addr); } /*--------------------------- Active slave change ---------------------------*/ @@ -852,7 +848,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1); - bond_hw_addr_flush(bond->dev, old_active->dev); + if (bond->dev->flags & IFF_UP) + bond_hw_addr_flush(bond->dev, old_active->dev); } if (new_active) { @@ -863,10 +860,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(new_active->dev, 1); - netif_addr_lock_bh(bond->dev); - dev_uc_sync(new_active->dev, bond->dev); - dev_mc_sync(new_active->dev, bond->dev); - netif_addr_unlock_bh(bond->dev); + if (bond->dev->flags & IFF_UP) { + netif_addr_lock_bh(bond->dev); + dev_uc_sync(new_active->dev, bond->dev); + dev_mc_sync(new_active->dev, bond->dev); + netif_addr_unlock_bh(bond->dev); + } } } @@ -2073,16 +2072,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, } } - netif_addr_lock_bh(bond_dev); - dev_mc_sync_multiple(slave_dev, bond_dev); - dev_uc_sync_multiple(slave_dev, bond_dev); - netif_addr_unlock_bh(bond_dev); + if (bond_dev->flags & IFF_UP) { + netif_addr_lock_bh(bond_dev); + dev_mc_sync_multiple(slave_dev, bond_dev); + dev_uc_sync_multiple(slave_dev, bond_dev); + netif_addr_unlock_bh(bond_dev); - if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* add lacpdu mc addr to mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_add(slave_dev, lacpdu_multicast); + if (BOND_MODE(bond) == BOND_MODE_8023AD) + dev_mc_add(slave_dev, lacpdu_mcast_addr); } } @@ -2310,7 +2307,8 @@ static int __bond_release_one(struct net_device *bond_dev, if (old_flags & IFF_ALLMULTI) dev_set_allmulti(slave_dev, -1); - bond_hw_addr_flush(bond_dev, slave_dev); + if (old_flags & IFF_UP) + bond_hw_addr_flush(bond_dev, slave_dev); } slave_disable_netpoll(slave); @@ -3368,9 +3366,11 @@ re_arm: if (!rtnl_trylock()) return; - if (should_notify_peers) + if (should_notify_peers) { + bond->send_peer_notif--; call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); + } if (should_notify_rtnl) { bond_slave_state_notify(bond); bond_slave_link_notify(bond); @@ -3770,6 +3770,9 @@ static int bond_open(struct net_device *bond_dev) /* register to receive LACPDUs */ bond->recv_probe = bond_3ad_lacpdu_recv; bond_3ad_initiate_agg_selection(bond, 1); + + bond_for_each_slave(bond, slave, iter) + dev_mc_add(slave->dev, lacpdu_mcast_addr); } if (bond_mode_can_use_xmit_hash(bond)) @@ -3781,6 +3784,7 @@ static int bond_open(struct net_device *bond_dev) static int bond_close(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave; bond_work_cancel_all(bond); bond->send_peer_notif = 0; @@ -3788,6 +3792,19 @@ static int bond_close(struct net_device *bond_dev) bond_alb_deinitialize(bond); bond->recv_probe = NULL; + if (bond_uses_primary(bond)) { + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (slave) + bond_hw_addr_flush(bond_dev, slave->dev); + rcu_read_unlock(); + } else { + struct list_head *iter; + + bond_for_each_slave(bond, slave, iter) + bond_hw_addr_flush(bond_dev, slave->dev); + } + return 0; } diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 47a6d62b7511..a701932f5cc2 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -723,13 +723,21 @@ static int cfv_probe(struct 
virtio_device *vdev) /* Carrier is off until netdevice is opened */ netif_carrier_off(netdev); + /* serialize netdev register + virtio_device_ready() with ndo_open() */ + rtnl_lock(); + /* register Netdev */ - err = register_netdev(netdev); + err = register_netdevice(netdev); if (err) { + rtnl_unlock(); dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err); goto err; } + virtio_device_ready(vdev); + + rtnl_unlock(); + debugfs_init(cfv); return 0; diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c index 194c86e0f340..8f6dccd5a587 100644 --- a/drivers/net/can/cc770/cc770_isa.c +++ b/drivers/net/can/cc770/cc770_isa.c @@ -264,22 +264,24 @@ static int cc770_isa_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "couldn't register device (err=%d)\n", err); - goto exit_unmap; + goto exit_free; } dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n", priv->reg_base, dev->irq); return 0; - exit_unmap: +exit_free: + free_cc770dev(dev); +exit_unmap: if (mem[idx]) iounmap(base); - exit_release: +exit_release: if (mem[idx]) release_mem_region(mem[idx], iosize); else release_region(port[idx], iosize); - exit: +exit: return err; } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 7cbaac238ff6..429950241de3 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -954,11 +954,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, u32 reg_ctrl, reg_id, reg_iflag1; int i; - if (unlikely(drop)) { - skb = ERR_PTR(-ENOBUFS); - goto mark_as_read; - } - mb = flexcan_get_mb(priv, n); if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { @@ -987,6 +982,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, reg_ctrl = priv->read(&mb->can_ctrl); } + if (unlikely(drop)) { + skb = ERR_PTR(-ENOBUFS); + goto mark_as_read; + } + if (reg_ctrl & FLEXCAN_MB_CNT_EDL) skb = alloc_canfd_skb(offload->dev, &cfd); else diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index 39802f107eb1..3794a7b2c64e 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -241,13 +241,14 @@ struct grcan_device_config { .rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \ } -#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100 +#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100 #define GRLIB_VERSION_MASK 0xffff /* GRCAN private data structure */ struct grcan_priv { struct can_priv can; /* must be the first member */ struct net_device *dev; + struct device *ofdev_dev; struct napi_struct napi; struct grcan_registers __iomem *regs; /* ioremap'ed registers */ @@ -924,7 +925,7 @@ static void grcan_free_dma_buffers(struct net_device *dev) struct grcan_priv *priv = netdev_priv(dev); struct grcan_dma *dma = &priv->dma; - dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf, + dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf, dma->base_handle); memset(dma, 0, sizeof(*dma)); } @@ -949,7 +950,7 @@ static int grcan_allocate_dma_buffers(struct net_device *dev, /* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */ dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT; - dma->base_buf = dma_alloc_coherent(&dev->dev, + dma->base_buf = dma_alloc_coherent(priv->ofdev_dev, dma->base_size, &dma->base_handle, GFP_KERNEL); @@ -1113,8 +1114,10 @@ static int grcan_close(struct net_device *dev) priv->closing = true; if (priv->need_txbug_workaround) { + spin_unlock_irqrestore(&priv->lock, flags); del_timer_sync(&priv->hang_timer); del_timer_sync(&priv->rr_timer); + 
spin_lock_irqsave(&priv->lock, flags); } netif_stop_queue(dev); grcan_stop_hardware(dev); @@ -1134,7 +1137,7 @@ static int grcan_close(struct net_device *dev) return 0; } -static int grcan_transmit_catch_up(struct net_device *dev, int budget) +static void grcan_transmit_catch_up(struct net_device *dev) { struct grcan_priv *priv = netdev_priv(dev); unsigned long flags; @@ -1142,7 +1145,7 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget) spin_lock_irqsave(&priv->lock, flags); - work_done = catch_up_echo_skb(dev, budget, true); + work_done = catch_up_echo_skb(dev, -1, true); if (work_done) { if (!priv->resetting && !priv->closing && !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) @@ -1156,8 +1159,6 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget) } spin_unlock_irqrestore(&priv->lock, flags); - - return work_done; } static int grcan_receive(struct net_device *dev, int budget) @@ -1239,19 +1240,13 @@ static int grcan_poll(struct napi_struct *napi, int budget) struct net_device *dev = priv->dev; struct grcan_registers __iomem *regs = priv->regs; unsigned long flags; - int tx_work_done, rx_work_done; - int rx_budget = budget / 2; - int tx_budget = budget - rx_budget; + int work_done; - /* Half of the budget for receiving messages */ - rx_work_done = grcan_receive(dev, rx_budget); + work_done = grcan_receive(dev, budget); - /* Half of the budget for transmitting messages as that can trigger echo - * frames being received - */ - tx_work_done = grcan_transmit_catch_up(dev, tx_budget); + grcan_transmit_catch_up(dev); - if (rx_work_done < rx_budget && tx_work_done < tx_budget) { + if (work_done < budget) { napi_complete(napi); /* Guarantee no interference with a running reset that otherwise @@ -1268,7 +1263,7 @@ static int grcan_poll(struct napi_struct *napi, int budget) spin_unlock_irqrestore(&priv->lock, flags); } - return rx_work_done + tx_work_done; + return work_done; } /* Work tx bug by waiting while for the risky situation to clear. 
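/* Editor's note -- illustrative only: the grcan_close() change above
 * follows the rule that del_timer_sync() must not be called while
 * holding a lock the timer callback itself takes, because it spins
 * until the callback finishes. Sketch with made-up "grdemo" names:
 */
#include <linux/spinlock.h>
#include <linux/timer.h>

struct grdemo_priv {
	spinlock_t lock;
	struct timer_list hang_timer;	/* callback takes grdemo_priv::lock */
	bool closing;
};

static void grdemo_close(struct grdemo_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	priv->closing = true;
	/* Drop the lock before waiting for the timer callback to finish;
	 * holding it here would deadlock against the callback.
	 */
	spin_unlock_irqrestore(&priv->lock, flags);
	del_timer_sync(&priv->hang_timer);
	spin_lock_irqsave(&priv->lock, flags);
	/* ... the rest of the teardown still runs under the lock ... */
	spin_unlock_irqrestore(&priv->lock, flags);
}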
If that fails, @@ -1600,6 +1595,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev, memcpy(&priv->config, &grcan_module_config, sizeof(struct grcan_device_config)); priv->dev = dev; + priv->ofdev_dev = &ofdev->dev; priv->regs = base; priv->can.bittiming_const = &grcan_bittiming_const; priv->can.do_set_bittiming = grcan_set_bittiming; @@ -1652,6 +1648,7 @@ exit_free_candev: static int grcan_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; + struct device_node *sysid_parent; u32 sysid, ambafreq; int irq, err; void __iomem *base; @@ -1660,10 +1657,14 @@ static int grcan_probe(struct platform_device *ofdev) /* Compare GRLIB version number with the first that does not * have the tx bug (see start_xmit) */ - err = of_property_read_u32(np, "systemid", &sysid); - if (!err && ((sysid & GRLIB_VERSION_MASK) - >= GRCAN_TXBUG_SAFE_GRLIB_VERSION)) - txbug = false; + sysid_parent = of_find_node_by_path("/ambapp0"); + if (sysid_parent) { + err = of_property_read_u32(sysid_parent, "systemid", &sysid); + if (!err && ((sysid & GRLIB_VERSION_MASK) >= + GRCAN_TXBUG_SAFE_GRLIB_VERSION)) + txbug = false; + of_node_put(sysid_parent); + } err = of_property_read_u32(np, "freq", &ambafreq); if (err) { diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index e254e04ae257..ef649764f9b4 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -325,14 +325,14 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) &mscan_clksrc); if (!priv->can.clock.freq) { dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n"); - goto exit_free_mscan; + goto exit_put_clock; } err = register_mscandev(dev, mscan_clksrc); if (err) { dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); - goto exit_free_mscan; + goto exit_put_clock; } dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n", @@ -340,7 +340,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) return 0; -exit_free_mscan: +exit_put_clock: + if (data->put_clock) + data->put_clock(ofdev); free_candev(dev); exit_dispose_irq: irq_dispose_mapping(irq); diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index 79d9abdcc65a..1272ec793a8d 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -489,6 +489,7 @@ static void pch_can_error(struct net_device *ndev, u32 status) if (!skb) return; + errc = ioread32(&priv->regs->errc); if (status & PCH_BUS_OFF) { pch_can_set_tx_all(priv, 0); pch_can_set_rx_all(priv, 0); @@ -496,9 +497,11 @@ static void pch_can_error(struct net_device *ndev, u32 status) cf->can_id |= CAN_ERR_BUSOFF; priv->can.can_stats.bus_off++; can_bus_off(ndev); + } else { + cf->data[6] = errc & PCH_TEC; + cf->data[7] = (errc & PCH_REC) >> 8; } - errc = ioread32(&priv->regs->errc); /* Warning interrupt. 
*/ if (status & PCH_EWARN) { state = CAN_STATE_ERROR_WARNING; @@ -556,9 +559,6 @@ static void pch_can_error(struct net_device *ndev, u32 status) break; } - cf->data[6] = errc & PCH_TEC; - cf->data[7] = (errc & PCH_REC) >> 8; - priv->can.state = state; netif_receive_skb(skb); diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 3570a4de0085..134eda66f0dc 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -235,11 +235,8 @@ static void rcar_can_error(struct net_device *ndev) if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) { txerr = readb(&priv->regs->tecr); rxerr = readb(&priv->regs->recr); - if (skb) { + if (skb) cf->can_id |= CAN_ERR_CRTL; - cf->data[6] = txerr; - cf->data[7] = rxerr; - } } if (eifr & RCAR_CAN_EIFR_BEIF) { int rx_errors = 0, tx_errors = 0; @@ -339,6 +336,9 @@ static void rcar_can_error(struct net_device *ndev) can_bus_off(ndev); if (skb) cf->can_id |= CAN_ERR_BUSOFF; + } else if (skb) { + cf->data[6] = txerr; + cf->data[7] = rxerr; } if (eifr & RCAR_CAN_EIFR_ORIF) { netdev_dbg(priv->ndev, "Receive overrun error interrupt\n"); diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 67f0f14e2bf4..c61534a2a2d3 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1075,7 +1075,7 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id) struct rcar_canfd_global *gpriv = dev_id; struct net_device *ndev; struct rcar_canfd_channel *priv; - u32 sts, gerfl; + u32 sts, cc, gerfl; u32 ch, ridx; /* Global error interrupts still indicate a condition specific @@ -1093,7 +1093,9 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id) /* Handle Rx interrupts */ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); - if (likely(sts & RCANFD_RFSTS_RFIF)) { + cc = rcar_canfd_read(priv->base, RCANFD_RFCC(ridx)); + if (likely(sts & RCANFD_RFSTS_RFIF && + cc & RCANFD_RFCC_RFIE)) { if (napi_schedule_prep(&priv->napi)) { /* Disable Rx FIFO interrupts */ rcar_canfd_clear_bit(priv->base, diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 25a4d7d0b349..ee34baeb2afe 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -405,9 +405,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) txerr = priv->read_reg(priv, SJA1000_TXERR); rxerr = priv->read_reg(priv, SJA1000_RXERR); - cf->data[6] = txerr; - cf->data[7] = rxerr; - if (isrc & IRQ_DOI) { /* data overrun interrupt */ netdev_dbg(dev, "data overrun interrupt\n"); @@ -429,6 +426,10 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) else state = CAN_STATE_ERROR_ACTIVE; } + if (state != CAN_STATE_BUS_OFF) { + cf->data[6] = txerr; + cf->data[7] = rxerr; + } if (isrc & IRQ_BEI) { /* bus error interrupt */ priv->can.can_stats.bus_error++; diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c index d513fac50718..db3e767d5320 100644 --- a/drivers/net/can/sja1000/sja1000_isa.c +++ b/drivers/net/can/sja1000/sja1000_isa.c @@ -202,22 +202,24 @@ static int sja1000_isa_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); - goto exit_unmap; + goto exit_free; } dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n", DRV_NAME, priv->reg_base, dev->irq); return 0; - exit_unmap: +exit_free: + free_sja1000dev(dev); +exit_unmap: if (mem[idx]) 
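/* Editor's note -- sketch of the convention the pch_can, rcar_can and
 * sja1000 changes above converge on: the TX/RX error counters carried in
 * data[6]/data[7] of a CAN error frame are only meaningful while the
 * controller is not bus-off, so they are filled in only below that
 * state. Helper name is illustrative, not any driver's API.
 */
#include <linux/can.h>
#include <linux/can/dev.h>

static void demo_fill_berr_counters(struct can_frame *cf,
				    enum can_state state, u8 txerr, u8 rxerr)
{
	if (state == CAN_STATE_BUS_OFF)
		return;		/* counters are stale once bus-off */
	cf->data[6] = txerr;
	cf->data[7] = rxerr;
}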
iounmap(base); - exit_release: +exit_release: if (mem[idx]) release_mem_region(mem[idx], iosize); else release_region(port[idx], iosize); - exit: +exit: return err; } diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 7d2315c8cacb..28273e84171a 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -670,8 +670,6 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) txerr = hi3110_read(spi, HI3110_READ_TEC); rxerr = hi3110_read(spi, HI3110_READ_REC); - cf->data[6] = txerr; - cf->data[7] = rxerr; tx_state = txerr >= rxerr ? new_state : 0; rx_state = txerr <= rxerr ? new_state : 0; can_change_state(net, cf, tx_state, rx_state); @@ -684,6 +682,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) hi3110_hw_sleep(spi); break; } + } else { + cf->data[6] = txerr; + cf->data[7] = rxerr; } } diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 89897a2d41fa..ffcb04aac972 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1074,9 +1074,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) mcp251x_read_2regs(spi, CANINTF, &intf, &eflag); - /* mask out flags we don't care about */ - intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR; - /* receive buffer 0 */ if (intf & CANINTF_RX0IF) { mcp251x_hw_rx(spi, 0); @@ -1086,6 +1083,18 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) if (mcp251x_is_2510(spi)) mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00); + + /* check if buffer 1 is already known to be full, no need to re-read */ + if (!(intf & CANINTF_RX1IF)) { + u8 intf1, eflag1; + + /* intf needs to be read again to avoid a race condition */ + mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1); + + /* combine flags from both operations for error handling */ + intf |= intf1; + eflag |= eflag1; + } } /* receive buffer 1 */ @@ -1096,6 +1105,9 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) clear_intf |= CANINTF_RX1IF; } + /* mask out flags we don't care about */ + intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR; + /* any error or tx interrupt we need to clear? */ if (intf & (CANINTF_ERR | CANINTF_TX)) clear_intf |= intf & (CANINTF_ERR | CANINTF_TX); @@ -1407,11 +1419,14 @@ static int mcp251x_can_probe(struct spi_device *spi) ret = mcp251x_gpio_setup(priv); if (ret) - goto error_probe; + goto out_unregister_candev; netdev_info(net, "MCP%x successfully initialized.\n", priv->model); return 0; +out_unregister_candev: + unregister_candev(net); + error_probe: destroy_workqueue(priv->wq); priv->wq = NULL; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index fa1246e39980..766dbd19bba6 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -426,7 +426,7 @@ struct mcp251xfd_hw_tef_obj { /* The tx_obj_raw version is used in spi async, i.e. without * regmap. We have to take care of endianness ourselves. 
*/ -struct mcp251xfd_hw_tx_obj_raw { +struct __packed mcp251xfd_hw_tx_obj_raw { __le32 id; __le32 flags; u8 data[sizeof_field(struct canfd_frame, data)]; diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index b3f2f4fe5ee0..39ddb3d849dd 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -525,11 +525,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) rxerr = (errc >> 16) & 0xFF; txerr = errc & 0xFF; - if (skb) { - cf->data[6] = txerr; - cf->data[7] = rxerr; - } - if (isrc & SUN4I_INT_DATA_OR) { /* data overrun interrupt */ netdev_dbg(dev, "data overrun interrupt\n"); @@ -560,6 +555,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) else state = CAN_STATE_ERROR_ACTIVE; } + if (skb && state != CAN_STATE_BUS_OFF) { + cf->data[6] = txerr; + cf->data[7] = rxerr; + } if (isrc & SUN4I_INT_BUS_ERR) { /* bus error interrupt */ netdev_dbg(dev, "bus error interrupt\n"); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 6458da9c13b9..ff05b5230f0b 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -194,7 +194,7 @@ struct __packed ems_cpc_msg { __le32 ts_sec; /* timestamp in seconds */ __le32 ts_nsec; /* timestamp in nano seconds */ - union { + union __packed { u8 generic[64]; struct cpc_can_msg can_msg; struct cpc_can_params can_params; diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 8847942a8d97..73c5343e609b 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -227,6 +227,10 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, u8 rxerr = msg->msg.rx.data[2]; u8 txerr = msg->msg.rx.data[3]; + netdev_dbg(priv->netdev, + "CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n", + msg->msg.rx.dlc, state, ecc, rxerr, txerr); + skb = alloc_can_err_skb(priv->netdev, &cf); if (skb == NULL) { stats->rx_dropped++; @@ -253,6 +257,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, break; default: priv->can.state = CAN_STATE_ERROR_ACTIVE; + txerr = 0; + rxerr = 0; break; } } else { diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index e023c401f4f7..a879200eaab0 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -184,6 +184,8 @@ struct gs_can { struct usb_anchor tx_submitted; atomic_t active_tx_urbs; + void *rxbuf[GS_MAX_RX_URBS]; + dma_addr_t rxbuf_dma[GS_MAX_RX_URBS]; }; /* usb interface struct */ @@ -592,6 +594,7 @@ static int gs_can_open(struct net_device *netdev) for (i = 0; i < GS_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; + dma_addr_t buf_dma; /* alloc rx urb */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -602,7 +605,7 @@ static int gs_can_open(struct net_device *netdev) buf = usb_alloc_coherent(dev->udev, sizeof(struct gs_host_frame), GFP_KERNEL, - &urb->transfer_dma); + &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); @@ -610,6 +613,8 @@ static int gs_can_open(struct net_device *netdev) return -ENOMEM; } + urb->transfer_dma = buf_dma; + /* fill, anchor, and submit rx urb */ usb_fill_bulk_urb(urb, dev->udev, @@ -633,10 +638,17 @@ static int gs_can_open(struct net_device *netdev) rc); usb_unanchor_urb(urb); + usb_free_coherent(dev->udev, + sizeof(struct gs_host_frame), + buf, + buf_dma); usb_free_urb(urb); break; } + dev->rxbuf[i] = buf; + dev->rxbuf_dma[i] = buf_dma; + /* Drop reference, * USB core will take care of freeing it */ @@ -666,6 +678,7 @@ static int 
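/* Editor's note -- why the mcp251xfd and ems_usb structs above gain
 * __packed: a struct that mirrors an on-the-wire layout must carry no
 * compiler-inserted padding, or sizeof() and the field offsets stop
 * matching the device. Made-up layout, chosen so the padding is visible:
 */
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct demo_msg_unpacked {
	u8 type;
	__le32 len;	/* compiler typically pads 3 bytes before this */
};

struct __packed demo_msg_packed {
	u8 type;
	__le32 len;	/* no padding: offset 1, sizeof == 5 */
};

static_assert(sizeof(struct demo_msg_packed) == 5);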
gs_can_open(struct net_device *netdev) flags |= GS_CAN_MODE_TRIPLE_SAMPLE; /* finally start device */ + dev->can.state = CAN_STATE_ERROR_ACTIVE; dm->mode = cpu_to_le32(GS_CAN_MODE_START); dm->flags = cpu_to_le32(flags); rc = usb_control_msg(interface_to_usbdev(dev->iface), @@ -682,13 +695,12 @@ static int gs_can_open(struct net_device *netdev) if (rc < 0) { netdev_err(netdev, "Couldn't start device (err=%d)\n", rc); kfree(dm); + dev->can.state = CAN_STATE_STOPPED; return rc; } kfree(dm); - dev->can.state = CAN_STATE_ERROR_ACTIVE; - parent->active_channels++; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); @@ -701,13 +713,20 @@ static int gs_can_close(struct net_device *netdev) int rc; struct gs_can *dev = netdev_priv(netdev); struct gs_usb *parent = dev->parent; + unsigned int i; netif_stop_queue(netdev); /* Stop polling */ parent->active_channels--; - if (!parent->active_channels) + if (!parent->active_channels) { usb_kill_anchored_urbs(&parent->rx_submitted); + for (i = 0; i < GS_MAX_RX_URBS; i++) + usb_free_coherent(dev->udev, + sizeof(struct gs_host_frame), + dev->rxbuf[i], + dev->rxbuf_dma[i]); + } /* Stop sending URBs */ usb_kill_anchored_urbs(&dev->tx_submitted); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h index 390b6bde883c..62958f04a2f2 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -35,9 +35,10 @@ #define KVASER_USB_RX_BUFFER_SIZE 3072 #define KVASER_USB_MAX_NET_DEVICES 5 -/* USB devices features */ -#define KVASER_USB_HAS_SILENT_MODE BIT(0) -#define KVASER_USB_HAS_TXRX_ERRORS BIT(1) +/* Kvaser USB device quirks */ +#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0) +#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1) +#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2) /* Device capabilities */ #define KVASER_USB_CAP_BERR_CAP 0x01 @@ -65,12 +66,7 @@ struct kvaser_usb_dev_card_data_hydra { struct kvaser_usb_dev_card_data { u32 ctrlmode_supported; u32 capabilities; - union { - struct { - enum kvaser_usb_leaf_family family; - } leaf; - struct kvaser_usb_dev_card_data_hydra hydra; - }; + struct kvaser_usb_dev_card_data_hydra hydra; }; /* Context for an outstanding, not yet ACKed, transmission */ @@ -84,7 +80,7 @@ struct kvaser_usb { struct usb_device *udev; struct usb_interface *intf; struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES]; - const struct kvaser_usb_dev_ops *ops; + const struct kvaser_usb_driver_info *driver_info; const struct kvaser_usb_dev_cfg *cfg; struct usb_endpoint_descriptor *bulk_in, *bulk_out; @@ -166,6 +162,12 @@ struct kvaser_usb_dev_ops { int *cmd_len, u16 transid); }; +struct kvaser_usb_driver_info { + u32 quirks; + enum kvaser_usb_leaf_family family; + const struct kvaser_usb_dev_ops *ops; +}; + struct kvaser_usb_dev_cfg { const struct can_clock clock; const unsigned int timestamp_freq; @@ -176,6 +178,8 @@ struct kvaser_usb_dev_cfg { extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops; extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops; +void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv); + int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len, int *actual_len); @@ -185,4 +189,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd, int len); int kvaser_usb_can_rx_over_error(struct net_device *netdev); + +extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const; + #endif /* KVASER_USB_H */ diff --git 
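/* Editor's note -- the gs_usb leak fix above boils down to this
 * bookkeeping pattern (names illustrative): usb_free_coherent() needs
 * the same (size, CPU pointer, DMA handle) triple that
 * usb_alloc_coherent() returned, so both halves of every RX buffer are
 * recorded at open time and released at close time.
 */
#include <linux/usb.h>

#define DEMO_NURBS	4
#define DEMO_BUFSZ	64

struct demo_candev {
	struct usb_device *udev;
	void *rxbuf[DEMO_NURBS];
	dma_addr_t rxbuf_dma[DEMO_NURBS];
};

static int demo_rx_alloc(struct demo_candev *d, int i, struct urb *urb)
{
	dma_addr_t dma;
	void *buf = usb_alloc_coherent(d->udev, DEMO_BUFSZ, GFP_KERNEL, &dma);

	if (!buf)
		return -ENOMEM;
	urb->transfer_dma = dma;
	d->rxbuf[i] = buf;	/* remember both halves for the free */
	d->rxbuf_dma[i] = dma;
	return 0;
}

static void demo_rx_free(struct demo_candev *d)
{
	int i;

	for (i = 0; i < DEMO_NURBS; i++)
		usb_free_coherent(d->udev, DEMO_BUFSZ,
				  d->rxbuf[i], d->rxbuf_dma[i]);
}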
a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 0f1d3e807d63..7491f85e85b3 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -79,104 +79,134 @@ #define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269 #define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270 -static inline bool kvaser_is_leaf(const struct usb_device_id *id) -{ - return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID && - id->idProduct <= USB_CAN_R_PRODUCT_ID) || - (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID && - id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID); -} +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = { + .quirks = 0, + .ops = &kvaser_usb_hydra_dev_ops, +}; -static inline bool kvaser_is_usbcan(const struct usb_device_id *id) -{ - return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID && - id->idProduct <= USB_MEMORATOR_PRODUCT_ID; -} +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_usbcan = { + .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS | + KVASER_USB_QUIRK_HAS_SILENT_MODE, + .family = KVASER_USBCAN, + .ops = &kvaser_usb_leaf_dev_ops, +}; -static inline bool kvaser_is_hydra(const struct usb_device_id *id) -{ - return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID && - id->idProduct <= USB_HYBRID_PRO_CANLIN_PRODUCT_ID; -} +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf = { + .quirks = KVASER_USB_QUIRK_IGNORE_CLK_FREQ, + .family = KVASER_LEAF, + .ops = &kvaser_usb_leaf_dev_ops, +}; + +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err = { + .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS | + KVASER_USB_QUIRK_IGNORE_CLK_FREQ, + .family = KVASER_LEAF, + .ops = &kvaser_usb_leaf_dev_ops, +}; + +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_listen = { + .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS | + KVASER_USB_QUIRK_HAS_SILENT_MODE | + KVASER_USB_QUIRK_IGNORE_CLK_FREQ, + .family = KVASER_LEAF, + .ops = &kvaser_usb_leaf_dev_ops, +}; + +static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = { + .quirks = 0, + .ops = &kvaser_usb_leaf_dev_ops, +}; static const struct usb_device_id kvaser_usb_table[] = { - /* Leaf USB product IDs */ - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) }, + /* Leaf M32C USB product IDs */ + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { 
USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS | - KVASER_USB_HAS_SILENT_MODE }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, + + /* Leaf i.MX28 USB product IDs */ + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID), + 
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, /* USBCANII USB product IDs */ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID), - .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, /* Minihydra USB product IDs */ - { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { } }; MODULE_DEVICE_TABLE(usb, kvaser_usb_table); @@ -267,6 +297,7 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev) static void kvaser_usb_read_bulk_callback(struct urb *urb) { struct kvaser_usb *dev 
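/* Editor's note -- sketch of the table rework above, with a hypothetical
 * "demo" driver and placeholder IDs: instead of classifying devices by
 * product-ID ranges at probe time, each usb_device_id entry carries a
 * pointer to a per-model info struct in .driver_info, and probe() casts
 * it back.
 */
#include <linux/bits.h>
#include <linux/module.h>
#include <linux/usb.h>

struct demo_driver_info {
	u32 quirks;
};

static const struct demo_driver_info demo_info_err = {
	.quirks = BIT(0),	/* e.g. "reports TX/RX errors" */
};

static const struct usb_device_id demo_table[] = {
	{ USB_DEVICE(0x0bfd, 0x0001),	/* vendor/product are placeholders */
	  .driver_info = (kernel_ulong_t)&demo_info_err },
	{ }
};
MODULE_DEVICE_TABLE(usb, demo_table);

static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	const struct demo_driver_info *info =
		(const struct demo_driver_info *)id->driver_info;

	if (!info)		/* entry without info: not supported */
		return -ENODEV;
	/* ... stash info and branch on info->quirks ... */
	return 0;
}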
= urb->context; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int err; unsigned int i; @@ -283,8 +314,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) goto resubmit_urb; } - dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer, - urb->actual_length); + ops->dev_read_bulk_callback(dev, urb->transfer_buffer, + urb->actual_length); resubmit_urb: usb_fill_bulk_urb(urb, dev->udev, @@ -378,6 +409,7 @@ static int kvaser_usb_open(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int err; err = open_candev(netdev); @@ -388,11 +420,11 @@ static int kvaser_usb_open(struct net_device *netdev) if (err) goto error; - err = dev->ops->dev_set_opt_mode(priv); + err = ops->dev_set_opt_mode(priv); if (err) goto error; - err = dev->ops->dev_start_chip(priv); + err = ops->dev_start_chip(priv); if (err) { netdev_warn(netdev, "Cannot start device, error %d\n", err); goto error; @@ -421,7 +453,7 @@ static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) /* This method might sleep. Do not call it in the atomic context * of URB completions. */ -static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) +void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) { usb_kill_anchored_urbs(&priv->tx_submitted); kvaser_usb_reset_tx_urb_contexts(priv); @@ -449,22 +481,23 @@ static int kvaser_usb_close(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int err; netif_stop_queue(netdev); - err = dev->ops->dev_flush_queue(priv); + err = ops->dev_flush_queue(priv); if (err) netdev_warn(netdev, "Cannot flush queue, error %d\n", err); - if (dev->ops->dev_reset_chip) { - err = dev->ops->dev_reset_chip(dev, priv->channel); + if (ops->dev_reset_chip) { + err = ops->dev_reset_chip(dev, priv->channel); if (err) netdev_warn(netdev, "Cannot reset card, error %d\n", err); } - err = dev->ops->dev_stop_chip(priv); + err = ops->dev_stop_chip(priv); if (err) netdev_warn(netdev, "Cannot stop device, error %d\n", err); @@ -503,6 +536,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; struct net_device_stats *stats = &netdev->stats; struct kvaser_usb_tx_urb_context *context = NULL; struct urb *urb; @@ -545,8 +579,8 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, goto freeurb; } - buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len, - context->echo_index); + buf = ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len, + context->echo_index); if (!buf) { stats->tx_dropped++; dev_kfree_skb(skb); @@ -630,15 +664,16 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) } } -static int kvaser_usb_init_one(struct kvaser_usb *dev, - const struct usb_device_id *id, int channel) +static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) { struct net_device *netdev; struct kvaser_usb_net_priv *priv; + const struct kvaser_usb_driver_info *driver_info = dev->driver_info; + const struct kvaser_usb_dev_ops *ops = driver_info->ops; int err; - if (dev->ops->dev_reset_chip) { - err = dev->ops->dev_reset_chip(dev, channel); + if (ops->dev_reset_chip) { + err = ops->dev_reset_chip(dev, 
channel); if (err) return err; } @@ -655,6 +690,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, init_usb_anchor(&priv->tx_submitted); init_completion(&priv->start_comp); init_completion(&priv->stop_comp); + init_completion(&priv->flush_comp); priv->can.ctrlmode_supported = 0; priv->dev = dev; @@ -667,20 +703,19 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = dev->cfg->clock.freq; priv->can.bittiming_const = dev->cfg->bittiming_const; - priv->can.do_set_bittiming = dev->ops->dev_set_bittiming; - priv->can.do_set_mode = dev->ops->dev_set_mode; - if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) || + priv->can.do_set_bittiming = ops->dev_set_bittiming; + priv->can.do_set_mode = ops->dev_set_mode; + if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) || (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP)) - priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter; - if (id->driver_info & KVASER_USB_HAS_SILENT_MODE) + priv->can.do_get_berr_counter = ops->dev_get_berr_counter; + if (driver_info->quirks & KVASER_USB_QUIRK_HAS_SILENT_MODE) priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported; if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { priv->can.data_bittiming_const = dev->cfg->data_bittiming_const; - priv->can.do_set_data_bittiming = - dev->ops->dev_set_data_bittiming; + priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming; } netdev->flags |= IFF_ECHO; @@ -711,29 +746,22 @@ static int kvaser_usb_probe(struct usb_interface *intf, struct kvaser_usb *dev; int err; int i; + const struct kvaser_usb_driver_info *driver_info; + const struct kvaser_usb_dev_ops *ops; + + driver_info = (const struct kvaser_usb_driver_info *)id->driver_info; + if (!driver_info) + return -ENODEV; dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; - if (kvaser_is_leaf(id)) { - dev->card_data.leaf.family = KVASER_LEAF; - dev->ops = &kvaser_usb_leaf_dev_ops; - } else if (kvaser_is_usbcan(id)) { - dev->card_data.leaf.family = KVASER_USBCAN; - dev->ops = &kvaser_usb_leaf_dev_ops; - } else if (kvaser_is_hydra(id)) { - dev->ops = &kvaser_usb_hydra_dev_ops; - } else { - dev_err(&intf->dev, - "Product ID (%d) is not a supported Kvaser USB device\n", - id->idProduct); - return -ENODEV; - } - dev->intf = intf; + dev->driver_info = driver_info; + ops = driver_info->ops; - err = dev->ops->dev_setup_endpoints(dev); + err = ops->dev_setup_endpoints(dev); if (err) { dev_err(&intf->dev, "Cannot get usb endpoint(s)"); return err; @@ -747,22 +775,22 @@ static int kvaser_usb_probe(struct usb_interface *intf, dev->card_data.ctrlmode_supported = 0; dev->card_data.capabilities = 0; - err = dev->ops->dev_init_card(dev); + err = ops->dev_init_card(dev); if (err) { dev_err(&intf->dev, "Failed to initialize card, error %d\n", err); return err; } - err = dev->ops->dev_get_software_info(dev); + err = ops->dev_get_software_info(dev); if (err) { dev_err(&intf->dev, "Cannot get software info, error %d\n", err); return err; } - if (dev->ops->dev_get_software_details) { - err = dev->ops->dev_get_software_details(dev); + if (ops->dev_get_software_details) { + err = ops->dev_get_software_details(dev); if (err) { dev_err(&intf->dev, "Cannot get software details, error %d\n", err); @@ -780,14 +808,14 @@ static int kvaser_usb_probe(struct usb_interface *intf, dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs); - err 
= dev->ops->dev_get_card_info(dev); + err = ops->dev_get_card_info(dev); if (err) { dev_err(&intf->dev, "Cannot get card info, error %d\n", err); return err; } - if (dev->ops->dev_get_capabilities) { - err = dev->ops->dev_get_capabilities(dev); + if (ops->dev_get_capabilities) { + err = ops->dev_get_capabilities(dev); if (err) { dev_err(&intf->dev, "Cannot get capabilities, error %d\n", err); @@ -797,7 +825,7 @@ static int kvaser_usb_probe(struct usb_interface *intf, } for (i = 0; i < dev->nchannels; i++) { - err = kvaser_usb_init_one(dev, id, i); + err = kvaser_usb_init_one(dev, i); if (err) { kvaser_usb_remove_interfaces(dev); return err; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 218fadc91155..45d278724883 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -371,7 +371,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = { .brp_inc = 1, }; -static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = { +const struct can_bittiming_const kvaser_usb_flexc_bittiming_const = { .name = "kvaser_usb_flex", .tseg1_min = 4, .tseg1_max = 16, @@ -890,8 +890,10 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv, new_state < CAN_STATE_BUS_OFF) priv->can.can_stats.restarts++; - cf->data[6] = bec->txerr; - cf->data[7] = bec->rxerr; + if (new_state != CAN_STATE_BUS_OFF) { + cf->data[6] = bec->txerr; + cf->data[7] = bec->rxerr; + } stats = &netdev->stats; stats->rx_packets++; @@ -1045,8 +1047,10 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, shhwtstamps->hwtstamp = hwtstamp; cf->can_id |= CAN_ERR_BUSERROR; - cf->data[6] = bec.txerr; - cf->data[7] = bec.rxerr; + if (new_state != CAN_STATE_BUS_OFF) { + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } stats->rx_packets++; stats->rx_bytes += cf->can_dlc; @@ -1841,7 +1845,7 @@ static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->start_comp); + reinit_completion(&priv->start_comp); err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ, priv->channel); @@ -1859,7 +1863,7 @@ static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->stop_comp); + reinit_completion(&priv->stop_comp); /* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT * see comment in kvaser_usb_hydra_update_state() @@ -1882,7 +1886,7 @@ static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->flush_comp); + reinit_completion(&priv->flush_comp); err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE, priv->channel); @@ -2024,5 +2028,5 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { .freq = 24000000, }, .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c, + .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 8b5d1add899a..15380cc08ee6 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -100,16 +100,6 @@ #define USBCAN_ERROR_STATE_RX_ERROR BIT(1) #define USBCAN_ERROR_STATE_BUSERROR BIT(2) -/* bittiming parameters */ -#define KVASER_USB_TSEG1_MIN 1 -#define KVASER_USB_TSEG1_MAX 16 -#define KVASER_USB_TSEG2_MIN 1 
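/* Editor's note -- the init_completion() -> reinit_completion() swaps
 * above follow the completion API contract: init_completion() sets up
 * the object's internal wait-queue and should run exactly once, while
 * reinit_completion() only resets the done counter and is what a
 * re-used completion needs before each new wait. Sketch with made-up
 * names:
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct demo_chan {
	struct completion start_comp;
};

static void demo_chan_init(struct demo_chan *c)
{
	init_completion(&c->start_comp);	/* once, at allocation */
}

static int demo_chan_start(struct demo_chan *c)
{
	reinit_completion(&c->start_comp);	/* re-arm before each use */
	/* ... send the start command; the URB completion handler calls
	 * complete(&c->start_comp) when the reply arrives ...
	 */
	if (!wait_for_completion_timeout(&c->start_comp,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;
	return 0;
}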
-#define KVASER_USB_TSEG2_MAX 8 -#define KVASER_USB_SJW_MAX 4 -#define KVASER_USB_BRP_MIN 1 -#define KVASER_USB_BRP_MAX 64 -#define KVASER_USB_BRP_INC 1 - /* ctrl modes */ #define KVASER_CTRL_MODE_NORMAL 1 #define KVASER_CTRL_MODE_SILENT 2 @@ -319,6 +309,38 @@ struct kvaser_cmd { } u; } __packed; +#define CMD_SIZE_ANY 0xff +#define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field) + +static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = { + [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), + [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header), + [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo), + [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can), + [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can), + [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message), + [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event), + [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event), + /* ignored events: */ + [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY, +}; + +static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = { + [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), + [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header), + [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo), + [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), + [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), + [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event), + [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event), + /* ignored events: */ + [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY, +}; + /* Summary of a kvaser error event, for a unified Leaf/Usbcan error * handling. 
Some discrepancies between the two families exist: * @@ -342,50 +364,107 @@ struct kvaser_usb_err_summary { }; }; -static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { - .name = "kvaser_usb", - .tseg1_min = KVASER_USB_TSEG1_MIN, - .tseg1_max = KVASER_USB_TSEG1_MAX, - .tseg2_min = KVASER_USB_TSEG2_MIN, - .tseg2_max = KVASER_USB_TSEG2_MAX, - .sjw_max = KVASER_USB_SJW_MAX, - .brp_min = KVASER_USB_BRP_MIN, - .brp_max = KVASER_USB_BRP_MAX, - .brp_inc = KVASER_USB_BRP_INC, +static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = { + .name = "kvaser_usb_ucii", + .tseg1_min = 4, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 16, + .brp_inc = 1, }; -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = { +static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = { + .name = "kvaser_usb_leaf", + .tseg1_min = 3, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 2, + .brp_max = 128, + .brp_inc = 2, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = { .clock = { .freq = 8000000, }, .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_leaf_bittiming_const, + .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const, }; -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = { +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = { .clock = { .freq = 16000000, }, .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_leaf_bittiming_const, + .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, }; -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = { +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = { + .clock = { + .freq = 16000000, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_flexc_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = { .clock = { .freq = 24000000, }, .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_leaf_bittiming_const, + .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = { +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = { .clock = { .freq = 32000000, }, .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_leaf_bittiming_const, + .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; +static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + /* buffer size >= cmd->len ensured by caller */ + u8 min_size = 0; + + switch (dev->driver_info->family) { + case KVASER_LEAF: + if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf)) + min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id]; + break; + case KVASER_USBCAN: + if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan)) + min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id]; + break; + } + + if (min_size == CMD_SIZE_ANY) + return 0; + + if (min_size) { + min_size += CMD_HEADER_LEN; + if (cmd->len >= min_size) + return 0; + + dev_err_ratelimited(&dev->intf->dev, + "Received command %u too short (size %u, needed %u)", + cmd->id, cmd->len, min_size); + return -EIO; + } + + dev_warn_ratelimited(&dev->intf->dev, + "Unhandled command (%d, size %d)\n", + cmd->id, cmd->len); + return -EINVAL; +} + static void * kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, const struct sk_buff *skb, int *frame_len, @@ -405,7 +484,7 @@ 
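/* Editor's note -- the kvaser_usb_leaf_verify_size() addition above is a
 * table-driven minimum-length check: index a per-command-ID table of
 * minimum sizes and reject anything shorter before dereferencing typed
 * fields. The shape of the idea, with made-up command IDs and sizes:
 */
#include <linux/errno.h>
#include <linux/kernel.h>

#define DEMO_SIZE_ANY	0xff	/* listed, but length deliberately unchecked */

static const u8 demo_cmd_min_size[] = {
	[1] = 2,		/* DEMO_CMD_PING */
	[2] = 10,		/* DEMO_CMD_DATA */
	[3] = DEMO_SIZE_ANY,	/* DEMO_CMD_IGNORED */
};

static int demo_verify_size(u8 id, u8 len)
{
	u8 min = 0;

	if (id < ARRAY_SIZE(demo_cmd_min_size))
		min = demo_cmd_min_size[id];

	if (min == DEMO_SIZE_ANY)
		return 0;
	if (!min)
		return -EINVAL;		/* unknown command id */
	return len >= min ? 0 : -EIO;	/* too short to parse its payload */
}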
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, sizeof(struct kvaser_cmd_tx_can); cmd->u.tx_can.channel = priv->channel; - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags; break; @@ -493,6 +572,9 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id, end: kfree(buf); + if (err == 0) + err = kvaser_usb_leaf_verify_size(dev, cmd); + return err; } @@ -525,16 +607,23 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev, dev->fw_version = le32_to_cpu(softinfo->fw_version); dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); - switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { - case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: - dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz; - break; - case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: - dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz; - break; - case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: - dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz; - break; + if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) { + /* Firmware expects bittiming parameters calculated for 16MHz + * clock, regardless of the actual clock + */ + dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg; + } else { + switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { + case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_16mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_32mhz; + break; + } } } @@ -551,7 +640,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) if (err) return err; - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo); break; @@ -559,7 +648,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); dev->max_tx_urbs = le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); - dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz; + dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg; break; } @@ -598,7 +687,7 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev) dev->nchannels = cmd.u.cardinfo.nchannels; if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES || - (dev->card_data.leaf.family == KVASER_USBCAN && + (dev->driver_info->family == KVASER_USBCAN && dev->nchannels > MAX_USBCAN_NET_DEVICES)) return -EINVAL; @@ -734,7 +823,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, new_state < CAN_STATE_BUS_OFF) priv->can.can_stats.restarts++; - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: if (es->leaf.error_factor) { priv->can.can_stats.bus_error++; @@ -813,7 +902,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, } } - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: if (es->leaf.error_factor) { cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; @@ -840,8 +929,10 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, break; } - cf->data[6] = es->txerr; - cf->data[7] = es->rxerr; + if (new_state != CAN_STATE_BUS_OFF) { + cf->data[6] = es->txerr; + cf->data[7] = es->rxerr; + } stats->rx_packets++; stats->rx_bytes += cf->can_dlc; @@ -1005,7 +1096,7 @@ static 
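The KVASER_USB_QUIRK_IGNORE_CLK_FREQ branch above encodes the subtlety spelled out in its comment: on some boards the firmware expects bittiming computed for one fixed clock, so the frequency reported in the software-options field must be ignored. A small standalone sketch of that selection logic (structures and values are illustrative, not the driver's real tables):

    #include <stdint.h>
    #include <stdio.h>

    #define QUIRK_IGNORE_CLK_FREQ 0x1

    struct dev_cfg { unsigned int clk_hz; const char *bittiming; };

    static const struct dev_cfg cfg_m32c  = { 16000000, "m32c" };
    static const struct dev_cfg cfg_imx16 = { 16000000, "flexcan" };
    static const struct dev_cfg cfg_imx24 = { 24000000, "flexcan" };

    static const struct dev_cfg *pick_cfg(uint32_t quirks, unsigned int mhz)
    {
        /* Quirky firmware wants bittiming for its fixed clock, no matter
         * what frequency the software-options field claims.
         */
        if (quirks & QUIRK_IGNORE_CLK_FREQ)
            return &cfg_m32c;

        switch (mhz) {
        case 16: return &cfg_imx16;
        case 24: return &cfg_imx24;
        default: return NULL;             /* unsupported clock */
        }
    }

    int main(void)
    {
        printf("%s\n", pick_cfg(QUIRK_IGNORE_CLK_FREQ, 24)->bittiming);
        return 0;
    }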
void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, stats = &priv->netdev->stats; if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) && - (dev->card_data.leaf.family == KVASER_LEAF && + (dev->driver_info->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE)) { kvaser_usb_leaf_leaf_rx_error(dev, cmd); return; @@ -1021,7 +1112,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, return; } - switch (dev->card_data.leaf.family) { + switch (dev->driver_info->family) { case KVASER_LEAF: rx_data = cmd->u.leaf.rx_can.data; break; @@ -1036,7 +1127,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, return; } - if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id == + if (dev->driver_info->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE) { cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id); if (cf->can_id & KVASER_EXTENDED_FRAME) @@ -1118,6 +1209,9 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev, static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { + if (kvaser_usb_leaf_verify_size(dev, cmd) < 0) + return; + switch (cmd->id) { case CMD_START_CHIP_REPLY: kvaser_usb_leaf_start_chip_reply(dev, cmd); @@ -1133,14 +1227,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, break; case CMD_LEAF_LOG_MESSAGE: - if (dev->card_data.leaf.family != KVASER_LEAF) + if (dev->driver_info->family != KVASER_LEAF) goto warn; kvaser_usb_leaf_rx_can_msg(dev, cmd); break; case CMD_CHIP_STATE_EVENT: case CMD_CAN_ERROR_EVENT: - if (dev->card_data.leaf.family == KVASER_LEAF) + if (dev->driver_info->family == KVASER_LEAF) kvaser_usb_leaf_leaf_rx_error(dev, cmd); else kvaser_usb_leaf_usbcan_rx_error(dev, cmd); @@ -1152,12 +1246,12 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, /* Ignored commands */ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: - if (dev->card_data.leaf.family != KVASER_USBCAN) + if (dev->driver_info->family != KVASER_USBCAN) goto warn; break; case CMD_FLUSH_QUEUE_REPLY: - if (dev->card_data.leaf.family != KVASER_LEAF) + if (dev->driver_info->family != KVASER_LEAF) goto warn; break; @@ -1230,7 +1324,7 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->start_comp); + reinit_completion(&priv->start_comp); err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP, priv->channel); @@ -1248,7 +1342,7 @@ static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->stop_comp); + reinit_completion(&priv->stop_comp); err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP, priv->channel); @@ -1336,9 +1430,13 @@ static int kvaser_usb_leaf_set_mode(struct net_device *netdev, switch (mode) { case CAN_MODE_START: + kvaser_usb_unlink_tx_urbs(priv); + err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP); if (err) return err; + + priv->can.state = CAN_STATE_ERROR_ACTIVE; break; default: return -EOPNOTSUPP; diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 21063335ab59..c07e327929ba 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -47,6 +47,10 @@ #define MCBA_VER_REQ_USB 1 #define MCBA_VER_REQ_CAN 2 +/* Drive the CAN_RES signal LOW "0" to activate R24 and R25 */ +#define MCBA_VER_TERMINATION_ON 0 +#define MCBA_VER_TERMINATION_OFF 1 + #define MCBA_SIDL_EXID_MASK 0x8 #define MCBA_DLC_MASK 0xf #define MCBA_DLC_RTR_MASK 0x40 @@ -469,7 
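The start_chip/stop_chip hunks above swap init_completion() for reinit_completion(). For a completion embedded in a long-lived object and reused on every operation, the kernel idiom is to initialize once (which also sets up the wait-queue head) and only reset the done counter per use; re-running the full init can race with a concurrent waiter. A kernel-style sketch of the idiom (the struct and timeout value are assumptions):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct my_priv {
        struct completion start_comp;
    };

    static void my_probe_init(struct my_priv *priv)
    {
        init_completion(&priv->start_comp);   /* once, at allocation */
    }

    static int my_start_chip(struct my_priv *priv)
    {
        reinit_completion(&priv->start_comp); /* reset done count only */

        /* ... queue the START command to the device here ... */

        if (!wait_for_completion_timeout(&priv->start_comp,
                                         msecs_to_jiffies(1000)))
            return -ETIMEDOUT;
        return 0;
    }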
+473,7 @@ static void mcba_usb_process_ka_usb(struct mcba_priv *priv, priv->usb_ka_first_pass = false; } - if (msg->termination_state) + if (msg->termination_state == MCBA_VER_TERMINATION_ON) priv->can.termination = MCBA_TERMINATION_ENABLED; else priv->can.termination = MCBA_TERMINATION_DISABLED; @@ -789,9 +793,9 @@ static int mcba_set_termination(struct net_device *netdev, u16 term) }; if (term == MCBA_TERMINATION_ENABLED) - usb_msg.termination = 1; + usb_msg.termination = MCBA_VER_TERMINATION_ON; else - usb_msg.termination = 0; + usb_msg.termination = MCBA_VER_TERMINATION_OFF; mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg); diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index 985e00aee4ee..885c54c6f81a 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -442,9 +442,10 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, if (rx_errors) stats->rx_errors++; - - cf->data[6] = txerr; - cf->data[7] = rxerr; + if (priv->can.state != CAN_STATE_BUS_OFF) { + cf->data[6] = txerr; + cf->data[7] = rxerr; + } priv->bec.txerr = txerr; priv->bec.rxerr = rxerr; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 375998263af7..1a3fba352cad 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -239,7 +239,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = { }; /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */ -static struct can_bittiming_const xcan_data_bittiming_const_canfd = { +static const struct can_bittiming_const xcan_data_bittiming_const_canfd = { .name = DRIVER_NAME, .tseg1_min = 1, .tseg1_max = 16, @@ -259,20 +259,20 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { .tseg2_min = 1, .tseg2_max = 128, .sjw_max = 128, - .brp_min = 2, + .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */ -static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = { +static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = { .name = DRIVER_NAME, .tseg1_min = 1, .tseg1_max = 32, .tseg2_min = 1, .tseg2_max = 16, .sjw_max = 16, - .brp_min = 2, + .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 08a675a5328d..c6563d212476 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -710,6 +710,9 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port, struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 reg, offset; + if (priv->wol_ports_mask & BIT(port)) + return; + if (port != core_readl(priv, CORE_IMP0_PRT_ID)) { if (priv->type == BCM7445_DEVICE_ID) offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); @@ -771,6 +774,11 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, if (duplex == DUPLEX_FULL) reg |= DUPLX_MODE; + if (tx_pause) + reg |= TXFLOW_CNTL; + if (rx_pause) + reg |= RXFLOW_CNTL; + core_writel(priv, reg, offset); } diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index e38906ae8f23..fbeb99ab9e4d 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -376,6 +376,17 @@ static struct mdio_driver dsa_loop_drv = { #define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2) +static void dsa_loop_phydevs_unregister(void) +{ + unsigned int i; + + for (i = 0; i < NUM_FIXED_PHYS; i++) + if (!IS_ERR(phydevs[i])) { + fixed_phy_unregister(phydevs[i]); + phy_device_free(phydevs[i]); + } +} + static 
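Both the kvaser_usb_leaf and usb_8dev hunks in this stretch gate the error-counter bytes of a CAN error frame on the controller not being bus-off: once the controller has left the bus, the counters the device reports are frozen or meaningless. A standalone sketch of the rule:

    #include <stdint.h>
    #include <stdio.h>

    enum can_state { ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE, BUS_OFF };

    /* Only report error counters while the controller is still on the
     * bus; after bus-off the device-side values are stale/misleading.
     */
    static void fill_error_frame(uint8_t data[8], enum can_state state,
                                 uint8_t txerr, uint8_t rxerr)
    {
        if (state != BUS_OFF) {
            data[6] = txerr;
            data[7] = rxerr;
        }
    }

    int main(void)
    {
        uint8_t data[8] = {0};

        fill_error_frame(data, BUS_OFF, 200, 150);
        printf("%u %u\n", data[6], data[7]);   /* stays 0 0 */
        return 0;
    }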
int __init dsa_loop_init(void) { struct fixed_phy_status status = { @@ -383,23 +394,23 @@ static int __init dsa_loop_init(void) .speed = SPEED_100, .duplex = DUPLEX_FULL, }; - unsigned int i; + unsigned int i, ret; for (i = 0; i < NUM_FIXED_PHYS; i++) phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL); - return mdio_driver_register(&dsa_loop_drv); + ret = mdio_driver_register(&dsa_loop_drv); + if (ret) + dsa_loop_phydevs_unregister(); + + return ret; } module_init(dsa_loop_init); static void __exit dsa_loop_exit(void) { - unsigned int i; - mdio_driver_unregister(&dsa_loop_drv); - for (i = 0; i < NUM_FIXED_PHYS; i++) - if (!IS_ERR(phydevs[i])) - fixed_phy_unregister(phydevs[i]); + dsa_loop_phydevs_unregister(); } module_exit(dsa_loop_exit); diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 2044d440d7de..c79bb8cf962c 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -958,7 +958,7 @@ static const struct lan9303_mib_desc lan9303_mib[] = { { .offset = LAN9303_MAC_TX_BRDCST_CNT_0, .name = "TxBroad", }, { .offset = LAN9303_MAC_TX_PAUSE_CNT_0, .name = "TxPause", }, { .offset = LAN9303_MAC_TX_MULCST_CNT_0, .name = "TxMulti", }, - { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "TxUnderRun", }, + { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "RxShort", }, { .offset = LAN9303_MAC_TX_64_CNT_0, .name = "Tx64Byte", }, { .offset = LAN9303_MAC_TX_127_CNT_0, .name = "Tx128Byte", }, { .offset = LAN9303_MAC_TX_255_CNT_0, .name = "Tx256Byte", }, diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 80ef7ea77954..70895e480683 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1629,9 +1629,6 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, break; case PHY_INTERFACE_MODE_RMII: miicfg |= GSWIP_MII_CFG_MODE_RMIIM; - - /* Configure the RMII clock as output: */ - miicfg |= GSWIP_MII_CFG_RMII_CLK; break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: @@ -1984,8 +1981,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv, for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) { err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i], gphy_fw_np, i); - if (err) + if (err) { + of_node_put(gphy_fw_np); goto remove_gphy; + } i++; } diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index b3aa99eb6c2c..ece4c0512ee2 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -762,6 +762,9 @@ static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port, goto exit; } + if (!(ksz_data & ALU_VALID)) + continue; + /* read ALU table */ ksz9477_read_table(dev, alu_table); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 5ee8809bc271..70155e996f7d 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -502,14 +502,19 @@ static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv) static int mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface) { - struct mt7530_priv *priv = ds->priv; + return 0; +} + +static void +mt7531_pll_setup(struct mt7530_priv *priv) +{ u32 top_sig; u32 hwstrap; u32 xtal; u32 val; if (mt7531_dual_sgmii_supported(priv)) - return 0; + return; val = mt7530_read(priv, MT7531_CREV); top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR); @@ -588,8 +593,6 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface) val |= EN_COREPLL; mt7530_write(priv, MT7531_PLLGP_EN, val); usleep_range(25, 35); - - 
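The gswip_gphy_fw_list() fix applies the standard for_each_available_child_of_node() rule: the iterator holds a reference on the current child, so any early exit from the loop must drop it with of_node_put(). Kernel-style sketch (probe_one() is a hypothetical helper):

    #include <linux/of.h>

    static int probe_one(struct device_node *np);  /* hypothetical */

    static int probe_children(struct device_node *parent)
    {
        struct device_node *child;
        int err;

        for_each_available_child_of_node(parent, child) {
            err = probe_one(child);
            if (err) {
                of_node_put(child);   /* balance the iterator's get */
                return err;
            }
        }
        return 0;                     /* normal exit needs no put */
    }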
return 0; } static void @@ -1663,6 +1666,7 @@ mt7530_setup(struct dsa_switch *ds) ret = of_get_phy_mode(mac_np, &interface); if (ret && ret != -ENODEV) { of_node_put(mac_np); + of_node_put(phy_node); return ret; } id = of_mdio_parse_addr(ds->dev, phy_node); @@ -1730,6 +1734,8 @@ mt7531_setup(struct dsa_switch *ds) SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST | SYS_CTRL_REG_RST); + mt7531_pll_setup(priv); + if (mt7531_dual_sgmii_supported(priv)) { priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII; @@ -1951,13 +1957,7 @@ static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port, /* Port5 supports ethier RGMII or SGMII. * Port6 supports SGMII only. */ - switch (port) { - case 5: - if (mt7531_is_rgmii_port(priv, port)) - break; - fallthrough; - case 6: - phylink_set(supported, 1000baseX_Full); + if (port == 6) { phylink_set(supported, 2500baseX_Full); phylink_set(supported, 2500baseT_Full); } @@ -2286,8 +2286,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port) case 6: interface = PHY_INTERFACE_MODE_2500BASEX; - mt7531_pad_setup(ds, interface); - priv->p6_interface = interface; break; default: @@ -2314,8 +2312,6 @@ static void mt7530_mac_port_validate(struct dsa_switch *ds, int port, unsigned long *supported) { - if (port == 5) - phylink_set(supported, 1000baseX_Full); } static void mt7531_mac_port_validate(struct dsa_switch *ds, int port, @@ -2352,8 +2348,10 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port, } /* This switch only supports 1G full-duplex. */ - if (state->interface != PHY_INTERFACE_MODE_MII) + if (state->interface != PHY_INTERFACE_MODE_MII) { phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + } priv->info->mac_port_validate(ds, port, mask); diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 24b8219fd607..dafddf8000d0 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p) int addr = REG_PORT(p); int ret; + if (dsa_is_unused_port(priv->ds, p)) + return 0; + /* Do not force flow control, disable Ingress and Egress * Header tagging, disable VLAN tunneling, and set the port * state to Forwarding. Additionally, if this is the CPU diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index e79a808375fc..371b345635e6 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -666,44 +666,48 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, { struct mv88e6xxx_chip *chip = ds->priv; struct mv88e6xxx_port *p; - int err; + int err = 0; p = &chip->ports[port]; - /* FIXME: is this the correct test? If we're in fixed mode on an - * internal port, why should we process this any different from - * PHY mode? On the other hand, the port may be automedia between - * an internal PHY and the serdes... - */ - if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port)) - return; - mv88e6xxx_reg_lock(chip); - /* In inband mode, the link may come up at any time while the link - * is not forced down. Force the link down while we reconfigure the - * interface mode. 
- */ - if (mode == MLO_AN_INBAND && p->interface != state->interface && - chip->info->ops->port_set_link) - chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN); - err = mv88e6xxx_port_config_interface(chip, port, state->interface); - if (err && err != -EOPNOTSUPP) - goto err_unlock; + if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) { + /* In inband mode, the link may come up at any time while the + * link is not forced down. Force the link down while we + * reconfigure the interface mode. + */ + if (mode == MLO_AN_INBAND && + p->interface != state->interface && + chip->info->ops->port_set_link) + chip->info->ops->port_set_link(chip, port, + LINK_FORCED_DOWN); - err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface, - state->advertising); - /* FIXME: we should restart negotiation if something changed - which - * is something we get if we convert to using phylinks PCS operations. - */ - if (err > 0) - err = 0; + err = mv88e6xxx_port_config_interface(chip, port, + state->interface); + if (err && err != -EOPNOTSUPP) + goto err_unlock; + + err = mv88e6xxx_serdes_pcs_config(chip, port, mode, + state->interface, + state->advertising); + /* FIXME: we should restart negotiation if something changed - + * which is something we get if we convert to using phylinks + * PCS operations. + */ + if (err > 0) + err = 0; + } /* Undo the forced down state above after completing configuration - * irrespective of its state on entry, which allows the link to come up. + * irrespective of its state on entry, which allows the link to come + * up in the in-band case where there is no separate SERDES. Also + * ensure that the link can come up if the PPU is in use and we are + * in PHY mode (we treat the PPU as an effective in-band mechanism.) 
*/ - if (mode == MLO_AN_INBAND && p->interface != state->interface && - chip->info->ops->port_set_link) + if (chip->info->ops->port_set_link && + ((mode == MLO_AN_INBAND && p->interface != state->interface) || + (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port)))) chip->info->ops->port_set_link(chip, port, LINK_UNFORCED); p->interface = state->interface; @@ -3148,6 +3152,7 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, */ child = of_get_child_by_name(np, "mdio"); err = mv88e6xxx_mdio_register(chip, child, false); + of_node_put(child); if (err) return err; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index cd8d9b0e0edb..161a5eac60d6 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -578,7 +578,8 @@ static const struct ocelot_stat_layout vsc9959_stats_layout[] = { { .offset = 0x87, .name = "tx_frames_below_65_octets", }, { .offset = 0x88, .name = "tx_frames_65_to_127_octets", }, { .offset = 0x89, .name = "tx_frames_128_255_octets", }, - { .offset = 0x8B, .name = "tx_frames_256_511_octets", }, + { .offset = 0x8A, .name = "tx_frames_256_511_octets", }, + { .offset = 0x8B, .name = "tx_frames_512_1023_octets", }, { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", }, { .offset = 0x8D, .name = "tx_frames_over_1526_octets", }, { .offset = 0x8E, .name = "tx_yellow_prio_0", }, @@ -1466,7 +1467,7 @@ static int felix_pci_probe(struct pci_dev *pdev, err = dsa_register_switch(ds); if (err) { - dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); + dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n"); goto err_register_ds; } diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c index 4a2ec395bcb0..8e3d185c8460 100644 --- a/drivers/net/dsa/sja1105/sja1105_devlink.c +++ b/drivers/net/dsa/sja1105/sja1105_devlink.c @@ -93,8 +93,10 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds) region = dsa_devlink_region_create(ds, ops, 1, size); if (IS_ERR(region)) { - while (i-- >= 0) + while (--i >= 0) dsa_devlink_region_destroy(priv->regions[i]); + + kfree(priv->regions); return PTR_ERR(region); } diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index f4f50b3a472e..0d56cb4f5dd9 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -258,6 +258,7 @@ static int greth_init_rings(struct greth_private *greth) if (dma_mapping_error(greth->dev, dma_addr)) { if (netif_msg_ifup(greth)) dev_err(greth->dev, "Could not create initial DMA mapping\n"); + dev_kfree_skb(skb); goto cleanup; } greth->rx_skbuff[i] = skb; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index a7d8d45e0e94..b779f3adbc56 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio = mdiobus_alloc(); if (mdio == NULL) { netdev_err(dev, "Error allocating MDIO bus\n"); - return -ENOMEM; + ret = -ENOMEM; + goto put_node; } mdio->name = ALTERA_TSE_RESOURCE_NAME; @@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio->id); goto out_free_mdio; } + of_node_put(mdio_node); if (netif_msg_drv(priv)) netdev_info(dev, "MDIO bus %s: created\n", mdio->id); @@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct 
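The sja1105 hunk replaces `while (i-- >= 0)` with `while (--i >= 0)`: with the post-decrement form the test still passes when i == 0, so the body runs once more with i == -1 and destroys an element before the start of the array. A standalone demonstration of the corrected unwind:

    #include <stdio.h>

    #define N 4

    int main(void)
    {
        int created[N] = {0};
        int i;

        for (i = 0; i < N; i++) {
            if (i == 2)               /* simulate: creating slot 2 fails */
                break;
            created[i] = 1;
        }

        /* correct unwind: frees slots i-1 .. 0 and never touches [-1];
         * `while (i-- >= 0)` would run once more with i == -1
         */
        while (--i >= 0) {
            created[i] = 0;
            printf("destroyed %d\n", i);
        }
        return 0;
    }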
net_device *dev, unsigned int id) out_free_mdio: mdiobus_free(mdio); mdio = NULL; +put_node: + of_node_put(mdio_node); return ret; } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 52414ac2c901..1722d4091ea3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -4488,13 +4488,19 @@ static struct pci_driver ena_pci_driver = { static int __init ena_init(void) { + int ret; + ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME); if (!ena_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; } - return pci_register_driver(&ena_pci_driver); + ret = pci_register_driver(&ena_pci_driver); + if (ret) + destroy_workqueue(ena_wq); + + return ret; } static void __exit ena_cleanup(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 213769054391..a7166cd1179f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -239,6 +239,7 @@ enum xgbe_sfp_speed { #define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d #define XGBE_SFP_BASE_BR_10GBE_MIN 0x64 #define XGBE_SFP_BASE_BR_10GBE_MAX 0x68 +#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX 0x78 #define XGBE_SFP_BASE_CU_CABLE_LEN 18 @@ -284,6 +285,8 @@ struct xgbe_sfp_eeprom { #define XGBE_BEL_FUSE_VENDOR "BEL-FUSE " #define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 " +#define XGBE_MOLEX_VENDOR "Molex Inc. " + struct xgbe_sfp_ascii { union { char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1]; @@ -834,7 +837,11 @@ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom, break; case XGBE_SFP_SPEED_10000: min = XGBE_SFP_BASE_BR_10GBE_MIN; - max = XGBE_SFP_BASE_BR_10GBE_MAX; + if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], + XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0) + max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX; + else + max = XGBE_SFP_BASE_BR_10GBE_MAX; break; default: return false; @@ -1151,7 +1158,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) } /* Determine the type of SFP */ - if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR) + if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE && + xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000)) + phy_data->sfp_base = XGBE_SFP_BASE_10000_CR; + else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR) phy_data->sfp_base = XGBE_SFP_BASE_10000_SR; else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR) phy_data->sfp_base = XGBE_SFP_BASE_10000_LR; @@ -1167,9 +1177,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) phy_data->sfp_base = XGBE_SFP_BASE_1000_CX; else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T) phy_data->sfp_base = XGBE_SFP_BASE_1000_T; - else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) && - xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000)) - phy_data->sfp_base = XGBE_SFP_BASE_10000_CR; switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 78c7cbc372b0..71151f675a49 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1004,8 +1004,10 @@ static int xgene_enet_open(struct net_device *ndev) xgene_enet_napi_enable(pdata); ret = xgene_enet_register_irq(ndev); - if (ret) + if (ret) { + xgene_enet_napi_disable(pdata); return ret; + } if (ndev->phydev) { 
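The ena_init() change (and the analogous nicvf and bnxt fixes later in this series) share one shape: a module-init function that allocates a resource before registering a driver must release it if registration fails, otherwise the resource leaks every time the load is rejected. Kernel-style sketch (the driver struct is assumed to be filled in elsewhere):

    #include <linux/module.h>
    #include <linux/pci.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_wq;
    static struct pci_driver my_pci_driver;   /* assumed filled in */

    static int __init my_init(void)
    {
        int ret;

        my_wq = create_singlethread_workqueue("my_wq");
        if (!my_wq)
            return -ENOMEM;

        ret = pci_register_driver(&my_pci_driver);
        if (ret)
            destroy_workqueue(my_wq);         /* unwind on failure */

        return ret;
    }
    module_init(my_init);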
phy_start(ndev->phydev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index de2a9348bc3f..1d512e6a89f5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -13,6 +13,7 @@ #include "aq_ptp.h" #include "aq_filters.h" #include "aq_macsec.h" +#include "aq_main.h" #include @@ -841,7 +842,7 @@ static int aq_set_ringparam(struct net_device *ndev, if (netif_running(ndev)) { ndev_running = true; - dev_close(ndev); + aq_ndev_close(ndev); } cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min); @@ -857,7 +858,7 @@ static int aq_set_ringparam(struct net_device *ndev, goto err_exit; if (ndev_running) - err = dev_open(ndev, NULL); + err = aq_ndev_open(ndev); err_exit: return err; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c index 4a6dfac857ca..ee823a18294c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c @@ -585,6 +585,7 @@ static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx, ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx); + memzero_explicit(&key_rec, sizeof(key_rec)); return ret; } @@ -932,6 +933,7 @@ static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx, ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx); + memzero_explicit(&sa_key_record, sizeof(sa_key_record)); return ret; } @@ -1451,26 +1453,57 @@ static void aq_check_txsa_expiration(struct aq_nic_s *nic) egress_sa_threshold_expired); } +#define AQ_LOCKED_MDO_DEF(mdo) \ +static int aq_locked_mdo_##mdo(struct macsec_context *ctx) \ +{ \ + struct aq_nic_s *nic = netdev_priv(ctx->netdev); \ + int ret; \ + mutex_lock(&nic->macsec_mutex); \ + ret = aq_mdo_##mdo(ctx); \ + mutex_unlock(&nic->macsec_mutex); \ + return ret; \ +} + +AQ_LOCKED_MDO_DEF(dev_open) +AQ_LOCKED_MDO_DEF(dev_stop) +AQ_LOCKED_MDO_DEF(add_secy) +AQ_LOCKED_MDO_DEF(upd_secy) +AQ_LOCKED_MDO_DEF(del_secy) +AQ_LOCKED_MDO_DEF(add_rxsc) +AQ_LOCKED_MDO_DEF(upd_rxsc) +AQ_LOCKED_MDO_DEF(del_rxsc) +AQ_LOCKED_MDO_DEF(add_rxsa) +AQ_LOCKED_MDO_DEF(upd_rxsa) +AQ_LOCKED_MDO_DEF(del_rxsa) +AQ_LOCKED_MDO_DEF(add_txsa) +AQ_LOCKED_MDO_DEF(upd_txsa) +AQ_LOCKED_MDO_DEF(del_txsa) +AQ_LOCKED_MDO_DEF(get_dev_stats) +AQ_LOCKED_MDO_DEF(get_tx_sc_stats) +AQ_LOCKED_MDO_DEF(get_tx_sa_stats) +AQ_LOCKED_MDO_DEF(get_rx_sc_stats) +AQ_LOCKED_MDO_DEF(get_rx_sa_stats) + const struct macsec_ops aq_macsec_ops = { - .mdo_dev_open = aq_mdo_dev_open, - .mdo_dev_stop = aq_mdo_dev_stop, - .mdo_add_secy = aq_mdo_add_secy, - .mdo_upd_secy = aq_mdo_upd_secy, - .mdo_del_secy = aq_mdo_del_secy, - .mdo_add_rxsc = aq_mdo_add_rxsc, - .mdo_upd_rxsc = aq_mdo_upd_rxsc, - .mdo_del_rxsc = aq_mdo_del_rxsc, - .mdo_add_rxsa = aq_mdo_add_rxsa, - .mdo_upd_rxsa = aq_mdo_upd_rxsa, - .mdo_del_rxsa = aq_mdo_del_rxsa, - .mdo_add_txsa = aq_mdo_add_txsa, - .mdo_upd_txsa = aq_mdo_upd_txsa, - .mdo_del_txsa = aq_mdo_del_txsa, - .mdo_get_dev_stats = aq_mdo_get_dev_stats, - .mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats, - .mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats, - .mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats, - .mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats, + .mdo_dev_open = aq_locked_mdo_dev_open, + .mdo_dev_stop = aq_locked_mdo_dev_stop, + .mdo_add_secy = aq_locked_mdo_add_secy, + .mdo_upd_secy = aq_locked_mdo_upd_secy, + .mdo_del_secy = aq_locked_mdo_del_secy, + .mdo_add_rxsc = 
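AQ_LOCKED_MDO_DEF above stamps out one mutex-holding shim per macsec callback, so every entry point into the shared configuration takes the same lock without hand-writing lock/unlock boilerplate nineteen times. The same macro trick in a standalone pthread sketch (operation names are hypothetical):

    #include <pthread.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

    static int do_add(int v) { return v + 1; }    /* hypothetical raw ops */
    static int do_del(int v) { return v - 1; }

    #define LOCKED_OP_DEF(op)                       \
    static int locked_##op(int v)                   \
    {                                               \
        int ret;                                    \
        pthread_mutex_lock(&state_lock);            \
        ret = do_##op(v);                           \
        pthread_mutex_unlock(&state_lock);          \
        return ret;                                 \
    }

    LOCKED_OP_DEF(add)
    LOCKED_OP_DEF(del)

    int main(void)
    {
        return locked_add(0) + locked_del(2) == 2 ? 0 : 1;
    }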
aq_locked_mdo_add_rxsc, + .mdo_upd_rxsc = aq_locked_mdo_upd_rxsc, + .mdo_del_rxsc = aq_locked_mdo_del_rxsc, + .mdo_add_rxsa = aq_locked_mdo_add_rxsa, + .mdo_upd_rxsa = aq_locked_mdo_upd_rxsa, + .mdo_del_rxsa = aq_locked_mdo_del_rxsa, + .mdo_add_txsa = aq_locked_mdo_add_txsa, + .mdo_upd_txsa = aq_locked_mdo_upd_txsa, + .mdo_del_txsa = aq_locked_mdo_del_txsa, + .mdo_get_dev_stats = aq_locked_mdo_get_dev_stats, + .mdo_get_tx_sc_stats = aq_locked_mdo_get_tx_sc_stats, + .mdo_get_tx_sa_stats = aq_locked_mdo_get_tx_sa_stats, + .mdo_get_rx_sc_stats = aq_locked_mdo_get_rx_sc_stats, + .mdo_get_rx_sa_stats = aq_locked_mdo_get_rx_sa_stats, }; int aq_macsec_init(struct aq_nic_s *nic) @@ -1492,6 +1525,7 @@ int aq_macsec_init(struct aq_nic_s *nic) nic->ndev->features |= NETIF_F_HW_MACSEC; nic->ndev->macsec_ops = &aq_macsec_ops; + mutex_init(&nic->macsec_mutex); return 0; } @@ -1515,7 +1549,7 @@ int aq_macsec_enable(struct aq_nic_s *nic) if (!nic->macsec_cfg) return 0; - rtnl_lock(); + mutex_lock(&nic->macsec_mutex); if (nic->aq_fw_ops->send_macsec_req) { struct macsec_cfg_request cfg = { 0 }; @@ -1564,7 +1598,7 @@ int aq_macsec_enable(struct aq_nic_s *nic) ret = aq_apply_macsec_cfg(nic); unlock: - rtnl_unlock(); + mutex_unlock(&nic->macsec_mutex); return ret; } @@ -1576,9 +1610,9 @@ void aq_macsec_work(struct aq_nic_s *nic) if (!netif_carrier_ok(nic->ndev)) return; - rtnl_lock(); + mutex_lock(&nic->macsec_mutex); aq_check_txsa_expiration(nic); - rtnl_unlock(); + mutex_unlock(&nic->macsec_mutex); } int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic) @@ -1589,21 +1623,30 @@ int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic) if (!cfg) return 0; + mutex_lock(&nic->macsec_mutex); + for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { if (!test_bit(i, &cfg->rxsc_idx_busy)) continue; cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy); } + mutex_unlock(&nic->macsec_mutex); return cnt; } int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic) { + int cnt; + if (!nic->macsec_cfg) return 0; - return hweight_long(nic->macsec_cfg->txsc_idx_busy); + mutex_lock(&nic->macsec_mutex); + cnt = hweight_long(nic->macsec_cfg->txsc_idx_busy); + mutex_unlock(&nic->macsec_mutex); + + return cnt; } int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic) @@ -1614,12 +1657,15 @@ int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic) if (!cfg) return 0; + mutex_lock(&nic->macsec_mutex); + for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { if (!test_bit(i, &cfg->txsc_idx_busy)) continue; cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy); } + mutex_unlock(&nic->macsec_mutex); return cnt; } @@ -1691,6 +1737,8 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data) if (!cfg) return data; + mutex_lock(&nic->macsec_mutex); + aq_macsec_update_stats(nic); common_stats = &cfg->stats; @@ -1773,5 +1821,7 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data) data += i; + mutex_unlock(&nic->macsec_mutex); + return data; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 4af0cd9530de..1401fc4632b5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -53,7 +53,7 @@ struct net_device *aq_ndev_alloc(void) return ndev; } -static int aq_ndev_open(struct net_device *ndev) +int aq_ndev_open(struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); int err = 0; @@ -83,17 +83,14 @@ err_exit: return err; } -static int aq_ndev_close(struct net_device *ndev) +int aq_ndev_close(struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); int err 
= 0; err = aq_nic_stop(aq_nic); - if (err < 0) - goto err_exit; aq_nic_deinit(aq_nic, true); -err_exit: return err; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h index a5a624b9ce73..2a562ab7a5af 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h @@ -14,5 +14,7 @@ void aq_ndev_schedule_work(struct work_struct *work); struct net_device *aq_ndev_alloc(void); +int aq_ndev_open(struct net_device *ndev); +int aq_ndev_close(struct net_device *ndev); #endif /* AQ_MAIN_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 0cf8ae8aeac8..2d491efa11bd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -265,12 +265,10 @@ static void aq_nic_service_timer_cb(struct timer_list *t) static void aq_nic_polling_timer_cb(struct timer_list *t) { struct aq_nic_s *self = from_timer(self, t, polling_timer); - struct aq_vec_s *aq_vec = NULL; unsigned int i = 0U; - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) - aq_vec_isr(i, (void *)aq_vec); + for (i = 0U; self->aq_vecs > i; ++i) + aq_vec_isr(i, (void *)self->aq_vec[i]); mod_timer(&self->polling_timer, jiffies + AQ_CFG_POLLING_TIMER_INTERVAL); @@ -480,8 +478,8 @@ int aq_nic_start(struct aq_nic_s *self) if (err < 0) goto err_exit; - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + for (i = 0U; self->aq_vecs > i; ++i) { + aq_vec = self->aq_vec[i]; err = aq_vec_start(aq_vec); if (err < 0) goto err_exit; @@ -511,8 +509,8 @@ int aq_nic_start(struct aq_nic_s *self) mod_timer(&self->polling_timer, jiffies + AQ_CFG_POLLING_TIMER_INTERVAL); } else { - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + for (i = 0U; self->aq_vecs > i; ++i) { + aq_vec = self->aq_vec[i]; err = aq_pci_func_alloc_irq(self, i, self->ndev->name, aq_vec_isr, aq_vec, aq_vec_get_affinity_mask(aq_vec)); @@ -872,7 +870,6 @@ int aq_nic_get_regs_count(struct aq_nic_s *self) u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data) { - struct aq_vec_s *aq_vec = NULL; struct aq_stats_s *stats; unsigned int count = 0U; unsigned int i = 0U; @@ -922,11 +919,11 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data) data += i; for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) { - for (i = 0U, aq_vec = self->aq_vec[0]; - aq_vec && self->aq_vecs > i; - ++i, aq_vec = self->aq_vec[i]) { + for (i = 0U; self->aq_vecs > i; ++i) { + if (!self->aq_vec[i]) + break; data += count; - count = aq_vec_get_sw_stats(aq_vec, tc, data); + count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data); } } @@ -1240,7 +1237,6 @@ int aq_nic_set_loopback(struct aq_nic_s *self) int aq_nic_stop(struct aq_nic_s *self) { - struct aq_vec_s *aq_vec = NULL; unsigned int i = 0U; netif_tx_disable(self->ndev); @@ -1258,9 +1254,8 @@ int aq_nic_stop(struct aq_nic_s *self) aq_ptp_irq_free(self); - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) - aq_vec_stop(aq_vec); + for (i = 0U; self->aq_vecs > i; ++i) + aq_vec_stop(self->aq_vec[i]); aq_ptp_ring_stop(self); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 926cca9a0c83..6da3efa289a3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ 
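All of the aq_nic/aq_vec loop rewrites above fix a single pattern: `for (i = 0, p = arr[0]; n > i; ++i, p = arr[i])` evaluates `arr[i]` in the increment clause before the bounds test, so the final pass reads one element past the end. A standalone illustration of the corrected shape:

    #include <stdio.h>

    int main(void)
    {
        int arr[3] = {10, 20, 30};
        int n = 3, i, v;

        /* buggy shape (don't do this): v = arr[i] runs with i == n
        for (i = 0, v = arr[0]; n > i; ++i, v = arr[i])
            printf("%d\n", v);
        */

        /* fixed shape: load only after the bounds check passes */
        for (i = 0; n > i; ++i) {
            v = arr[i];
            printf("%d\n", v);
        }
        return 0;
    }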
-152,6 +152,8 @@ struct aq_nic_s { struct mutex fwreq_mutex; #if IS_ENABLED(CONFIG_MACSEC) struct aq_macsec_cfg *macsec_cfg; + /* mutex to protect data in macsec_cfg */ + struct mutex macsec_mutex; #endif /* PTP support */ struct aq_ptp_s *aq_ptp; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 1826253f97dc..a0ce213c473b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -385,7 +385,7 @@ static void aq_pci_shutdown(struct pci_dev *pdev) } } -static int aq_suspend_common(struct device *dev, bool deep) +static int aq_suspend_common(struct device *dev) { struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev)); @@ -398,17 +398,15 @@ static int aq_suspend_common(struct device *dev, bool deep) if (netif_running(nic->ndev)) aq_nic_stop(nic); - if (deep) { - aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); - aq_nic_set_power(nic); - } + aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); + aq_nic_set_power(nic); rtnl_unlock(); return 0; } -static int atl_resume_common(struct device *dev, bool deep) +static int atl_resume_common(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct aq_nic_s *nic; @@ -421,11 +419,6 @@ static int atl_resume_common(struct device *dev, bool deep) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - if (deep) { - /* Reinitialize Nic/Vecs objects */ - aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); - } - if (netif_running(nic->ndev)) { ret = aq_nic_init(nic); if (ret) @@ -450,22 +443,22 @@ err_exit: static int aq_pm_freeze(struct device *dev) { - return aq_suspend_common(dev, false); + return aq_suspend_common(dev); } static int aq_pm_suspend_poweroff(struct device *dev) { - return aq_suspend_common(dev, true); + return aq_suspend_common(dev); } static int aq_pm_thaw(struct device *dev) { - return atl_resume_common(dev, false); + return atl_resume_common(dev); } static int aq_pm_resume_restore(struct device *dev) { - return atl_resume_common(dev, true); + return atl_resume_common(dev); } static const struct dev_pm_ops aq_pm_ops = { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 72f8751784c3..e9c6f1fa0b1a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -345,7 +345,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int budget) { struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); - bool is_rsc_completed = true; int err = 0; for (; (self->sw_head != self->hw_head) && budget; @@ -363,12 +362,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self, continue; if (!buff->is_eop) { + unsigned int frag_cnt = 0U; buff_ = buff; do { + bool is_rsc_completed = true; + if (buff_->next >= self->size) { err = -EIO; goto err_exit; } + + frag_cnt++; next_ = buff_->next, buff_ = &self->buff_ring[next_]; is_rsc_completed = @@ -376,18 +380,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self, next_, self->hw_head); - if (unlikely(!is_rsc_completed)) - break; + if (unlikely(!is_rsc_completed) || + frag_cnt > MAX_SKB_FRAGS) { + err = 0; + goto err_exit; + } buff->is_error |= buff_->is_error; buff->is_cso_err |= buff_->is_cso_err; } while (!buff_->is_eop); - if (!is_rsc_completed) { - err = 0; - goto err_exit; - } if (buff->is_error || (buff->is_lro && buff->is_cso_err)) { buff_ = buff; @@ -445,7 +448,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, ALIGN(hdr_len, sizeof(long))); 
if (buff->len - hdr_len > 0) { - skb_add_rx_frag(skb, 0, buff->rxdata.page, + skb_add_rx_frag(skb, i++, buff->rxdata.page, buff->rxdata.pg_off + hdr_len, buff->len - hdr_len, AQ_CFG_RX_FRAME_MAX); @@ -454,7 +457,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self, if (!buff->is_eop) { buff_ = buff; - i = 1U; do { next_ = buff_->next; buff_ = &self->buff_ring[next_]; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index f4774cf051c9..6ab1f3212d24 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) if (!self) { err = -EINVAL; } else { - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp); ring[AQ_VEC_RX_ID].stats.rx.polls++; u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp); @@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops, self->aq_hw_ops = aq_hw_ops; self->aq_hw = aq_hw; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX); if (err < 0) goto err_exit; @@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self) unsigned int i = 0U; int err = 0; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw, &ring[AQ_VEC_TX_ID]); if (err < 0) @@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self) struct aq_ring_s *ring = NULL; unsigned int i = 0U; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw, &ring[AQ_VEC_TX_ID]); @@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self) if (!self) goto err_exit; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]); } @@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self) if (!self) goto err_exit; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; aq_ring_free(&ring[AQ_VEC_TX_ID]); if (i < self->rx_rings) aq_ring_free(&ring[AQ_VEC_RX_ID]); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 9f1b15077e7d..45c17c585d74 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self, err = -ENXIO; goto err_exit; } + + /* Validate that the new hw_head_ is reasonable. 
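The hw_atl_b0 hunk that begins here adds a range check on a head pointer read back from the device. An index produced by hardware or DMA is untrusted input; validating it against the ring size before storing it prevents later out-of-bounds walks. Standalone sketch:

    #include <stdio.h>

    struct ring { unsigned int size; unsigned int hw_head; };

    static int update_head(struct ring *r, unsigned int hw_head_from_device)
    {
        if (hw_head_from_device >= r->size)
            return -1;                  /* reject out-of-range value */
        r->hw_head = hw_head_from_device;
        return 0;
    }

    int main(void)
    {
        struct ring r = { .size = 512, .hw_head = 0 };

        printf("%d\n", update_head(&r, 100));   /* 0: accepted */
        printf("%d\n", update_head(&r, 4096));  /* -1: rejected */
        return 0;
    }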
*/ + if (hw_head_ >= ring->size) { + err = -ENXIO; + goto err_exit; + } + ring->hw_head = hw_head_; err = aq_hw_err_from_flags(self); diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c index 36c7cf05630a..431924959520 100644 --- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c +++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c @@ -757,6 +757,7 @@ set_ingress_sakey_record(struct aq_hw_s *hw, u16 table_index) { u16 packed_record[18]; + int ret; if (table_index >= NUMROWS_INGRESSSAKEYRECORD) return -EINVAL; @@ -789,9 +790,12 @@ set_ingress_sakey_record(struct aq_hw_s *hw, packed_record[16] = rec->key_len & 0x3; - return set_raw_ingress_record(hw, packed_record, 18, 2, - ROWOFFSET_INGRESSSAKEYRECORD + - table_index); + ret = set_raw_ingress_record(hw, packed_record, 18, 2, + ROWOFFSET_INGRESSSAKEYRECORD + + table_index); + + memzero_explicit(packed_record, sizeof(packed_record)); + return ret; } int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw, @@ -1739,14 +1743,14 @@ static int set_egress_sakey_record(struct aq_hw_s *hw, ret = set_raw_egress_record(hw, packed_record, 8, 2, ROWOFFSET_EGRESSSAKEYRECORD + table_index); if (unlikely(ret)) - return ret; + goto clear_key; ret = set_raw_egress_record(hw, packed_record + 8, 8, 2, ROWOFFSET_EGRESSSAKEYRECORD + table_index - 32); - if (unlikely(ret)) - return ret; - return 0; +clear_key: + memzero_explicit(packed_record, sizeof(packed_record)); + return ret; } int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw, diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index c26c9b0c00d8..fe3ca3af431a 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1468,7 +1468,7 @@ static int ag71xx_open(struct net_device *ndev) if (ret) { netif_err(ag, link, ndev, "phylink_of_phy_connect filed with err: %i\n", ret); - goto err; + return ret; } max_frame_len = ag71xx_max_frame_len(ndev->mtu); @@ -1489,6 +1489,7 @@ static int ag71xx_open(struct net_device *ndev) err: ag71xx_rings_cleanup(ag); + phylink_disconnect_phy(ag->phylink); return ret; } diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile index 7046ad6d3d0e..ac50da49ca77 100644 --- a/drivers/net/ethernet/broadcom/Makefile +++ b/drivers/net/ethernet/broadcom/Makefile @@ -16,3 +16,8 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o obj-$(CONFIG_BNXT) += bnxt/ + +# FIXME: temporarily silence -Warray-bounds on non W=1+ builds +ifndef KBUILD_EXTRA_WARN +CFLAGS_tg3.o += -Wno-array-bounds +endif diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 1a703b95208b..82d369d9f7a5 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2592,8 +2592,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) device_set_wakeup_capable(&pdev->dev, 1); priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol"); - if (IS_ERR(priv->wol_clk)) - return PTR_ERR(priv->wol_clk); + if (IS_ERR(priv->wol_clk)) { + ret = PTR_ERR(priv->wol_clk); + goto err_deregister_fixed_link; + } /* Set the needed headroom once and for all */ BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 
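The macsec_api.c hunks add memzero_explicit() on stack buffers that held packed key material, on every exit path including errors. A plain memset() of a local about to go out of scope is a dead store the compiler may delete; memzero_explicit() is guaranteed to survive optimization. Kernel-style sketch (write_key_to_hw() is hypothetical):

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>

    int write_key_to_hw(const u8 *buf, size_t len);   /* hypothetical */

    static int program_key(const u8 *key, size_t len)
    {
        u8 packed[32];
        int ret;

        if (len > sizeof(packed))
            return -EINVAL;
        memcpy(packed, key, len);

        ret = write_key_to_hw(packed, len);

        /* always wipe, even on error: a trailing memset() could be
         * elided as a dead store
         */
        memzero_explicit(packed, sizeof(packed));
        return ret;
    }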
a5fd161ab5ee..26746197515f 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -323,7 +323,6 @@ static void bgmac_remove(struct bcma_device *core) bcma_mdio_mii_unregister(bgmac->mii_bus); bgmac_enet_remove(bgmac); bcma_set_drvdata(core, NULL); - kfree(bgmac); } static struct bcma_driver bgmac_bcma_driver = { diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 98ec1b8a7d8e..9960127f612e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -189,8 +189,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, } slot->skb = skb; - ring->end += nr_frags + 1; netdev_sent_queue(net_dev, skb->len); + ring->end += nr_frags + 1; wmb(); @@ -1568,7 +1568,6 @@ void bgmac_enet_remove(struct bgmac *bgmac) phy_disconnect(bgmac->net_dev->phydev); netif_napi_del(&bgmac->napi); bgmac_dma_free(bgmac); - free_netdev(bgmac->net_dev); } EXPORT_SYMBOL_GPL(bgmac_enet_remove); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 198e041d8410..4f669e7c7558 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -788,6 +788,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n", pad, len, fp->rx_buf_size); bnx2x_panic(); + bnx2x_frag_free(fp, new_data); return; } #endif diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 6333471916be..afb6d3ee1f56 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -14213,10 +14213,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) /* Stop Tx */ bnx2x_tx_disable(bp); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - if (CNIC_LOADED(bp)) - bnx2x_del_all_napi_cnic(bp); netdev_reset_tc(bp->dev); del_timer_sync(&bp->timer); @@ -14321,6 +14317,11 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) bnx2x_drain_tx_queues(bp); bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); bnx2x_netif_stop(bp, 1); + bnx2x_del_all_napi(bp); + + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); + bnx2x_free_irq(bp); /* Report UNLOAD_DONE to MCP */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 08437eaacbb9..ac327839eed9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -795,16 +795,20 @@ static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) { - struct pci_dev *dev; struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); + struct pci_dev *dev; + bool pending; if (!vf) return false; dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn); - if (dev) - return bnx2x_is_pcie_pending(dev); - return false; + if (!dev) + return false; + pending = bnx2x_is_pcie_pending(dev); + pci_dev_put(dev); + + return pending; } int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index cb0c270418a4..92f54e333395 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2575,6 +2575,10 @@ static int 
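The bnx2x_vf_is_pcie_pending() fix further down is a reference-count rule: pci_get_domain_bus_and_slot() returns a pci_dev with an elevated refcount, which must be dropped with pci_dev_put() once the caller is done, including on the path that computes the result. Kernel-style sketch (check_pcie_flr_pending() stands in for bnx2x_is_pcie_pending()):

    #include <linux/pci.h>

    bool check_pcie_flr_pending(struct pci_dev *dev);   /* hypothetical */

    static bool vf_pcie_pending(int domain, unsigned int bus,
                                unsigned int devfn)
    {
        struct pci_dev *dev;
        bool pending;

        dev = pci_get_domain_bus_and_slot(domain, bus, devfn);
        if (!dev)
            return false;

        pending = check_pcie_flr_pending(dev);
        pci_dev_put(dev);             /* drop the reference we took */

        return pending;
    }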
bnxt_poll_p5(struct napi_struct *napi, int budget) u32 idx = le32_to_cpu(nqcmp->cq_handle_low); struct bnxt_cp_ring_info *cpr2; + /* No more budget for RX work */ + if (budget && work_done >= budget && idx == BNXT_RX_HDL) + break; + cpr2 = cpr->cp_ring_arr[idx]; work_done += __bnxt_poll_work(bp, cpr2, budget - work_done); @@ -10453,7 +10457,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp) if (bp->flags & BNXT_FLAG_CHIP_P5) return bnxt_rfs_supported(bp); - if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) + if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) return false; vnics = 1 + bp->rx_nr_rings; @@ -12004,8 +12008,8 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, rcu_read_lock(); hlist_for_each_entry_rcu(fltr, head, hash) { if (bnxt_fltr_match(fltr, new_fltr)) { + rc = fltr->sw_id; rcu_read_unlock(); - rc = 0; goto err_free; } } @@ -12481,10 +12485,9 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp) goto init_dflt_ring_err; bp->tx_nr_rings_per_tc = bp->tx_nr_rings; - if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { - bp->flags |= BNXT_FLAG_RFS; - bp->dev->features |= NETIF_F_NTUPLE; - } + + bnxt_set_dflt_rfs(bp); + init_dflt_ring_err: bnxt_ulp_irq_restart(bp, rc); return rc; @@ -13108,8 +13111,16 @@ static struct pci_driver bnxt_pci_driver = { static int __init bnxt_init(void) { + int err; + bnxt_debug_init(); - return pci_register_driver(&bnxt_pci_driver); + err = pci_register_driver(&bnxt_pci_driver); + if (err) { + bnxt_debug_exit(); + return err; + } + + return 0; } static void __exit bnxt_exit(void) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 92f9f7f5240b..34affd1de91d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -569,7 +569,8 @@ struct nqe_cn { #define BNXT_MAX_MTU 9500 #define BNXT_MAX_PAGE_MODE_MTU \ ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \ - XDP_PACKET_HEADROOM) + XDP_PACKET_HEADROOM - \ + SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))) #define BNXT_MIN_PKT_SIZE 52 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 98087b278d1f..81b63d1c2391 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -125,7 +125,7 @@ static int bnxt_set_coalesce(struct net_device *dev, } reset_coalesce: - if (netif_running(dev)) { + if (test_bit(BNXT_STATE_OPEN, &bp->state)) { if (update_stats) { rc = bnxt_close_nic(bp, true, false); if (!rc) @@ -2041,9 +2041,7 @@ static int bnxt_set_pauseparam(struct net_device *dev, } link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; - if (bp->hwrm_spec_code >= 0x10201) - link_info->req_flow_ctrl = - PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; + link_info->req_flow_ctrl = 0; } else { /* when transition from auto pause to force pause, * force a link change diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 23b80aa171dd..819f9df9425c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -599,7 +599,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; if (bp->flags & BNXT_FLAG_CHIP_P5) - hw_resc->max_irqs -= 
vf_msix * n; + hw_resc->max_nqs -= vf_msix; rc = pf->active_vfs; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 7dcd5613ee56..e0a6a2e62d23 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset) if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) __raw_writel(value, offset); else - writel(value, offset); + writel_relaxed(value, offset); } static inline u32 bcmgenet_readl(void __iomem *offset) @@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset) if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) return __raw_readl(offset); else - return readl(offset); + return readl_relaxed(offset); } static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, @@ -1987,6 +1987,11 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, return skb; } +static void bcmgenet_hide_tsb(struct sk_buff *skb) +{ + __skb_pull(skb, sizeof(struct status_64)); +} + static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -2093,6 +2098,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) } GENET_CB(skb)->last_cb = tx_cb_ptr; + + bcmgenet_hide_tsb(skb); skb_tx_timestamp(skb); /* Decrement total BD count and advance our write pointer */ @@ -3939,6 +3946,10 @@ static int bcmgenet_probe(struct platform_device *pdev) goto err; } priv->wol_irq = platform_get_irq_optional(pdev, 2); + if (priv->wol_irq == -EPROBE_DEFER) { + err = priv->wol_irq; + goto err; + } priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 5143cdd0eeca..be96116dc2cc 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18146,16 +18146,20 @@ static void tg3_shutdown(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct tg3 *tp = netdev_priv(dev); + tg3_reset_task_cancel(tp); + rtnl_lock(); + netif_device_detach(dev); if (netif_running(dev)) dev_close(dev); - if (system_state == SYSTEM_POWER_OFF) - tg3_power_down(tp); + tg3_power_down(tp); rtnl_unlock(); + + pci_disable_device(pdev); } /** diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index f29ec765d684..792c8147c2c4 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1092,7 +1092,6 @@ static void gem_rx_refill(struct macb_queue *queue) /* Make hw descriptor updates visible to CPU */ rmb(); - queue->rx_prepared_head++; desc = macb_rx_desc(queue, entry); if (!queue->rx_skbuff[entry]) { @@ -1131,6 +1130,7 @@ static void gem_rx_refill(struct macb_queue *queue) dma_wmb(); desc->addr &= ~MACB_BIT(RX_USED); } + queue->rx_prepared_head++; } /* Make descriptor updates visible to hardware */ @@ -1531,6 +1531,7 @@ static void macb_tx_restart(struct macb_queue *queue) unsigned int head = queue->tx_head; unsigned int tail = queue->tx_tail; struct macb *bp = queue->bp; + unsigned int head_idx, tbqp; if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(TXUBR)); @@ -1538,6 +1539,13 @@ static void macb_tx_restart(struct macb_queue *queue) if (head == tail) return; + tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); + tbqp = 
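bcmgenet_hide_tsb() above exists because the driver pushes a 64-byte transmit status block in front of the frame for the DMA engine; pulling it off again before skb_tx_timestamp() ensures timestamping and packet taps see the real Ethernet frame rather than the hardware prefix. Kernel-style sketch of the push/pull pairing (struct layout is illustrative; headroom is assumed to have been reserved beforehand):

    #include <linux/skbuff.h>
    #include <linux/string.h>

    struct hw_tx_status { u8 raw[64]; };   /* illustrative prefix layout */

    /* caller must have reserved sizeof(struct hw_tx_status) headroom */
    static void xmit_prepare(struct sk_buff *skb)
    {
        struct hw_tx_status *tsb = skb_push(skb, sizeof(*tsb));

        memset(tsb, 0, sizeof(*tsb));
        /* ... fill checksum-offload fields for the DMA engine ... */
    }

    static void xmit_publish(struct sk_buff *skb)
    {
        /* hide the prefix again before anyone else looks at the skb */
        __skb_pull(skb, sizeof(struct hw_tx_status));
        skb_tx_timestamp(skb);
    }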
macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); + head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head)); + + if (tbqp == head_idx) + return; + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index e0d18e917108..eefb25bcf57f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1798,13 +1798,10 @@ static int liquidio_open(struct net_device *netdev) ifstate_set(lio, LIO_IFSTATE_RUNNING); - if (OCTEON_CN23XX_PF(oct)) { - if (!oct->msix_on) - if (setup_tx_poll_fn(netdev)) - return -1; - } else { - if (setup_tx_poll_fn(netdev)) - return -1; + if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) { + ret = setup_tx_poll_fn(netdev); + if (ret) + goto err_poll; } netif_tx_start_all_queues(netdev); @@ -1817,7 +1814,7 @@ static int liquidio_open(struct net_device *netdev) /* tell Octeon to start forwarding packets to host */ ret = send_rx_ctrl_cmd(lio, 1); if (ret) - return ret; + goto err_rx_ctrl; /* start periodical statistics fetch */ INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); @@ -1828,6 +1825,27 @@ static int liquidio_open(struct net_device *netdev) dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); + return 0; + +err_rx_ctrl: + if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) + cleanup_tx_poll_fn(netdev); +err_poll: + if (lio->ptp_clock) { + ptp_clock_unregister(lio->ptp_clock); + lio->ptp_clock = NULL; + } + + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + if (OCTEON_CN23XX_PF(oct)) + oct->droq[0]->ops.poll_mode = 0; + } + return ret; } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index c00f1a7ffc15..488da767cfdf 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2258,7 +2258,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_unregister_interrupts; + goto err_destroy_workqueue; } nic->msg_enable = debug; @@ -2267,6 +2267,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; +err_destroy_workqueue: + destroy_workqueue(nic->nicvf_rx_mode_wq); err_unregister_interrupts: nicvf_unregister_interrupts(nic); err_free_netdev: diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 8ff28ed04b7f..f0e48b9373d6 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1438,8 +1438,10 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl, return AE_OK; } - if (strncmp(string.pointer, bgx_sel, 4)) + if (strncmp(string.pointer, bgx_sel, 4)) { + kfree(string.pointer); return AE_OK; + } acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, bgx_acpi_register_phy, NULL, bgx, NULL); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 84ad7261e243..8a167eea288c 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1302,6 +1302,7 @@ static int cxgb_up(struct adapter *adap) if (ret < 
0) { CH_ERR(adap, "failed to bind qsets, err %d\n", ret); t3_intr_disable(adap); + quiesce_rx(adap); free_irq_resources(adap); err = ret; goto out; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index c5b0e725b238..2169351b6afc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -14,6 +14,7 @@ #include "cudbg_entity.h" #include "cudbg_lib.h" #include "cudbg_zlib.h" +#include "cxgb4_tc_mqprio.h" static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */ @@ -3476,7 +3477,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, for (i = 0; i < utxq->ntxq; i++) QDESC_GET_TXQ(&utxq->uldtxq[i].q, cudbg_uld_txq_to_qtype(j), - out_unlock); + out_unlock_uld); } } @@ -3493,7 +3494,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, for (i = 0; i < urxq->nrxq; i++) QDESC_GET_RXQ(&urxq->uldrxq[i].rspq, cudbg_uld_rxq_to_qtype(j), - out_unlock); + out_unlock_uld); } /* ULD FLQ */ @@ -3505,7 +3506,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, for (i = 0; i < urxq->nrxq; i++) QDESC_GET_FLQ(&urxq->uldrxq[i].fl, cudbg_uld_flq_to_qtype(j), - out_unlock); + out_unlock_uld); } /* ULD CIQ */ @@ -3518,29 +3519,34 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, for (i = 0; i < urxq->nciq; i++) QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq, cudbg_uld_ciq_to_qtype(j), - out_unlock); + out_unlock_uld); } } + mutex_unlock(&uld_mutex); + if (!padap->tc_mqprio) + goto out; + + mutex_lock(&padap->tc_mqprio->mqprio_mutex); /* ETHOFLD TXQ */ if (s->eohw_txq) for (i = 0; i < s->eoqsets; i++) QDESC_GET_TXQ(&s->eohw_txq[i].q, - CUDBG_QTYPE_ETHOFLD_TXQ, out); + CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio); /* ETHOFLD RXQ and FLQ */ if (s->eohw_rxq) { for (i = 0; i < s->eoqsets; i++) QDESC_GET_RXQ(&s->eohw_rxq[i].rspq, - CUDBG_QTYPE_ETHOFLD_RXQ, out); + CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio); for (i = 0; i < s->eoqsets; i++) QDESC_GET_FLQ(&s->eohw_rxq[i].fl, - CUDBG_QTYPE_ETHOFLD_FLQ, out); + CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio); } -out_unlock: - mutex_unlock(&uld_mutex); +out_unlock_mqprio: + mutex_unlock(&padap->tc_mqprio->mqprio_mutex); out: qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry); @@ -3578,6 +3584,10 @@ out_free: #undef QDESC_GET return rc; + +out_unlock_uld: + mutex_unlock(&uld_mutex); + goto out; } int cudbg_collect_flash(struct cudbg_init *pdbg_init, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2820a0bb971b..5e1e46425014 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -858,7 +858,7 @@ static int cxgb4vf_open(struct net_device *dev) */ err = t4vf_update_port_info(pi); if (err < 0) - return err; + goto err_unwind; /* * Note that this interface is up and start everything up ... 
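The liquidio_open() rework above is the classic open-path unwind conversion: instead of returning a bare -1 and leaving already-completed setup steps live, each failure now jumps to a label that tears down exactly what was brought up, and the real errno from the failing step is propagated. The same shape appears in the cxgb4vf hunk, where a failing t4vf_update_port_info() now takes the err_unwind path. A minimal sketch of the pattern these fixes converge on; every foo_* name here is a placeholder, not driver API:

	static int foo_open(struct net_device *netdev)
	{
		struct foo_priv *priv = netdev_priv(netdev);
		int ret;

		ret = foo_setup_irqs(priv);		/* step 1 */
		if (ret)
			return ret;			/* nothing to undo yet */

		ret = foo_start_queues(priv);		/* step 2 */
		if (ret)
			goto err_free_irqs;

		ret = foo_enable_rx(priv);		/* step 3 */
		if (ret)
			goto err_stop_queues;

		return 0;

	err_stop_queues:	/* undo in reverse order of setup */
		foo_stop_queues(priv);
	err_free_irqs:
		foo_free_irqs(priv);
		return ret;
	}

Returning the errno instead of -1 also lets user space distinguish why the interface failed to come up.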
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index a262c949ed76..aceb1fd38ddb 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -1150,7 +1150,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk, fl6.daddr = ip6h->saddr; fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port; fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num); - security_req_classify_flow(oreq, flowi6_to_flowi(&fl6)); + security_req_classify_flow(oreq, flowi6_to_flowi_common(&fl6)); dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL); if (IS_ERR(dst)) goto free_sk; @@ -1235,8 +1235,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk, csk->sndbuf = newsk->sk_sndbuf; csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx; RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk), - sock_net(newsk)-> - ipv4.sysctl_tcp_window_scaling, + READ_ONCE(sock_net(newsk)-> + ipv4.sysctl_tcp_window_scaling), tp->window_clamp); neigh_release(n); inet_inherit_port(&tcp_hashinfo, lsk, newsk); @@ -1383,7 +1383,7 @@ static void chtls_pass_accept_request(struct sock *sk, #endif } if (req->tcpopt.wsf <= 14 && - sock_net(sk)->ipv4.sysctl_tcp_window_scaling) { + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) { inet_rsk(oreq)->wscale_ok = 1; inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf; } diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index d11fcfd927c0..85ea073b742f 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -1920,7 +1920,7 @@ static void gmac_get_stats64(struct net_device *netdev, /* Racing with RX NAPI */ do { - start = u64_stats_fetch_begin(&port->rx_stats_syncp); + start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp); stats->rx_packets = port->stats.rx_packets; stats->rx_bytes = port->stats.rx_bytes; @@ -1932,11 +1932,11 @@ static void gmac_get_stats64(struct net_device *netdev, stats->rx_crc_errors = port->stats.rx_crc_errors; stats->rx_frame_errors = port->stats.rx_frame_errors; - } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); + } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start)); /* Racing with MIB and TX completion interrupts */ do { - start = u64_stats_fetch_begin(&port->ir_stats_syncp); + start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp); stats->tx_errors = port->stats.tx_errors; stats->tx_packets = port->stats.tx_packets; @@ -1946,15 +1946,15 @@ static void gmac_get_stats64(struct net_device *netdev, stats->rx_missed_errors = port->stats.rx_missed_errors; stats->rx_fifo_errors = port->stats.rx_fifo_errors; - } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); + } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start)); /* Racing with hard_start_xmit */ do { - start = u64_stats_fetch_begin(&port->tx_stats_syncp); + start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp); stats->tx_dropped = port->stats.tx_dropped; - } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); + } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start)); stats->rx_dropped += stats->rx_missed_errors; } @@ -2032,18 +2032,18 @@ static void gmac_get_ethtool_stats(struct net_device *netdev, /* Racing with MIB interrupt */ do { p = values; - start = u64_stats_fetch_begin(&port->ir_stats_syncp); + start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp); for (i = 0; i < RX_STATS_NUM; i++) *p++ = 
port->hw_stats[i]; - } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); + } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start)); values = p; /* Racing with RX NAPI */ do { p = values; - start = u64_stats_fetch_begin(&port->rx_stats_syncp); + start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp); for (i = 0; i < RX_STATUS_NUM; i++) *p++ = port->rx_stats[i]; @@ -2051,13 +2051,13 @@ static void gmac_get_ethtool_stats(struct net_device *netdev, *p++ = port->rx_csum_stats[i]; *p++ = port->rx_napi_exits; - } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); + } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start)); values = p; /* Racing with TX start_xmit */ do { p = values; - start = u64_stats_fetch_begin(&port->tx_stats_syncp); + start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp); for (i = 0; i < TX_MAX_FRAGS; i++) { *values++ = port->tx_frag_stats[i]; @@ -2066,7 +2066,7 @@ static void gmac_get_ethtool_stats(struct net_device *netdev, *values++ = port->tx_frags_linearized; *values++ = port->tx_hw_csummed; - } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); + } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start)); } static int gmac_get_ksettings(struct net_device *netdev, diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index e7b0d7de40fd..c22d945a79fd 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1396,8 +1396,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* alloc_etherdev ensures aligned and zeroed private structures */ dev = alloc_etherdev (sizeof (*tp)); - if (!dev) + if (!dev) { + pci_disable_device(pdev); return -ENOMEM; + } SET_NETDEV_DEV(dev, &pdev->dev); if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { @@ -1774,6 +1776,7 @@ err_out_free_res: err_out_free_netdev: free_netdev (dev); + pci_disable_device(pdev); return -ENODEV; } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 649c5c429bd7..1288b5e3d220 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2287,7 +2287,7 @@ err: /* Uses sync mcc */ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, - u8 page_num, u8 *data) + u8 page_num, u32 off, u32 len, u8 *data) { struct be_dma_mem cmd; struct be_mcc_wrb *wrb; @@ -2321,10 +2321,10 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, req->port = cpu_to_le32(adapter->hba_port_num); req->page_num = cpu_to_le32(page_num); status = be_mcc_notify_wait(adapter); - if (!status) { + if (!status && len > 0) { struct be_cmd_resp_port_type *resp = cmd.va; - memcpy(data, resp->page_data, PAGE_DATA_LEN); + memcpy(data, resp->page_data + off, len); } err: mutex_unlock(&adapter->mcc_lock); @@ -2415,7 +2415,7 @@ int be_cmd_query_cable_type(struct be_adapter *adapter) int status; status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, - page_data); + 0, PAGE_DATA_LEN, page_data); if (!status) { switch (adapter->phy.interface_type) { case PHY_TYPE_QSFP: @@ -2440,7 +2440,7 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter) int status; status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, - page_data); + 0, PAGE_DATA_LEN, page_data); if (!status) { strlcpy(adapter->phy.vendor_name, page_data + SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1); diff --git 
a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index c30d6d6f0f3a..9e17d6a7ab8c 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -2427,7 +2427,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon, int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state); int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, - u8 page_num, u8 *data); + u8 page_num, u32 off, u32 len, u8 *data); int be_cmd_query_cable_type(struct be_adapter *adapter); int be_cmd_query_sfp_info(struct be_adapter *adapter); int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 99cc1c46fb30..d90bf457e49c 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1338,7 +1338,7 @@ static int be_get_module_info(struct net_device *netdev, return -EOPNOTSUPP; status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, - page_data); + 0, PAGE_DATA_LEN, page_data); if (!status) { if (!page_data[SFP_PLUS_SFF_8472_COMP]) { modinfo->type = ETH_MODULE_SFF_8079; @@ -1356,25 +1356,32 @@ static int be_get_module_eeprom(struct net_device *netdev, { struct be_adapter *adapter = netdev_priv(netdev); int status; + u32 begin, end; if (!check_privilege(adapter, MAX_PRIVILEGES)) return -EOPNOTSUPP; - status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, - data); - if (status) - goto err; + begin = eeprom->offset; + end = eeprom->offset + eeprom->len; - if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) { - status = be_cmd_read_port_transceiver_data(adapter, - TR_PAGE_A2, - data + - PAGE_DATA_LEN); + if (begin < PAGE_DATA_LEN) { + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin, + min_t(u32, end, PAGE_DATA_LEN) - begin, + data); + if (status) + goto err; + + data += PAGE_DATA_LEN - begin; + begin = PAGE_DATA_LEN; + } + + if (end > PAGE_DATA_LEN) { + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2, + begin - PAGE_DATA_LEN, + end - begin, data); if (status) goto err; } - if (eeprom->offset) - memcpy(data, data + eeprom->offset, eeprom->len); err: return be_cmd_status(status); } diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 5bc11d1bb9df..969af4dd6405 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1747,6 +1747,19 @@ cleanup_clk: return rc; } +static bool ftgmac100_has_child_node(struct device_node *np, const char *name) +{ + struct device_node *child_np = of_get_child_by_name(np, name); + bool ret = false; + + if (child_np) { + ret = true; + of_node_put(child_np); + } + + return ret; +} + static int ftgmac100_probe(struct platform_device *pdev) { struct resource *res; @@ -1860,7 +1873,7 @@ static int ftgmac100_probe(struct platform_device *pdev) /* Display what we found */ phy_attached_info(phy); - } else if (np && !of_get_child_by_name(np, "mdio")) { + } else if (np && !ftgmac100_has_child_node(np, "mdio")) { /* Support legacy ASPEED devicetree descriptions that describe a * MAC with an embedded MDIO controller but have no "mdio" * child node.
Automatically scan the MDIO bus for available @@ -1893,6 +1906,11 @@ static int ftgmac100_probe(struct platform_device *pdev) /* AST2400 doesn't have working HW checksum generation */ if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac"))) netdev->hw_features &= ~NETIF_F_HW_CSUM; + + /* AST2600 tx checksum with NCSI is broken */ + if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac")) + netdev->hw_features &= ~NETIF_F_HW_CSUM; + if (np && of_get_property(np, "no-hw-checksum", NULL)) netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM); netdev->features |= netdev->hw_features; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 1268996b7030..2f9075429c43 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev, info->phc_index = -1; fman_node = of_get_parent(mac_node); - if (fman_node) + if (fman_node) { ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0); + of_node_put(fman_node); + } - if (ptp_node) + if (ptp_node) { ptp_dev = of_find_device_by_node(ptp_node); + of_node_put(ptp_node); + } if (ptp_dev) ptp = platform_get_drvdata(ptp_dev); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index d89ddc165ec2..35401202523e 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1349,8 +1349,8 @@ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv, buf_array[i] = addr; /* tracing point */ - trace_dpaa2_eth_buf_seed(priv->net_dev, - page, DPAA2_ETH_RX_BUF_RAW_SIZE, + trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page), + DPAA2_ETH_RX_BUF_RAW_SIZE, addr, priv->rx_buf_size, bpid); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index 32b5faa87bb8..208a3459f2e2 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -168,7 +168,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) base = of_iomap(node, 0); if (!base) { err = -ENOMEM; - goto err_close; + goto err_put; } err = fsl_mc_allocate_irqs(mc_dev); @@ -212,6 +212,8 @@ err_free_mc_irq: fsl_mc_free_irqs(mc_dev); err_unmap: iounmap(base); +err_put: + of_node_put(node); err_close: dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); err_free_mcp: diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 15aa3b3c0089..975762ccb66f 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1212,7 +1212,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) /* enable Tx ints by setting pkt thr to 1 */ enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1); - tbmr = ENETC_TBMR_EN; + tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio); if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) tbmr |= ENETC_TBMR_VIH; @@ -1241,7 +1241,12 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); + /* Also prepare the consumer index in case page allocation never + * succeeds. In that case, hardware will never advance producer index + * to match consumer index, and will drop all frames. 
+ */ enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); + enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1); /* enable Rx ints by setting pkt thr to 1 */ enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1); @@ -1267,13 +1272,14 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) static void enetc_setup_bdrs(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_tx_rings; i++) - enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]); + enetc_setup_txbdr(hw, priv->tx_ring[i]); for (i = 0; i < priv->num_rx_rings; i++) - enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]); + enetc_setup_rxbdr(hw, priv->rx_ring[i]); } static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) @@ -1306,13 +1312,14 @@ static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) static void enetc_clear_bdrs(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_tx_rings; i++) - enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]); + enetc_clear_txbdr(hw, priv->tx_ring[i]); for (i = 0; i < priv->num_rx_rings; i++) - enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]); + enetc_clear_rxbdr(hw, priv->rx_ring[i]); udelay(1); } @@ -1320,13 +1327,13 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv) static int enetc_setup_irqs(struct enetc_ndev_priv *priv) { struct pci_dev *pdev = priv->si->pdev; + struct enetc_hw *hw = &priv->si->hw; int i, j, err; for (i = 0; i < priv->bdr_int_num; i++) { int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); struct enetc_int_vector *v = priv->int_vector[i]; int entry = ENETC_BDR_INT_BASE_IDX + i; - struct enetc_hw *hw = &priv->si->hw; snprintf(v->name, sizeof(v->name), "%s-rxtx%d", priv->ndev->name, i); @@ -1414,13 +1421,14 @@ static void enetc_setup_interrupts(struct enetc_ndev_priv *priv) static void enetc_clear_interrupts(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_tx_rings; i++) - enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0); + enetc_txbdr_wr(hw, i, ENETC_TBIER, 0); for (i = 0; i < priv->num_rx_rings; i++) - enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0); + enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0); } static int enetc_phylink_connect(struct net_device *ndev) @@ -1560,6 +1568,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct tc_mqprio_qopt *mqprio = type_data; + struct enetc_hw *hw = &priv->si->hw; struct enetc_bdr *tx_ring; u8 num_tc; int i; @@ -1574,7 +1583,8 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) /* Reset all ring priorities to 0 */ for (i = 0; i < priv->num_tx_rings; i++) { tx_ring = priv->tx_ring[i]; - enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0); + tx_ring->prio = 0; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); } return 0; @@ -1593,7 +1603,8 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) */ for (i = 0; i < num_tc; i++) { tx_ring = priv->tx_ring[i]; - enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i); + tx_ring->prio = i; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); } /* Reset the number of netdev queues based on the TC count */ @@ -1671,52 +1682,29 @@ static int enetc_set_rss(struct net_device *ndev, int en) return 0; } -static int enetc_set_psfp(struct net_device *ndev, int en) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - int err; - - if (en) { - err = 
enetc_psfp_enable(priv); - if (err) - return err; - - priv->active_offloads |= ENETC_F_QCI; - return 0; - } - - err = enetc_psfp_disable(priv); - if (err) - return err; - - priv->active_offloads &= ~ENETC_F_QCI; - - return 0; -} - static void enetc_enable_rxvlan(struct net_device *ndev, bool en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_rx_rings; i++) - enetc_bdr_enable_rxvlan(&priv->si->hw, i, en); + enetc_bdr_enable_rxvlan(hw, i, en); } static void enetc_enable_txvlan(struct net_device *ndev, bool en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; int i; for (i = 0; i < priv->num_tx_rings; i++) - enetc_bdr_enable_txvlan(&priv->si->hw, i, en); + enetc_bdr_enable_txvlan(hw, i, en); } -int enetc_set_features(struct net_device *ndev, - netdev_features_t features) +void enetc_set_features(struct net_device *ndev, netdev_features_t features) { netdev_features_t changed = ndev->features ^ features; - int err = 0; if (changed & NETIF_F_RXHASH) enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); @@ -1728,11 +1716,6 @@ int enetc_set_features(struct net_device *ndev, if (changed & NETIF_F_HW_VLAN_CTAG_TX) enetc_enable_txvlan(ndev, !!(features & NETIF_F_HW_VLAN_CTAG_TX)); - - if (changed & NETIF_F_HW_TC) - err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); - - return err; } #ifdef CONFIG_FSL_ENETC_PTP_CLOCK diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 15d19cbd5a95..725c3d1cbb19 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -58,6 +58,7 @@ struct enetc_bdr { void __iomem *rcir; }; u16 index; + u16 prio; int bd_count; /* # of BDs */ int next_to_use; int next_to_clean; @@ -301,8 +302,7 @@ void enetc_start(struct net_device *ndev); void enetc_stop(struct net_device *ndev); netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev); struct net_device_stats *enetc_get_stats(struct net_device *ndev); -int enetc_set_features(struct net_device *ndev, - netdev_features_t features); +void enetc_set_features(struct net_device *ndev, netdev_features_t features); int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd); int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data); @@ -335,22 +335,24 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data); int enetc_psfp_init(struct enetc_ndev_priv *priv); int enetc_psfp_clean(struct enetc_ndev_priv *priv); +int enetc_set_psfp(struct net_device *ndev, bool en); static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; u32 reg; - reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR); + reg = enetc_port_rd(hw, ENETC_PSIDCAPR); priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK; /* Port stream filter capability */ - reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR); + reg = enetc_port_rd(hw, ENETC_PSFCAPR); priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK; /* Port stream gate capability */ - reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR); + reg = enetc_port_rd(hw, ENETC_PSGCAPR); priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK); priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16; /* Port flow meter capability */ - reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR); + reg = 
enetc_port_rd(hw, ENETC_PFMCAPR); priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK; } @@ -410,4 +412,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv) { return 0; } + +static inline int enetc_set_psfp(struct net_device *ndev, bool en) +{ + return 0; +} #endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 716b396bf094..515db7e6e649 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -671,6 +671,13 @@ static int enetc_pf_set_features(struct net_device *ndev, { netdev_features_t changed = ndev->features ^ features; struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + if (changed & NETIF_F_HW_TC) { + err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); + if (err) + return err; + } if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { struct enetc_pf *pf = enetc_si_priv(priv->si); @@ -684,7 +691,9 @@ static int enetc_pf_set_features(struct net_device *ndev, if (changed & NETIF_F_LOOPBACK) enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK)); - return enetc_set_features(ndev, features); + enetc_set_features(ndev, features); + + return 0; } static const struct net_device_ops enetc_ndev_ops = { @@ -739,9 +748,6 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->priv_flags |= IFF_UNICAST_FLT; - if (si->hw_features & ENETC_SI_F_QBV) - priv->active_offloads |= ENETC_F_QBV; - if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) { priv->active_offloads |= ENETC_F_QCI; ndev->features |= NETIF_F_HW_TC; @@ -987,7 +993,8 @@ static void enetc_pl_mac_link_up(struct phylink_config *config, struct enetc_ndev_priv *priv; priv = netdev_priv(pf->si->ndev); - if (priv->active_offloads & ENETC_F_QBV) + + if (pf->si->hw_features & ENETC_SI_F_QBV) enetc_sched_speed_set(priv, speed); if (!phylink_autoneg_inband(mode) && diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 9e6988fd3787..5841721c8119 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -17,8 +17,9 @@ static u16 enetc_get_max_gcl_len(struct enetc_hw *hw) void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed) { + struct enetc_hw *hw = &priv->si->hw; u32 old_speed = priv->speed; - u32 pspeed; + u32 pspeed, tmp; if (speed == old_speed) return; @@ -39,16 +40,15 @@ void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed) } priv->speed = speed; - enetc_port_wr(&priv->si->hw, ENETC_PMR, - (enetc_port_rd(&priv->si->hw, ENETC_PMR) - & (~ENETC_PMR_PSPEED_MASK)) - | pspeed); + tmp = enetc_port_rd(hw, ENETC_PMR); + enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed); } static int enetc_setup_taprio(struct net_device *ndev, struct tc_taprio_qopt_offload *admin_conf) { struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; struct enetc_cbd cbd = {.cmd = 0}; struct tgs_gcl_conf *gcl_config; struct tgs_gcl_data *gcl_data; @@ -60,15 +60,16 @@ static int enetc_setup_taprio(struct net_device *ndev, int err; int i; - if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw)) + if (admin_conf->num_entries > enetc_get_max_gcl_len(hw)) return -EINVAL; gcl_len = admin_conf->num_entries; - tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET); + tge = enetc_rd(hw, ENETC_QBV_PTGCR_OFFSET); if (!admin_conf->enable) { - 
enetc_wr(&priv->si->hw, - ENETC_QBV_PTGCR_OFFSET, - tge & (~ENETC_QBV_TGE)); + enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge & ~ENETC_QBV_TGE); + + priv->active_offloads &= ~ENETC_F_QBV; + return 0; } @@ -123,18 +124,18 @@ static int enetc_setup_taprio(struct net_device *ndev, cbd.cls = BDCR_CMD_PORT_GCL; cbd.status_flags = 0; - enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET, - tge | ENETC_QBV_TGE); + enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge | ENETC_QBV_TGE); err = enetc_send_cmd(priv->si, &cbd); if (err) - enetc_wr(&priv->si->hw, - ENETC_QBV_PTGCR_OFFSET, - tge & (~ENETC_QBV_TGE)); + enetc_wr(hw, ENETC_QBV_PTGCR_OFFSET, tge & ~ENETC_QBV_TGE); dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE); kfree(gcl_data); + if (!err) + priv->active_offloads |= ENETC_F_QBV; + return err; } @@ -142,6 +143,8 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) { struct tc_taprio_qopt_offload *taprio = type_data; struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + struct enetc_bdr *tx_ring; int err; int i; @@ -150,18 +153,20 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) if (priv->tx_ring[i]->tsd_enable) return -EBUSY; - for (i = 0; i < priv->num_tx_rings; i++) - enetc_set_bdr_prio(&priv->si->hw, - priv->tx_ring[i]->index, - taprio->enable ? i : 0); + for (i = 0; i < priv->num_tx_rings; i++) { + tx_ring = priv->tx_ring[i]; + tx_ring->prio = taprio->enable ? i : 0; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); + } err = enetc_setup_taprio(ndev, taprio); - - if (err) - for (i = 0; i < priv->num_tx_rings; i++) - enetc_set_bdr_prio(&priv->si->hw, - priv->tx_ring[i]->index, - taprio->enable ? 0 : i); + if (err) { + for (i = 0; i < priv->num_tx_rings; i++) { + tx_ring = priv->tx_ring[i]; + tx_ring->prio = taprio->enable ? 0 : i; + enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); + } + } return err; } @@ -182,7 +187,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) struct tc_cbs_qopt_offload *cbs = type_data; u32 port_transmit_rate = priv->speed; u8 tc_nums = netdev_get_num_tc(ndev); - struct enetc_si *si = priv->si; + struct enetc_hw *hw = &priv->si->hw; u32 hi_credit_bit, hi_credit_reg; u32 max_interference_size; u32 port_frame_max_size; @@ -203,15 +208,15 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) * lower than this TC have been disabled. */ if (tc == prio_top && - enetc_get_cbs_enable(&si->hw, prio_next)) { + enetc_get_cbs_enable(hw, prio_next)) { dev_err(&ndev->dev, "Disable TC%d before disable TC%d\n", prio_next, tc); return -EINVAL; } - enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0); - enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0); + enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0); + enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0); return 0; } @@ -228,13 +233,13 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) * higher than this TC have been enabled. */ if (tc == prio_next) { - if (!enetc_get_cbs_enable(&si->hw, prio_top)) { + if (!enetc_get_cbs_enable(hw, prio_top)) { dev_err(&ndev->dev, "Enable TC%d first before enable TC%d\n", prio_top, prio_next); return -EINVAL; } - bw_sum += enetc_get_cbs_bw(&si->hw, prio_top); + bw_sum += enetc_get_cbs_bw(hw, prio_top); } if (bw_sum + bw >= 100) { @@ -243,7 +248,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) return -EINVAL; } - enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc)); + enetc_port_rd(hw, ENETC_PTCMSDUR(tc)); /* For top prio TC, the max_interference_size is maxSizedFrame.
* @@ -263,8 +268,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) u32 m0, ma, r0, ra; m0 = port_frame_max_size * 8; - ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8; - ra = enetc_get_cbs_bw(&si->hw, prio_top) * + ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8; + ra = enetc_get_cbs_bw(hw, prio_top) * port_transmit_rate * 10000ULL; r0 = port_transmit_rate * 1000000ULL; max_interference_size = m0 + ma + @@ -284,10 +289,10 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data) hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit, port_transmit_rate * 1000000ULL); - enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg); + enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg); /* Set bw register and enable this traffic class */ - enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE); + enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE); return 0; } @@ -297,6 +302,7 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data) struct enetc_ndev_priv *priv = netdev_priv(ndev); struct tc_etf_qopt_offload *qopt = type_data; u8 tc_nums = netdev_get_num_tc(ndev); + struct enetc_hw *hw = &priv->si->hw; int tc; if (!tc_nums) @@ -312,12 +318,11 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data) return -EBUSY; /* TSD and Qbv are mutually exclusive in hardware */ - if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE) + if (enetc_rd(hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE) return -EBUSY; priv->tx_ring[tc]->tsd_enable = qopt->enable; - enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc), - qopt->enable ? ENETC_TSDE : 0); + enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0); return 0; } @@ -1525,6 +1530,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } } +int enetc_set_psfp(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + if (en) { + err = enetc_psfp_enable(priv); + if (err) + return err; + + priv->active_offloads |= ENETC_F_QCI; + return 0; + } + + err = enetc_psfp_disable(priv); + if (err) + return err; + + priv->active_offloads &= ~ENETC_F_QCI; + + return 0; +} + int enetc_psfp_init(struct enetc_ndev_priv *priv) { if (epsfp.psfp_sfi_bitmap) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 33c125735db7..5ce3e2593bdd 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -88,7 +88,9 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr) static int enetc_vf_set_features(struct net_device *ndev, netdev_features_t features) { - return enetc_set_features(ndev, features); + enetc_set_features(ndev, features); + + return 0; } /* Probing/ Init */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 166bc3f3b34c..686bb873125c 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -623,7 +623,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "Tx DMA memory map failed\n"); - return NETDEV_TX_BUSY; + return NETDEV_TX_OK; } bdp->cbd_datlen = cpu_to_fec16(size); @@ -685,7 +685,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "Tx DMA memory map failed\n"); - return NETDEV_TX_BUSY; + return 
NETDEV_TX_OK; } } @@ -2251,6 +2251,31 @@ static u32 fec_enet_register_offset[] = { IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, IEEE_R_FDXFC, IEEE_R_OCTETS_OK }; +/* for i.MX6ul */ +static u32 fec_enet_register_offset_6ul[] = { + FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, + FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, + FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0, + FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, + FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0, + FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, + FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, + RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, + RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, + RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, + RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, + RMON_T_P_GTE2048, RMON_T_OCTETS, + IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, + IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, + IEEE_T_FDXFC, IEEE_T_OCTETS_OK, + RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, + RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, + RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, + RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, + RMON_R_P_GTE2048, RMON_R_OCTETS, + IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, + IEEE_R_FDXFC, IEEE_R_OCTETS_OK +}; #else static __u32 fec_enet_register_version = 1; static u32 fec_enet_register_offset[] = { @@ -2275,7 +2300,24 @@ static void fec_enet_get_regs(struct net_device *ndev, u32 *buf = (u32 *)regbuf; u32 i, off; int ret; +#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ + defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) + u32 *reg_list; + u32 reg_cnt; + if (!of_machine_is_compatible("fsl,imx6ul")) { + reg_list = fec_enet_register_offset; + reg_cnt = ARRAY_SIZE(fec_enet_register_offset); + } else { + reg_list = fec_enet_register_offset_6ul; + reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul); + } +#else + /* coldfire */ + static u32 *reg_list = fec_enet_register_offset; + static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); +#endif ret = pm_runtime_resume_and_get(dev); if (ret < 0) return; @@ -2284,8 +2326,8 @@ static void fec_enet_get_regs(struct net_device *ndev, memset(buf, 0, regs->len); - for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { - off = fec_enet_register_offset[i]; + for (i = 0; i < reg_cnt; i++) { + off = reg_list[i]; if ((off == FEC_R_BOUND || off == FEC_R_FSTART) && !(fep->quirks & FEC_QUIRK_HAS_FRREG)) @@ -3529,7 +3571,7 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep, ARRAY_SIZE(out_val)); if (ret) { dev_dbg(&fep->pdev->dev, "no stop mode property\n"); - return ret; + goto out; } fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index d71eac7e1924..c5ae67300590 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -136,11 +136,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) * NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds * to current timer would be next second. 
*/ - tempval = readl(fep->hwp + FEC_ATIME_CTRL); - tempval |= FEC_T_CTRL_CAPTURE; - writel(tempval, fep->hwp + FEC_ATIME_CTRL); - - tempval = readl(fep->hwp + FEC_ATIME); + tempval = fep->cc.read(&fep->cc); /* Convert the ptp local counter to 1588 timestamp */ ns = timecounter_cyc2time(&fep->tc, tempval); ts = ns_to_timespec64(ns); diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 6eeccc11b76e..3312dc4083a0 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -884,12 +884,21 @@ _return: return err; } +static int mac_remove(struct platform_device *pdev) +{ + struct mac_device *mac_dev = platform_get_drvdata(pdev); + + platform_device_unregister(mac_dev->priv->eth_dev); + return 0; +} + static struct platform_driver mac_driver = { .driver = { .name = KBUILD_MODNAME, .of_match_table = mac_match, }, .probe = mac_probe, + .remove = mac_remove, }; builtin_platform_driver(mac_driver); diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 99fe2c210d0f..61f4b6e50d29 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -98,7 +98,7 @@ static int do_pd_setup(struct fs_enet_private *fep) return -EINVAL; fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0); - if (!fep->fcc.fccp) + if (!fep->fec.fecp) return -EINVAL; return 0; diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 7b44769bd87c..c53a04313944 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -172,14 +172,14 @@ gve_get_ethtool_stats(struct net_device *netdev, struct gve_rx_ring *rx = &priv->rx[ring]; start = - u64_stats_fetch_begin(&priv->rx[ring].statss); + u64_stats_fetch_begin_irq(&priv->rx[ring].statss); tmp_rx_pkts = rx->rpackets; tmp_rx_bytes = rx->rbytes; tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail; tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; tmp_rx_desc_err_dropped_pkt = rx->rx_desc_err_dropped_pkt; - } while (u64_stats_fetch_retry(&priv->rx[ring].statss, + } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss, start)); rx_pkts += tmp_rx_pkts; rx_bytes += tmp_rx_bytes; @@ -193,10 +193,10 @@ gve_get_ethtool_stats(struct net_device *netdev, if (priv->tx) { do { start = - u64_stats_fetch_begin(&priv->tx[ring].statss); + u64_stats_fetch_begin_irq(&priv->tx[ring].statss); tmp_tx_pkts = priv->tx[ring].pkt_done; tmp_tx_bytes = priv->tx[ring].bytes_done; - } while (u64_stats_fetch_retry(&priv->tx[ring].statss, + } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss, start)); tx_pkts += tmp_tx_pkts; tx_bytes += tmp_tx_bytes; @@ -254,13 +254,13 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = rx->cnt; do { start = - u64_stats_fetch_begin(&priv->rx[ring].statss); + u64_stats_fetch_begin_irq(&priv->rx[ring].statss); tmp_rx_bytes = rx->rbytes; tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail; tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; tmp_rx_desc_err_dropped_pkt = rx->rx_desc_err_dropped_pkt; - } while (u64_stats_fetch_retry(&priv->rx[ring].statss, + } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss, start)); data[i++] = tmp_rx_bytes; /* rx dropped packets */ @@ -313,9 +313,9 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = tx->done; do { start = - u64_stats_fetch_begin(&priv->tx[ring].statss); + 
u64_stats_fetch_begin_irq(&priv->tx[ring].statss); tmp_tx_bytes = tx->bytes_done; - } while (u64_stats_fetch_retry(&priv->tx[ring].statss, + } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss, start)); data[i++] = tmp_tx_bytes; data[i++] = tx->wake_queue; diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 6cb75bb1ed05..f0c1e6c80b61 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -40,10 +40,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s) for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { do { start = - u64_stats_fetch_begin(&priv->rx[ring].statss); + u64_stats_fetch_begin_irq(&priv->rx[ring].statss); packets = priv->rx[ring].rpackets; bytes = priv->rx[ring].rbytes; - } while (u64_stats_fetch_retry(&priv->rx[ring].statss, + } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss, start)); s->rx_packets += packets; s->rx_bytes += bytes; @@ -53,10 +53,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s) for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) { do { start = - u64_stats_fetch_begin(&priv->tx[ring].statss); + u64_stats_fetch_begin_irq(&priv->tx[ring].statss); packets = priv->tx[ring].pkt_done; bytes = priv->tx[ring].bytes_done; - } while (u64_stats_fetch_retry(&priv->tx[ring].statss, + } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss, start)); s->tx_packets += packets; s->tx_bytes += bytes; @@ -1041,9 +1041,9 @@ void gve_handle_report_stats(struct gve_priv *priv) if (priv->tx) { for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { do { - start = u64_stats_fetch_begin(&priv->tx[idx].statss); + start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss); tx_bytes = priv->tx[idx].bytes_done; - } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start)); + } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start)); stats[stats_idx++] = (struct stats) { .stat_name = cpu_to_be32(TX_WAKE_CNT), .value = cpu_to_be64(priv->tx[idx].wake_queue), diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 57c3bc4f7089..c16dfd869363 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -283,7 +283,7 @@ static int hisi_femac_rx(struct net_device *dev, int limit) skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(&priv->napi, skb); dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + dev->stats.rx_bytes += len; next: pos = (pos + 1) % rxq->num; if (rx_pkts_num >= limit) diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 8b2bf85039f1..43f3146caf07 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -550,7 +550,7 @@ static int hix5hd2_rx(struct net_device *dev, int limit) skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(&priv->napi, skb); dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + dev->stats.rx_bytes += len; next: pos = dma_ring_incr(pos, RX_DESC_NUM); } diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 00fafc0f8512..430eccea8e5e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner) hdev->cls_dev.release = hnae_release; 
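The gemini and gve hunks above all make one substitution: u64_stats_fetch_begin()/u64_stats_fetch_retry() become the _irq variants, because these counters are written from NAPI and interrupt context. On 64-bit kernels the pair is nearly free; on 32-bit UP kernels, which have no seqcount protection, the _irq flavor disables interrupts around the snapshot so an interrupt-context writer cannot hand the reader a torn 64-bit value. A sketch of the paired writer and reader, with the foo_* names invented for illustration:

	#include <linux/u64_stats_sync.h>

	struct foo_ring_stats {
		struct u64_stats_sync syncp;
		u64 packets;
		u64 bytes;
	};

	/* writer side, called from the IRQ/NAPI path */
	static void foo_ring_account(struct foo_ring_stats *s, unsigned int len)
	{
		u64_stats_update_begin(&s->syncp);
		s->packets++;
		s->bytes += len;
		u64_stats_update_end(&s->syncp);
	}

	/* reader side: snapshot the counters, retry if a writer raced in */
	static void foo_ring_read(struct foo_ring_stats *s, u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			*packets = s->packets;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
	}

The hisi_femac and hix5hd2 one-liners nearby fix a related accounting bug: rx_bytes is now accumulated from the cached len, because skb->len may no longer be read once napi_gro_receive() has taken ownership of the skb.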
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id); ret = device_register(&hdev->cls_dev); - if (ret) + if (ret) { + put_device(&hdev->cls_dev); return ret; + } __module_get(THIS_MODULE); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 5d3996767256..51b7b46f2f30 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -91,6 +91,13 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, enum hclge_cmd_status status; struct hclge_desc desc; + if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) { + dev_err(&hdev->pdev->dev, + "msg data length(=%u) exceeds maximum(=%u)\n", + msg_len, HCLGE_MBX_MAX_MSG_SIZE); + return -EMSGSIZE; + } + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); @@ -173,7 +180,7 @@ static int hclge_get_ring_chain_from_mbx( ring_num = req->msg.ring_num; if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM) - return -ENOMEM; + return -EINVAL; for (i = 0; i < ring_num; i++) { if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) { @@ -577,9 +584,9 @@ static int hclge_set_vf_mtu(struct hclge_vport *vport, return hclge_set_vport_mtu(vport, mtu); } -static void hclge_get_queue_id_in_pf(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - struct hclge_respond_to_vf_msg *resp_msg) +static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) { struct hnae3_handle *handle = &vport->nic; struct hclge_dev *hdev = vport->back; @@ -589,17 +596,18 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport, if (queue_id >= handle->kinfo.num_tqps) { dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n", queue_id, mbx_req->mbx_src_vfid); - return; + return -EINVAL; } qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf)); resp_msg->len = sizeof(qid_in_pf); + return 0; } -static void hclge_get_rss_key(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - struct hclge_respond_to_vf_msg *resp_msg) +static int hclge_get_rss_key(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) { #define HCLGE_RSS_MBX_RESP_LEN 8 struct hclge_dev *hdev = vport->back; @@ -615,13 +623,14 @@ static void hclge_get_rss_key(struct hclge_vport *vport, dev_warn(&hdev->pdev->dev, "failed to get the rss hash key, the index(%u) invalid !\n", index); - return; + return -EINVAL; } memcpy(resp_msg->data, &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], HCLGE_RSS_MBX_RESP_LEN); resp_msg->len = HCLGE_RSS_MBX_RESP_LEN; + return 0; } static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code) @@ -800,10 +809,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev) "VF fail(%d) to set mtu\n", ret); break; case HCLGE_MBX_GET_QID_IN_PF: - hclge_get_queue_id_in_pf(vport, req, &resp_msg); + ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg); break; case HCLGE_MBX_GET_RSS_KEY: - hclge_get_rss_key(vport, req, &resp_msg); + ret = hclge_get_rss_key(vport, req, &resp_msg); break; case HCLGE_MBX_GET_LINK_MODE: hclge_get_link_mode(vport, req); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c index 19eb839177ec..061952c6c21a 100644 --- 
a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c @@ -85,6 +85,7 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx) struct tag_sml_funcfg_tbl *funcfg_table_elem; struct hinic_cmd_lt_rd *read_data; u16 out_size = sizeof(*read_data); + int ret = ~0; int err; read_data = kzalloc(sizeof(*read_data), GFP_KERNEL); @@ -111,20 +112,25 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx) switch (idx) { case VALID: - return funcfg_table_elem->dw0.bs.valid; + ret = funcfg_table_elem->dw0.bs.valid; + break; case RX_MODE: - return funcfg_table_elem->dw0.bs.nic_rx_mode; + ret = funcfg_table_elem->dw0.bs.nic_rx_mode; + break; case MTU: - return funcfg_table_elem->dw1.bs.mtu; + ret = funcfg_table_elem->dw1.bs.mtu; + break; case RQ_DEPTH: - return funcfg_table_elem->dw13.bs.cfg_rq_depth; + ret = funcfg_table_elem->dw13.bs.cfg_rq_depth; + break; case QUEUE_NUM: - return funcfg_table_elem->dw13.bs.cfg_q_num; + ret = funcfg_table_elem->dw13.bs.cfg_q_num; + break; } kfree(read_data); - return ~0; + return ret; } static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h index fb3e89141a0d..a4fbf44f944c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -95,9 +95,6 @@ struct hinic_dev { u16 sq_depth; u16 rq_depth; - struct hinic_txq_stats tx_stats; - struct hinic_rxq_stats rx_stats; - u8 rss_tmpl_idx; u8 rss_hash_engine; u16 num_rss; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c index 4e4029d5c8e1..9553d280ec1b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c @@ -818,7 +818,6 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain, { struct hinic_hwif *hwif = attr->hwif; struct pci_dev *pdev = hwif->pdev; - size_t cell_ctxt_size; chain->hwif = hwif; chain->chain_type = attr->chain_type; @@ -830,8 +829,8 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain, sema_init(&chain->sem, 1); - cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); - chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL); + chain->cell_ctxt = devm_kcalloc(&pdev->dev, chain->num_cells, + sizeof(*chain->cell_ctxt), GFP_KERNEL); if (!chain->cell_ctxt) return -ENOMEM; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index 5a6bbee819cd..dff979f5d08b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -796,11 +796,10 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev, struct hinic_cmdq_ctxt *cmdq_ctxts; struct pci_dev *pdev = hwif->pdev; struct hinic_pfhwdev *pfhwdev; - size_t cmdq_ctxts_size; int err; - cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts); - cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL); + cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES, + sizeof(*cmdq_ctxts), GFP_KERNEL); if (!cmdq_ctxts) return -ENOMEM; @@ -884,7 +883,6 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); struct pci_dev *pdev = hwif->pdev; struct hinic_hwdev *hwdev; - size_t saved_wqs_size; u16 
max_wqe_size; int err; @@ -895,8 +893,8 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, if (!cmdqs->cmdq_buf_pool) return -ENOMEM; - saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq); - cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL); + cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES, + sizeof(*cmdqs->saved_wqs), GFP_KERNEL); if (!cmdqs->saved_wqs) { err = -ENOMEM; goto err_saved_wqs; @@ -931,7 +929,7 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, err_set_cmdq_depth: hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ); - + free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]); err_cmdq_ctxt: hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, HINIC_MAX_CMDQ_TYPES); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 0c74f6674634..bcf2476512a5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -162,7 +162,6 @@ static int init_msix(struct hinic_hwdev *hwdev) struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; int nr_irqs, num_aeqs, num_ceqs; - size_t msix_entries_size; int i, err; num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); @@ -171,8 +170,8 @@ static int init_msix(struct hinic_hwdev *hwdev) if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif)) nr_irqs = HINIC_HWIF_NUM_IRQS(hwif); - msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries); - hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size, + hwdev->msix_entries = devm_kcalloc(&pdev->dev, nr_irqs, + sizeof(*hwdev->msix_entries), GFP_KERNEL); if (!hwdev->msix_entries) return -ENOMEM; @@ -893,7 +892,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev, if (err) return -EINVAL; - interrupt_info->lli_credit_cnt = temp_info.lli_timer_cnt; + interrupt_info->lli_credit_cnt = temp_info.lli_credit_cnt; interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt; err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c index 19942fef99d9..7396158df64f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -631,16 +631,15 @@ static int alloc_eq_pages(struct hinic_eq *eq) struct hinic_hwif *hwif = eq->hwif; struct pci_dev *pdev = hwif->pdev; u32 init_val, addr, val; - size_t addr_size; int err, pg; - addr_size = eq->num_pages * sizeof(*eq->dma_addr); - eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); + eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages, + sizeof(*eq->dma_addr), GFP_KERNEL); if (!eq->dma_addr) return -ENOMEM; - addr_size = eq->num_pages * sizeof(*eq->virt_addr); - eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); + eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages, + sizeof(*eq->virt_addr), GFP_KERNEL); if (!eq->virt_addr) { err = -ENOMEM; goto err_virt_addr_alloc; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index 819fa13034c0..027dcc453506 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -647,6 +647,7 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, err = alloc_msg_buf(pf_to_mgmt); if (err) { dev_err(&pdev->dev, "Failed to allocate msg buffers\n"); + destroy_workqueue(pf_to_mgmt->workq); 
hinic_health_reporters_destroy(hwdev->devlink_dev); return err; } @@ -654,6 +655,7 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif); if (err) { dev_err(&pdev->dev, "Failed to initialize cmd chains\n"); + destroy_workqueue(pf_to_mgmt->workq); hinic_health_reporters_destroy(hwdev->devlink_dev); return err; } } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c index 5dc3743f8091..f930cd6a75f7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -192,20 +192,20 @@ static int alloc_page_arrays(struct hinic_wqs *wqs) { struct hinic_hwif *hwif = wqs->hwif; struct pci_dev *pdev = hwif->pdev; - size_t size; - size = wqs->num_pages * sizeof(*wqs->page_paddr); - wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages, + sizeof(*wqs->page_paddr), GFP_KERNEL); if (!wqs->page_paddr) return -ENOMEM; - size = wqs->num_pages * sizeof(*wqs->page_vaddr); - wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages, + sizeof(*wqs->page_vaddr), GFP_KERNEL); if (!wqs->page_vaddr) goto err_page_vaddr; - size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr); - wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages, + sizeof(*wqs->shadow_page_vaddr), + GFP_KERNEL); if (!wqs->shadow_page_vaddr) goto err_page_shadow_vaddr; @@ -378,15 +378,14 @@ static int alloc_wqes_shadow(struct hinic_wq *wq) { struct hinic_hwif *hwif = wq->hwif; struct pci_dev *pdev = hwif->pdev; - size_t size; - size = wq->num_q_pages * wq->max_wqe_size; - wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages, + wq->max_wqe_size, GFP_KERNEL); if (!wq->shadow_wqe) return -ENOMEM; - size = wq->num_q_pages * sizeof(wq->prod_idx); - wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages, + sizeof(*wq->shadow_idx), GFP_KERNEL); if (!wq->shadow_idx) goto err_shadow_idx; @@ -771,7 +770,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, /* If we only have one page, still need to get shadow wqe when * wqe rolling-over page */ - if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) { + if (curr_pg != end_pg || end_prod_idx < *prod_idx) { void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx); @@ -841,7 +840,10 @@ struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, *cons_idx = curr_cons_idx; - if (curr_pg != end_pg) { + /* If we only have one page, still need to get shadow wqe when + * wqe rolling-over page + */ + if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) { void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 350225bbe0be..6ec042d48cd1 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -62,8 +62,6 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); #define HINIC_LRO_RX_TIMER_DEFAULT 16
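The hinic hunks above repeatedly convert devm_kzalloc(dev, n * size, ...) into devm_kcalloc(dev, n, size, ...). Both return zeroed, device-managed memory; the two-argument form additionally rejects a multiplication that would overflow size_t instead of silently returning an undersized buffer. A minimal sketch of the pattern (the struct and function names here are hypothetical, not from the driver):

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/types.h>

struct elem {			/* hypothetical element type */
	u64 paddr;
	void *vaddr;
};

/* Before: the multiply can wrap, so a huge n yields a short allocation. */
static struct elem *alloc_elems_old(struct device *dev, size_t n)
{
	return devm_kzalloc(dev, n * sizeof(struct elem), GFP_KERNEL);
}

/* After: devm_kcalloc() returns NULL if n * sizeof(struct elem) overflows. */
static struct elem *alloc_elems_new(struct device *dev, size_t n)
{
	return devm_kcalloc(dev, n, sizeof(struct elem), GFP_KERNEL);
}

The same reasoning applies to the plain kcalloc() conversions further down the series, such as the rx_bi allocation added to i40e_setup_rx_descriptors().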
-#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8) - #define work_to_rx_mode_work(work) \ container_of(work, struct hinic_rx_mode_work, work) @@ -82,56 +80,44 @@ static int set_features(struct hinic_dev *nic_dev, netdev_features_t pre_features, netdev_features_t features, bool force_change); -static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq) +static void gather_rx_stats(struct hinic_rxq_stats *nic_rx_stats, struct hinic_rxq *rxq) { - struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats; struct hinic_rxq_stats rx_stats; - u64_stats_init(&rx_stats.syncp); - hinic_rxq_get_stats(rxq, &rx_stats); - u64_stats_update_begin(&nic_rx_stats->syncp); nic_rx_stats->bytes += rx_stats.bytes; nic_rx_stats->pkts += rx_stats.pkts; nic_rx_stats->errors += rx_stats.errors; nic_rx_stats->csum_errors += rx_stats.csum_errors; nic_rx_stats->other_errors += rx_stats.other_errors; - u64_stats_update_end(&nic_rx_stats->syncp); - - hinic_rxq_clean_stats(rxq); } -static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq) +static void gather_tx_stats(struct hinic_txq_stats *nic_tx_stats, struct hinic_txq *txq) { - struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats; struct hinic_txq_stats tx_stats; - u64_stats_init(&tx_stats.syncp); - hinic_txq_get_stats(txq, &tx_stats); - u64_stats_update_begin(&nic_tx_stats->syncp); nic_tx_stats->bytes += tx_stats.bytes; nic_tx_stats->pkts += tx_stats.pkts; nic_tx_stats->tx_busy += tx_stats.tx_busy; nic_tx_stats->tx_wake += tx_stats.tx_wake; nic_tx_stats->tx_dropped += tx_stats.tx_dropped; nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts; - u64_stats_update_end(&nic_tx_stats->syncp); - - hinic_txq_clean_stats(txq); } -static void update_nic_stats(struct hinic_dev *nic_dev) +static void gather_nic_stats(struct hinic_dev *nic_dev, + struct hinic_rxq_stats *nic_rx_stats, + struct hinic_txq_stats *nic_tx_stats) { int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); for (i = 0; i < num_qps; i++) - update_rx_stats(nic_dev, &nic_dev->rxqs[i]); + gather_rx_stats(nic_rx_stats, &nic_dev->rxqs[i]); for (i = 0; i < num_qps; i++) - update_tx_stats(nic_dev, &nic_dev->txqs[i]); + gather_tx_stats(nic_tx_stats, &nic_dev->txqs[i]); } /** @@ -144,13 +130,12 @@ static int create_txqs(struct hinic_dev *nic_dev) { int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; - size_t txq_size; if (nic_dev->txqs) return -EINVAL; - txq_size = num_txqs * sizeof(*nic_dev->txqs); - nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL); + nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs, + sizeof(*nic_dev->txqs), GFP_KERNEL); if (!nic_dev->txqs) return -ENOMEM; @@ -242,13 +227,12 @@ static int create_rxqs(struct hinic_dev *nic_dev) { int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; - size_t rxq_size; if (nic_dev->rxqs) return -EINVAL; - rxq_size = num_rxqs * sizeof(*nic_dev->rxqs); - nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL); + nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs, + sizeof(*nic_dev->rxqs), GFP_KERNEL); if (!nic_dev->rxqs) return -ENOMEM; @@ -569,8 +553,6 @@ int hinic_close(struct net_device *netdev) netif_carrier_off(netdev); netif_tx_disable(netdev); - update_nic_stats(nic_dev); - up(&nic_dev->mgmt_lock); if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) @@ -864,26 +846,19 @@ static void hinic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct hinic_dev 
*nic_dev = netdev_priv(netdev); - struct hinic_rxq_stats *nic_rx_stats; - struct hinic_txq_stats *nic_tx_stats; - - nic_rx_stats = &nic_dev->rx_stats; - nic_tx_stats = &nic_dev->tx_stats; - - down(&nic_dev->mgmt_lock); + struct hinic_rxq_stats nic_rx_stats = {}; + struct hinic_txq_stats nic_tx_stats = {}; if (nic_dev->flags & HINIC_INTF_UP) - update_nic_stats(nic_dev); + gather_nic_stats(nic_dev, &nic_rx_stats, &nic_tx_stats); - up(&nic_dev->mgmt_lock); + stats->rx_bytes = nic_rx_stats.bytes; + stats->rx_packets = nic_rx_stats.pkts; + stats->rx_errors = nic_rx_stats.errors; - stats->rx_bytes = nic_rx_stats->bytes; - stats->rx_packets = nic_rx_stats->pkts; - stats->rx_errors = nic_rx_stats->errors; - - stats->tx_bytes = nic_tx_stats->bytes; - stats->tx_packets = nic_tx_stats->pkts; - stats->tx_errors = nic_tx_stats->tx_dropped; + stats->tx_bytes = nic_tx_stats.bytes; + stats->tx_packets = nic_tx_stats.pkts; + stats->tx_errors = nic_tx_stats.tx_dropped; } static int hinic_set_features(struct net_device *netdev, @@ -1182,8 +1157,6 @@ static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev) static int nic_dev_init(struct pci_dev *pdev) { struct hinic_rx_mode_work *rx_mode_work; - struct hinic_txq_stats *tx_stats; - struct hinic_rxq_stats *rx_stats; struct hinic_dev *nic_dev; struct net_device *netdev; struct hinic_hwdev *hwdev; @@ -1244,15 +1217,8 @@ static int nic_dev_init(struct pci_dev *pdev) sema_init(&nic_dev->mgmt_lock, 1); - tx_stats = &nic_dev->tx_stats; - rx_stats = &nic_dev->rx_stats; - - u64_stats_init(&tx_stats->syncp); - u64_stats_init(&rx_stats->syncp); - - nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev, - VLAN_BITMAP_SIZE(nic_dev), - GFP_KERNEL); + nic_dev->vlan_bitmap = devm_bitmap_zalloc(&pdev->dev, VLAN_N_VID, + GFP_KERNEL); if (!nic_dev->vlan_bitmap) { err = -ENOMEM; goto err_vlan_bitmap; @@ -1536,8 +1502,15 @@ static struct pci_driver hinic_driver = { static int __init hinic_module_init(void) { + int ret; + hinic_dbg_register_debugfs(HINIC_DRV_NAME); - return pci_register_driver(&hinic_driver); + + ret = pci_register_driver(&hinic_driver); + if (ret) + hinic_dbg_unregister_debugfs(); + + return ret; } static void __exit hinic_module_exit(void) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 070a7cc6392e..30ab05289e8d 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -73,17 +73,15 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats) struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; unsigned int start; - u64_stats_update_begin(&stats->syncp); do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); + start = u64_stats_fetch_begin_irq(&rxq_stats->syncp); stats->pkts = rxq_stats->pkts; stats->bytes = rxq_stats->bytes; stats->errors = rxq_stats->csum_errors + rxq_stats->other_errors; stats->csum_errors = rxq_stats->csum_errors; stats->other_errors = rxq_stats->other_errors; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); - u64_stats_update_end(&stats->syncp); + } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start)); } /** diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index f8a26459ff65..4d82ebfe27f9 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -1178,7 +1178,6 @@ int hinic_vf_func_init(struct hinic_hwdev *hwdev) dev_err(&hwdev->hwif->pdev->dev, "Failed to register 
VF, err: %d, status: 0x%x, out size: 0x%x\n", err, register_info.status, out_size); - hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC); return -EIO; } } else { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 8da7d46363b2..c12d814ac94a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -97,17 +97,15 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) struct hinic_txq_stats *txq_stats = &txq->txq_stats; unsigned int start; - u64_stats_update_begin(&stats->syncp); do { - start = u64_stats_fetch_begin(&txq_stats->syncp); + start = u64_stats_fetch_begin_irq(&txq_stats->syncp); stats->pkts = txq_stats->pkts; stats->bytes = txq_stats->bytes; stats->tx_busy = txq_stats->tx_busy; stats->tx_wake = txq_stats->tx_wake; stats->tx_dropped = txq_stats->tx_dropped; stats->big_frags_pkts = txq_stats->big_frags_pkts; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); - u64_stats_update_end(&stats->syncp); + } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start)); } /** @@ -861,7 +859,6 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_hwdev *hwdev = nic_dev->hwdev; int err, irqname_len; - size_t sges_size; txq->netdev = netdev; txq->sq = sq; @@ -870,13 +867,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, txq->max_sges = HINIC_MAX_SQ_BUFDESCS; - sges_size = txq->max_sges * sizeof(*txq->sges); - txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); + txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges, + sizeof(*txq->sges), GFP_KERNEL); if (!txq->sges) return -ENOMEM; - sges_size = txq->max_sges * sizeof(*txq->free_sges); - txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); + txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges, + sizeof(*txq->free_sges), GFP_KERNEL); if (!txq->free_sges) { err = -ENOMEM; goto err_alloc_free_sges; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index f63066736425..28a5f8d73a61 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2897,6 +2897,7 @@ static struct device *ehea_register_port(struct ehea_port *port, ret = of_device_register(&port->ofdev); if (ret) { pr_err("failed to register device. 
ret=%d\n", ret); + put_device(&port->ofdev.dev); goto out; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 95bee3d91593..7fe2e47dc83d 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -117,7 +117,7 @@ struct ibmvnic_stat { #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \ offsetof(struct ibmvnic_statistics, stat)) -#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off))) +#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off)))) static const struct ibmvnic_stat ibmvnic_stats[] = { {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)}, @@ -2063,14 +2063,14 @@ static int do_reset(struct ibmvnic_adapter *adapter, rc = reset_tx_pools(adapter); if (rc) { netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", - rc); + rc); goto out; } rc = reset_rx_pools(adapter); if (rc) { netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", - rc); + rc); goto out; } } @@ -2331,7 +2331,8 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, if (adapter->state == VNIC_PROBING) { netdev_warn(netdev, "Adapter reset during probe\n"); - ret = adapter->init_done_rc = EAGAIN; + adapter->init_done_rc = EAGAIN; + ret = EAGAIN; goto err; } @@ -2665,13 +2666,8 @@ static void ibmvnic_get_ringparam(struct net_device *netdev, { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { - ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; - ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; - } else { - ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ; - ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ; - } + ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; + ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; @@ -2684,23 +2680,21 @@ static int ibmvnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int ret; - ret = 0; + if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq || + ring->tx_pending > adapter->max_tx_entries_per_subcrq) { + netdev_err(netdev, "Invalid request.\n"); + netdev_err(netdev, "Max tx buffers = %llu\n", + adapter->max_rx_add_entries_per_subcrq); + netdev_err(netdev, "Max rx buffers = %llu\n", + adapter->max_tx_entries_per_subcrq); + return -EINVAL; + } + adapter->desired.rx_entries = ring->rx_pending; adapter->desired.tx_entries = ring->tx_pending; - ret = wait_for_reset(adapter); - - if (!ret && - (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending || - adapter->req_tx_entries_per_subcrq != ring->tx_pending)) - netdev_info(netdev, - "Could not match full ringsize request. 
Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", - ring->rx_pending, ring->tx_pending, - adapter->req_rx_add_entries_per_subcrq, - adapter->req_tx_entries_per_subcrq); - return ret; + return wait_for_reset(adapter); } static void ibmvnic_get_channels(struct net_device *netdev, @@ -2708,14 +2702,8 @@ static void ibmvnic_get_channels(struct net_device *netdev, { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { - channels->max_rx = adapter->max_rx_queues; - channels->max_tx = adapter->max_tx_queues; - } else { - channels->max_rx = IBMVNIC_MAX_QUEUES; - channels->max_tx = IBMVNIC_MAX_QUEUES; - } - + channels->max_rx = adapter->max_rx_queues; + channels->max_tx = adapter->max_tx_queues; channels->max_other = 0; channels->max_combined = 0; channels->rx_count = adapter->req_rx_queues; @@ -2728,23 +2716,11 @@ static int ibmvnic_set_channels(struct net_device *netdev, struct ethtool_channels *channels) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int ret; - ret = 0; adapter->desired.rx_queues = channels->rx_count; adapter->desired.tx_queues = channels->tx_count; - ret = wait_for_reset(adapter); - - if (!ret && - (adapter->req_rx_queues != channels->rx_count || - adapter->req_tx_queues != channels->tx_count)) - netdev_info(netdev, - "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n", - channels->rx_count, channels->tx_count, - adapter->req_rx_queues, adapter->req_tx_queues); - return ret; - + return wait_for_reset(adapter); } static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -2752,43 +2728,32 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) struct ibmvnic_adapter *adapter = netdev_priv(dev); int i; - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); - i++, data += ETH_GSTRING_LEN) - memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); - - for (i = 0; i < adapter->req_tx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); - data += ETH_GSTRING_LEN; - - snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); - data += ETH_GSTRING_LEN; - - snprintf(data, ETH_GSTRING_LEN, - "tx%d_dropped_packets", i); - data += ETH_GSTRING_LEN; - } - - for (i = 0; i < adapter->req_rx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); - data += ETH_GSTRING_LEN; - - snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); - data += ETH_GSTRING_LEN; - - snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); - data += ETH_GSTRING_LEN; - } - break; - - case ETH_SS_PRIV_FLAGS: - for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++) - strcpy(data + i * ETH_GSTRING_LEN, - ibmvnic_priv_flags[i]); - break; - default: + if (stringset != ETH_SS_STATS) return; + + for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) + memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); + + for (i = 0; i < adapter->req_tx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); + data += ETH_GSTRING_LEN; + } + + for (i = 0; i < adapter->req_rx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); + data += 
ETH_GSTRING_LEN; } } @@ -2801,8 +2766,6 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset) return ARRAY_SIZE(ibmvnic_stats) + adapter->req_tx_queues * NUM_TX_STATS + adapter->req_rx_queues * NUM_RX_STATS; - case ETH_SS_PRIV_FLAGS: - return ARRAY_SIZE(ibmvnic_priv_flags); default: return -EOPNOTSUPP; } @@ -2833,8 +2796,8 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, return; for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) - data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter, - ibmvnic_stats[i].offset)); + data[i] = be64_to_cpu(IBMVNIC_GET_STAT + (adapter, ibmvnic_stats[i].offset)); for (j = 0; j < adapter->req_tx_queues; j++) { data[i] = adapter->tx_stats_buffers[j].packets; @@ -2855,25 +2818,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, } } -static u32 ibmvnic_get_priv_flags(struct net_device *netdev) -{ - struct ibmvnic_adapter *adapter = netdev_priv(netdev); - - return adapter->priv_flags; -} - -static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags) -{ - struct ibmvnic_adapter *adapter = netdev_priv(netdev); - bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES); - - if (which_maxes) - adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES; - else - adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES; - - return 0; -} static const struct ethtool_ops ibmvnic_ethtool_ops = { .get_drvinfo = ibmvnic_get_drvinfo, .get_msglevel = ibmvnic_get_msglevel, @@ -2887,8 +2831,6 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = { .get_sset_count = ibmvnic_get_sset_count, .get_ethtool_stats = ibmvnic_get_ethtool_stats, .get_link_ksettings = ibmvnic_get_link_ksettings, - .get_priv_flags = ibmvnic_get_priv_flags, - .set_priv_flags = ibmvnic_set_priv_flags, }; /* Routines for managing CRQs/sCRQs */ @@ -3119,7 +3061,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, /* H_EOI would fail with rc = H_FUNCTION when running * in XIVE mode which is expected, but not an error. */ - if (rc && (rc != H_FUNCTION)) + if (rc && rc != H_FUNCTION) dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc); } @@ -5286,6 +5228,15 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) release_sub_crqs(adapter, 0); rc = init_sub_crqs(adapter); } else { + /* no need to reinitialize completely, but we do + * need to clean up transmits that were in flight + * when we processed the reset. Failure to do so + * will confound the upper layer, usually TCP, by + * creating the illusion of transmits that are + * awaiting completion. 
+ */ + clean_tx_pools(adapter); + rc = reset_sub_crq_queues(adapter); } } else { diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index b27211063c64..2eb9a4d5533a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -41,11 +41,6 @@ #define IBMVNIC_RESET_DELAY 100 -static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = { -#define IBMVNIC_USE_SERVER_MAXES 0x1 - "use-server-maxes" -}; - struct ibmvnic_login_buffer { __be32 len; __be32 version; @@ -974,7 +969,6 @@ struct ibmvnic_adapter { struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl; dma_addr_t ip_offload_ctrl_tok; u32 msg_enable; - u32 priv_flags; /* Vital Product Data (VPD) */ struct ibmvnic_vpd *vpd; diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 9295a9a1efc7..001850d578e8 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1739,14 +1739,11 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb, dma_addr_t dma_addr; cb->command = nic->tx_command; - dma_addr = pci_map_single(nic->pdev, - skb->data, skb->len, PCI_DMA_TODEVICE); + dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); /* If we can't map the skb, have the upper layer try later */ - if (pci_dma_mapping_error(nic->pdev, dma_addr)) { - dev_kfree_skb_any(skb); - skb = NULL; + if (dma_mapping_error(&nic->pdev->dev, dma_addr)) return -ENOMEM; - } /* * Use the last 4 bytes of the SKB payload packet as the CRC, used for @@ -1828,10 +1825,10 @@ static int e100_tx_clean(struct nic *nic) dev->stats.tx_packets++; dev->stats.tx_bytes += cb->skb->len; - pci_unmap_single(nic->pdev, - le32_to_cpu(cb->u.tcb.tbd.buf_addr), - le16_to_cpu(cb->u.tcb.tbd.size), - PCI_DMA_TODEVICE); + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); dev_kfree_skb_any(cb->skb); cb->skb = NULL; tx_cleaned = 1; @@ -1855,10 +1852,10 @@ static void e100_clean_cbs(struct nic *nic) while (nic->cbs_avail != nic->params.cbs.count) { struct cb *cb = nic->cb_to_clean; if (cb->skb) { - pci_unmap_single(nic->pdev, - le32_to_cpu(cb->u.tcb.tbd.buf_addr), - le16_to_cpu(cb->u.tcb.tbd.size), - PCI_DMA_TODEVICE); + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); dev_kfree_skb(cb->skb); } nic->cb_to_clean = nic->cb_to_clean->next; @@ -1925,10 +1922,10 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) /* Init, and map the RFD. 
*/ skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); - rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, - RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { dev_kfree_skb_any(rx->skb); rx->skb = NULL; rx->dma_addr = 0; @@ -1941,8 +1938,10 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) if (rx->prev->skb) { struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; put_unaligned_le32(rx->dma_addr, &prev_rfd->link); - pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, - sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(&nic->pdev->dev, + rx->prev->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); } return 0; @@ -1961,8 +1960,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, return -EAGAIN; /* Need to sync before taking a peek at cb_complete bit */ - pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, - sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); rfd_status = le16_to_cpu(rfd->status); netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, @@ -1981,9 +1980,9 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, if (ioread8(&nic->csr->scb.status) & rus_no_res) nic->ru_running = RU_SUSPENDED; - pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, - sizeof(struct rfd), - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), + DMA_FROM_DEVICE); return -ENODATA; } @@ -1995,8 +1994,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, actual_size = RFD_BUF_LEN - sizeof(struct rfd); /* Get data */ - pci_unmap_single(nic->pdev, rx->dma_addr, - RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); /* If this buffer has the el bit, but we think the receiver * is still running, check to see if it really stopped while @@ -2097,22 +2096,25 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done, (struct rfd *)new_before_last_rx->skb->data; new_before_last_rfd->size = 0; new_before_last_rfd->command |= cpu_to_le16(cb_el); - pci_dma_sync_single_for_device(nic->pdev, - new_before_last_rx->dma_addr, sizeof(struct rfd), - PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(&nic->pdev->dev, + new_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); /* Now that we have a new stopping point, we can clear the old * stopping point. We must sync twice to get the proper * ordering on the hardware side of things. 
*/ old_before_last_rfd->command &= ~cpu_to_le16(cb_el); - pci_dma_sync_single_for_device(nic->pdev, - old_before_last_rx->dma_addr, sizeof(struct rfd), - PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); - pci_dma_sync_single_for_device(nic->pdev, - old_before_last_rx->dma_addr, sizeof(struct rfd), - PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); } if (restart_required) { @@ -2134,8 +2136,9 @@ static void e100_rx_clean_list(struct nic *nic) if (nic->rxs) { for (rx = nic->rxs, i = 0; i < count; rx++, i++) { if (rx->skb) { - pci_unmap_single(nic->pdev, rx->dma_addr, - RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&nic->pdev->dev, + rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); dev_kfree_skb(rx->skb); } } @@ -2177,8 +2180,8 @@ static int e100_rx_alloc_list(struct nic *nic) before_last = (struct rfd *)rx->skb->data; before_last->command |= cpu_to_le16(cb_el); before_last->size = 0; - pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, - sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); nic->rx_to_use = nic->rx_to_clean = nic->rxs; nic->ru_running = RU_SUSPENDED; @@ -2377,8 +2380,8 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) msleep(10); - pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr, - RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), skb->data, ETH_DATA_LEN)) @@ -2759,16 +2762,16 @@ static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) static int e100_alloc(struct nic *nic) { - nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem), - &nic->dma_addr); + nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), + &nic->dma_addr, GFP_KERNEL); return nic->mem ? 
0 : -ENOMEM; } static void e100_free(struct nic *nic) { if (nic->mem) { - pci_free_consistent(nic->pdev, sizeof(struct mem), - nic->mem, nic->dma_addr); + dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), + nic->mem, nic->dma_addr); nic->mem = NULL; } } @@ -2861,7 +2864,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_disable_pdev; } - if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); goto err_out_free_res; } diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 15b1503d5b6c..1f51252b465a 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1006,8 +1006,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) { u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; - u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ - u16 lat_enc_d = 0; /* latency decoded */ + u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ + u32 lat_enc_d = 0; /* latency decoded */ u16 lat_enc = 0; /* latency encoded */ if (link) { diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index d0c4de023112..ae0c9aaab48d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5937,9 +5937,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, e1000_tx_queue(tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. */ e1000_maybe_stop_tx(tx_ring, - (MAX_SKB_FRAGS * + ((MAX_SKB_FRAGS + 1) * DIV_ROUND_UP(PAGE_SIZE, - adapter->tx_fifo_limit) + 2)); + adapter->tx_fifo_limit) + 4)); if (!netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 99b8252eb969..a388a0fcbeed 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -32,6 +32,8 @@ struct workqueue_struct *fm10k_workqueue; **/ static int __init fm10k_init_module(void) { + int ret; + pr_info("%s\n", fm10k_driver_string); pr_info("%s\n", fm10k_copyright); @@ -43,7 +45,13 @@ static int __init fm10k_init_module(void) fm10k_dbg_init(); - return fm10k_register_pci_driver(); + ret = fm10k_register_pci_driver(); + if (ret) { + fm10k_dbg_exit(); + destroy_workqueue(fm10k_workqueue); + } + + return ret; } module_init(fm10k_init_module); diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index effdc3361266..dd630b6bc74b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -37,6 +37,7 @@ #include #include #include +#include #include "i40e_type.h" #include "i40e_prototype.h" #include @@ -991,6 +992,21 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf, (u32)(val & 0xFFFFFFFFULL)); } +/** + * i40e_get_pf_count - get PCI PF count. + * @hw: pointer to a hw. + * + * Reports the function number of the highest PCI physical + * function plus 1 as it is loaded from the NVM. + * + * Return: PCI PF count. 
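 * (Editorial aside, not part of the patch: per the i40e_register.h hunk later
 *  in this series, I40E_GLGEN_PCIFCNCNT sits at 0x001C0AB4 with the PF-count
 *  mask 0x1F at shift 0, so the FIELD_GET() below simply extracts bits 4:0 of
 *  that register.)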
+ **/ +static inline u32 i40e_get_pf_count(struct i40e_hw *hw) +{ + return FIELD_GET(I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK, + rd32(hw, I40E_GLGEN_PCIFCNCNT)); +} + /* needed by i40e_ethtool.c */ int i40e_up(struct i40e_vsi *vsi); void i40e_down(struct i40e_vsi *vsi); diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 32f3facbed1a..b3cb5d103326 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -178,6 +178,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) "Cannot locate client instance close routine\n"); return; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { + dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n"); + return; + } cdev->client->ops->close(&cdev->lan_info, cdev->client, reset); clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); i40e_client_release_qvlist(&cdev->lan_info); @@ -374,7 +378,6 @@ void i40e_client_subtask(struct i40e_pf *pf) /* Remove failed client instance */ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); - i40e_client_del_instance(pf); return; } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index a2bdb2906519..520929f4d535 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2081,9 +2081,6 @@ static int i40e_set_ringparam(struct net_device *netdev, */ rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS; err = i40e_setup_rx_descriptors(&rx_rings[i]); - if (err) - goto rx_unwind; - err = i40e_alloc_rx_bi(&rx_rings[i]); if (err) goto rx_unwind; @@ -2582,15 +2579,16 @@ static void i40e_diag_test(struct net_device *netdev, set_bit(__I40E_TESTING, pf->state); + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { + dev_warn(&pf->pdev->dev, + "Cannot start offline testing when PF is in reset state.\n"); + goto skip_ol_tests; + } + if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) { dev_warn(&pf->pdev->dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n"); - data[I40E_ETH_TEST_REG] = 1; - data[I40E_ETH_TEST_EEPROM] = 1; - data[I40E_ETH_TEST_INTR] = 1; - data[I40E_ETH_TEST_LINK] = 1; - eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__I40E_TESTING, pf->state); goto skip_ol_tests; } @@ -2637,9 +2635,17 @@ static void i40e_diag_test(struct net_device *netdev, data[I40E_ETH_TEST_INTR] = 0; } -skip_ol_tests: - netif_info(pf, drv, netdev, "testing finished\n"); + return; + +skip_ol_tests: + data[I40E_ETH_TEST_REG] = 1; + data[I40E_ETH_TEST_EEPROM] = 1; + data[I40E_ETH_TEST_INTR] = 1; + data[I40E_ETH_TEST_LINK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__I40E_TESTING, pf->state); + netif_info(pf, drv, netdev, "testing failed\n"); } static void i40e_get_wol(struct net_device *netdev, @@ -3077,10 +3083,17 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) if (cmd->flow_type == TCP_V4_FLOW || cmd->flow_type == UDP_V4_FLOW) { - if (i_set & I40E_L3_SRC_MASK) - cmd->data |= RXH_IP_SRC; - if (i_set & I40E_L3_DST_MASK) - cmd->data |= RXH_IP_DST; + if (hw->mac.type == I40E_MAC_X722) { + if (i_set & I40E_X722_L3_SRC_MASK) + cmd->data |= RXH_IP_SRC; + if (i_set & I40E_X722_L3_DST_MASK) + cmd->data |= RXH_IP_DST; + } else { + if (i_set & I40E_L3_SRC_MASK) + cmd->data |= RXH_IP_SRC; + if (i_set & I40E_L3_DST_MASK) 
+ cmd->data |= RXH_IP_DST; + } } else if (cmd->flow_type == TCP_V6_FLOW || cmd->flow_type == UDP_V6_FLOW) { if (i_set & I40E_L3_V6_SRC_MASK) @@ -3387,12 +3400,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, /** * i40e_get_rss_hash_bits - Read RSS Hash bits from register + * @hw: hw structure * @nfc: pointer to user request * @i_setc: bits currently set * * Returns value of bits to be set per user request **/ -static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) +static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw, + struct ethtool_rxnfc *nfc, + u64 i_setc) { u64 i_set = i_setc; u64 src_l3 = 0, dst_l3 = 0; @@ -3411,8 +3427,13 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) dst_l3 = I40E_L3_V6_DST_MASK; } else if (nfc->flow_type == TCP_V4_FLOW || nfc->flow_type == UDP_V4_FLOW) { - src_l3 = I40E_L3_SRC_MASK; - dst_l3 = I40E_L3_DST_MASK; + if (hw->mac.type == I40E_MAC_X722) { + src_l3 = I40E_X722_L3_SRC_MASK; + dst_l3 = I40E_X722_L3_DST_MASK; + } else { + src_l3 = I40E_L3_SRC_MASK; + dst_l3 = I40E_L3_DST_MASK; + } } else { /* Any other flow type are not supported here */ return i_set; @@ -3430,6 +3451,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) return i_set; } +#define FLOW_PCTYPES_SIZE 64 /** * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash * @pf: pointer to the physical function struct @@ -3442,9 +3464,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) struct i40e_hw *hw = &pf->hw; u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); - u8 flow_pctype = 0; + DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE); u64 i_set, i_setc; + bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE); + if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, "Change of RSS hash input set is not supported when MFP mode is enabled\n"); @@ -3460,36 +3484,35 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) switch (nfc->flow_type) { case TCP_V4_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes); if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK, + flow_pctypes); break; case TCP_V6_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; + set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes); if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); + set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK, + flow_pctypes); break; case UDP_V4_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); - + set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes); + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP, + flow_pctypes); + set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP, + flow_pctypes); + } hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); break; case UDP_V6_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; - if (pf->hw_features & 
I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); - + set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes); + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP, + flow_pctypes); + set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP, + flow_pctypes); + } hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); break; case AH_ESP_V4_FLOW: @@ -3522,17 +3545,20 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) return -EINVAL; } - if (flow_pctype) { - i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, - flow_pctype)) | - ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, - flow_pctype)) << 32); - i_set = i40e_get_rss_hash_bits(nfc, i_setc); - i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype), - (u32)i_set); - i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype), - (u32)(i_set >> 32)); - hena |= BIT_ULL(flow_pctype); + if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) { + u8 flow_id; + + for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) { + i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) | + ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32); + i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc); + + i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id), + (u32)i_set); + i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id), + (u32)(i_set >> 32)); + hena |= BIT_ULL(flow_id); + } } i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); @@ -4208,11 +4234,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, return -EOPNOTSUPP; /* First 4 bytes of L4 header */ - if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF)) - new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK; - else if (!usr_ip4_spec->l4_4_bytes) - new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK); - else + if (usr_ip4_spec->l4_4_bytes) return -EOPNOTSUPP; /* Filtering on Type of Service is not supported. */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index bd18a780a000..2c60d2a93330 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -382,7 +382,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue) set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); break; default: - netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n"); + set_bit(__I40E_DOWN_REQUESTED, pf->state); + set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state); break; } @@ -548,6 +550,47 @@ void i40e_pf_reset_stats(struct i40e_pf *pf) pf->hw_csum_rx_error = 0; } +/** + * i40e_compute_pci_to_hw_id - compute index from PCI function. + * @vsi: ptr to the VSI to read from. + * @hw: ptr to the hardware info. + **/ +static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw) +{ + int pf_count = i40e_get_pf_count(hw); + + if (vsi->type == I40E_VSI_SRIOV) + return (hw->port * BIT(7)) / pf_count + vsi->vf_id; + + return hw->port + BIT(7); +} + +/** + * i40e_stat_update64 - read and update a 64 bit stat from the chip. + * @hw: ptr to the hardware info. + * @hireg: the high 32 bit reg to read. + * @loreg: the low 32 bit reg to read. + * @offset_loaded: has the initial offset been loaded yet. + * @offset: ptr to current offset value. + * @stat: ptr to the stat. + * + * Since the device stats are not reset at PFReset, they will not + * be zeroed when the driver starts. We'll save the first values read + * and use them as offsets to be subtracted from the raw values in order + * to report stats that count from zero. + **/ +static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg, + bool offset_loaded, u64 *offset, u64 *stat) +{ + u64 new_data; + + new_data = rd64(hw, loreg); + + if (!offset_loaded || new_data < *offset) + *offset = new_data; + *stat = new_data - *offset; +} + /** * i40e_stat_update48 - read and update a 48 bit stat from the chip * @hw: ptr to the hardware info @@ -619,6 +662,34 @@ static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat) *stat += new_data; } +/** + * i40e_stats_update_rx_discards - update rx_discards. + * @vsi: ptr to the VSI to be updated. + * @hw: ptr to the hardware info. + * @stat_idx: VSI's stat_counter_idx. + * @offset_loaded: ptr to the VSI's stat_offsets_loaded. + * @stat_offset: ptr to stat_offset to store first read of specific register. + * @stat: ptr to VSI's stat to be updated. + **/ +static void +i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw, + int stat_idx, bool offset_loaded, + struct i40e_eth_stats *stat_offset, + struct i40e_eth_stats *stat) +{ + u64 rx_rdpc, rx_rxerr; + + i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded, + &stat_offset->rx_discards, &rx_rdpc); + i40e_stat_update64(hw, + I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)), + I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)), + offset_loaded, &stat_offset->rx_discards_other, + &rx_rxerr); + + stat->rx_discards = rx_rdpc + rx_rxerr; +} + /** * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters. * @vsi: the VSI to be updated @@ -678,6 +749,10 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi) I40E_GLV_BPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); + + i40e_stats_update_rx_discards(vsi, hw, stat_idx, + vsi->stat_offsets_loaded, oes, es); + vsi->stat_offsets_loaded = true; } @@ -1834,11 +1909,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, * non-zero req_queue_pairs says that user requested a new * queue count via ethtool's set_channels, so use this * value for queues distribution across traffic classes + * We need at least one queue pair for the interface + * to be usable as we see in the else statement.
*/ if (vsi->req_queue_pairs > 0) vsi->num_queue_pairs = vsi->req_queue_pairs; else if (pf->flags & I40E_FLAG_MSIX_ENABLED) vsi->num_queue_pairs = pf->num_lan_msix; + else + vsi->num_queue_pairs = 1; } /* Number of queues per enabled TC */ @@ -3330,12 +3409,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) if (ring->vsi->type == I40E_VSI_MAIN) xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); - kfree(ring->rx_bi); ring->xsk_pool = i40e_xsk_pool(ring); if (ring->xsk_pool) { - ret = i40e_alloc_rx_bi_zc(ring); - if (ret) - return ret; ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); /* For AF_XDP ZC, we disallow packets to span on @@ -3353,9 +3428,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->queue_index); } else { - ret = i40e_alloc_rx_bi(ring); - if (ret) - return ret; ring->rx_buf_len = vsi->rx_buf_len; if (ring->vsi->type == I40E_VSI_MAIN) { ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, @@ -5654,6 +5726,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi) } } +/** + * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits + * @vsi: Pointer to vsi structure + * @max_tx_rate: max TX rate in bytes to be converted into Mbits + * + * Helper function to convert units before send to set BW limit + **/ +static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate) +{ + if (max_tx_rate < I40E_BW_MBPS_DIVISOR) { + dev_warn(&vsi->back->pdev->dev, + "Setting max tx rate to minimum usable value of 50Mbps.\n"); + max_tx_rate = I40E_BW_CREDIT_DIVISOR; + } else { + do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); + } + + return max_tx_rate; +} + /** * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate * @vsi: VSI to be configured @@ -5676,10 +5768,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) max_tx_rate, seid); return -EINVAL; } - if (max_tx_rate && max_tx_rate < 50) { + if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) { dev_warn(&pf->pdev->dev, "Setting max tx rate to minimum usable value of 50Mbps.\n"); - max_tx_rate = 50; + max_tx_rate = I40E_BW_CREDIT_DIVISOR; } /* Tx rate credits are in values of 50Mbps, 0 is disabled */ @@ -7175,42 +7267,43 @@ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi) static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev, struct i40e_fwd_adapter *fwd) { + struct i40e_channel *ch = NULL, *ch_tmp, *iter; int ret = 0, num_tc = 1, i, aq_err; - struct i40e_channel *ch, *ch_tmp; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - if (list_empty(&vsi->macvlan_list)) - return -EINVAL; - /* Go through the list and find an available channel */ - list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { - if (!i40e_is_channel_macvlan(ch)) { - ch->fwd = fwd; + list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) { + if (!i40e_is_channel_macvlan(iter)) { + iter->fwd = fwd; /* record configuration for macvlan interface in vdev */ for (i = 0; i < num_tc; i++) netdev_bind_sb_channel_queue(vsi->netdev, vdev, i, - ch->num_queue_pairs, - ch->base_queue); - for (i = 0; i < ch->num_queue_pairs; i++) { + iter->num_queue_pairs, + iter->base_queue); + for (i = 0; i < iter->num_queue_pairs; i++) { struct i40e_ring *tx_ring, *rx_ring; u16 pf_q; - pf_q = ch->base_queue + i; + pf_q = iter->base_queue + i; /* Get to TX ring ptr */ tx_ring = vsi->tx_rings[pf_q]; - tx_ring->ch = ch; + tx_ring->ch = iter; /* Get the RX ring ptr */ rx_ring = vsi->rx_rings[pf_q]; - rx_ring->ch = ch; + rx_ring->ch = iter; } + ch = iter; 
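/*
 * (Editorial aside, not part of the patch.) This i40e_fwd_ring_up() rework is
 * an instance of the tree-wide "don't use the list iterator after the loop"
 * hardening: when list_for_each_entry_safe() completes without a break, the
 * iterator holds a bogus pointer computed from the list head, so the old code
 * could touch 'ch' even when no free channel existed. The fix walks the list
 * with a dedicated 'iter' and publishes it to 'ch' only on a confirmed match.
 * A simplified sketch of the shape:
 *
 *	struct i40e_channel *ch = NULL, *iter;
 *
 *	list_for_each_entry(iter, &vsi->macvlan_list, list)
 *		if (!i40e_is_channel_macvlan(iter)) {
 *			ch = iter;
 *			break;
 *		}
 *	if (!ch)
 *		return -EINVAL;	// no match; 'iter' must not be used here
 */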
break; } } + if (!ch) + return -EINVAL; + /* Guarantee all rings are updated before we update the * MAC address filter. */ @@ -7639,9 +7732,9 @@ config_tc: if (pf->flags & I40E_FLAG_TC_MQPRIO) { if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, + vsi->mqprio_qopt.max_rate[0]); - do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (!ret) { u64 credits = max_tx_rate; @@ -8162,6 +8255,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, return -EOPNOTSUPP; } + if (!tc) { + dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination"); + return -EINVAL; + } + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; @@ -9967,6 +10065,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi) return 0; } +/** + * i40e_clean_xps_state - clean xps state for every tx_ring + * @vsi: ptr to the VSI + **/ +static void i40e_clean_xps_state(struct i40e_vsi *vsi) +{ + int i; + + if (vsi->tx_rings) + for (i = 0; i < vsi->num_queue_pairs; i++) + if (vsi->tx_rings[i]) + clear_bit(__I40E_TX_XPS_INIT_DONE, + vsi->tx_rings[i]->state); +} + /** * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure @@ -9998,8 +10111,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired) rtnl_unlock(); for (v = 0; v < pf->num_alloc_vsi; v++) { - if (pf->vsi[v]) + if (pf->vsi[v]) { + i40e_clean_xps_state(pf->vsi[v]); pf->vsi[v]->seid = 0; + } } i40e_shutdown_adminq(&pf->hw); @@ -10108,7 +10223,7 @@ static int i40e_reset(struct i40e_pf *pf) **/ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { - int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state); + const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; i40e_status ret; @@ -10116,13 +10231,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) int v; if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && - i40e_check_recovery_mode(pf)) { + is_recovery_mode_reported) i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); - } if (test_bit(__I40E_DOWN, pf->state) && - !test_bit(__I40E_RECOVERY_MODE, pf->state) && - !old_recovery_mode_bit) + !test_bit(__I40E_RECOVERY_MODE, pf->state)) goto clear_recovery; dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); @@ -10149,13 +10262,12 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) * accordingly with regard to resources initialization * and deinitialization */ - if (test_bit(__I40E_RECOVERY_MODE, pf->state) || - old_recovery_mode_bit) { + if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { if (i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities)) goto end_unlock; - if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { + if (is_recovery_mode_reported) { /* we're staying in recovery mode so we'll reinitialize * misc vector here */ @@ -10284,10 +10396,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, + vsi->mqprio_qopt.max_rate[0]); u64 credits = 0; - do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (ret) goto end_unlock; @@ -12582,6 +12694,14 @@ 
static int i40e_xdp_setup(struct i40e_vsi *vsi, i40e_reset_and_rebuild(pf, true, true); } + if (!i40e_enabled_xdp_vsi(vsi) && prog) { + if (i40e_realloc_rx_bi_zc(vsi, true)) + return -ENOMEM; + } else if (i40e_enabled_xdp_vsi(vsi) && !prog) { + if (i40e_realloc_rx_bi_zc(vsi, false)) + return -ENOMEM; + } + for (i = 0; i < vsi->num_queue_pairs; i++) WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); @@ -12814,6 +12934,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) i40e_queue_pair_disable_irq(vsi, queue_pair); err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); + i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); i40e_queue_pair_clean_rings(vsi, queue_pair); i40e_queue_pair_reset_stats(vsi, queue_pair); @@ -15868,6 +15989,8 @@ static struct pci_driver i40e_driver = { **/ static int __init i40e_init_module(void) { + int err; + pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); @@ -15885,7 +16008,14 @@ static int __init i40e_init_module(void) } i40e_dbg_init(); - return pci_register_driver(&i40e_driver); + err = pci_register_driver(&i40e_driver); + if (err) { + destroy_workqueue(i40e_wq); + i40e_dbg_exit(); + return err; + } + + return 0; } module_init(i40e_init_module); diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 8335f151ceef..117fd9674d7f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -77,6 +77,11 @@ #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 #define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 #define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) +#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */ +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) #define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */ #define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 #define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) @@ -461,6 +466,14 @@ #define I40E_VFQF_HKEY1_MAX_INDEX 12 #define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */ #define I40E_VFQF_HLUT1_MAX_INDEX 15 +#define I40E_GL_RXERR1H(_i) (0x00318004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR1H_MAX_INDEX 143 +#define I40E_GL_RXERR1H_RXERR1H_SHIFT 0 +#define I40E_GL_RXERR1H_RXERR1H_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1H_RXERR1H_SHIFT) +#define I40E_GL_RXERR1L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR1L_MAX_INDEX 143 +#define I40E_GL_RXERR1L_RXERR1L_SHIFT 0 +#define I40E_GL_RXERR1L_RXERR1L_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1L_RXERR1L_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5ad28129fab2..43be33d87e39 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ 
b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1305,14 +1305,6 @@ err: return -ENOMEM; } -int i40e_alloc_rx_bi(struct i40e_ring *rx_ring) -{ - unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count; - - rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL); - return rx_ring->rx_bi ? 0 : -ENOMEM; -} - static void i40e_clear_rx_bi(struct i40e_ring *rx_ring) { memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); @@ -1443,6 +1435,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; + rx_ring->rx_bi = + kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL); + if (!rx_ring->rx_bi) + return -ENOMEM; + return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 93ac201f68b8..af843e8169f7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -465,7 +465,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); -int i40e_alloc_rx_bi(struct i40e_ring *rx_ring); /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index add67f7b73e8..0872448c0e80 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1172,6 +1172,7 @@ struct i40e_eth_stats { u64 tx_broadcast; /* bptc */ u64 tx_discards; /* tdpc */ u64 tx_errors; /* tepc */ + u64 rx_discards_other; /* rxerr1 */ }; /* Statistics collected per VEB per TC */ @@ -1403,6 +1404,10 @@ struct i40e_lldp_variables { #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 /* INPUT SET MASK for RSS, flow director, and flexible payload */ +#define I40E_X722_L3_SRC_SHIFT 49 +#define I40E_X722_L3_SRC_MASK (0x3ULL << I40E_X722_L3_SRC_SHIFT) +#define I40E_X722_L3_DST_SHIFT 41 +#define I40E_X722_L3_DST_MASK (0x3ULL << I40E_X722_L3_DST_SHIFT) #define I40E_L3_SRC_SHIFT 47 #define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT) #define I40E_L3_V6_SRC_SHIFT 43 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 9181e007e039..bb2a79b70c3a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1483,10 +1483,12 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr) if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) return true; - /* If the VFs have been disabled, this means something else is - * resetting the VF, so we shouldn't continue. - */ - if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) + /* Bail out if VFs are disabled. */ + if (test_bit(__I40E_VF_DISABLE, pf->state)) + return true; + + /* If VF is being reset already we don't need to continue. 
*/ + if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) return true; i40e_trigger_vf_reset(vf, flr); @@ -1523,7 +1525,8 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr) i40e_cleanup_reset_vf(vf); i40e_flush(hw); - clear_bit(__I40E_VF_DISABLE, pf->state); + usleep_range(20000, 40000); + clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states); return true; } @@ -1556,8 +1559,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) return false; /* Begin reset on all VFs at once */ - for (v = 0; v < pf->num_alloc_vfs; v++) - i40e_trigger_vf_reset(&pf->vf[v], flr); + for (v = 0; v < pf->num_alloc_vfs; v++) { + vf = &pf->vf[v]; + /* If VF is being reset no need to trigger reset again */ + if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) + i40e_trigger_vf_reset(&pf->vf[v], flr); + } /* HW requires some time to make sure it can flush the FIFO for a VF * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in @@ -1573,9 +1580,11 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) */ while (v < pf->num_alloc_vfs) { vf = &pf->vf[v]; - reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); - if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) - break; + if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) { + reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); + if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) + break; + } /* If the current VF has finished resetting, move on * to the next VF in sequence. @@ -1603,6 +1612,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) if (pf->vf[v].lan_vsi_idx == 0) continue; + /* If VF is reset in another thread just continue */ + if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) + continue; + i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]); } @@ -1614,6 +1627,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) if (pf->vf[v].lan_vsi_idx == 0) continue; + /* If VF is reset in another thread just continue */ + if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) + continue; + i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]); } @@ -1623,10 +1640,16 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) mdelay(50); /* Finish the reset on each VF */ - for (v = 0; v < pf->num_alloc_vfs; v++) + for (v = 0; v < pf->num_alloc_vfs; v++) { + /* If VF is reset in another thread just continue */ + if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) + continue; + i40e_cleanup_reset_vf(&pf->vf[v]); + } i40e_flush(hw); + usleep_range(20000, 40000); clear_bit(__I40E_VF_DISABLE, pf->state); return true; @@ -1985,6 +2008,25 @@ static void i40e_del_qch(struct i40e_vf *vf) } } +/** + * i40e_vc_get_max_frame_size + * @vf: pointer to the VF + * + * Max frame size is determined based on the current port's max frame size and + * whether a port VLAN is configured on this VF. The VF is not aware whether + * it's in a port VLAN so the PF needs to account for this in max frame size + * checks and sending the max frame size to the VF. 
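i40e_vc_get_max_frame_size() above is plain size accounting: a PF-inserted port VLAN tag costs VLAN_HLEN (4) bytes of every frame, so the limit advertised to the VF must shrink by that much. A self-contained illustration of the arithmetic; the helper name and the 1518-byte example are the editor's, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN	14	/* dst MAC + src MAC + ethertype */
#define VLAN_HLEN	4	/* 802.1Q tag inserted after src MAC */

/* Largest frame the VF may send if the PF silently adds a VLAN tag. */
static uint16_t vf_max_frame_size(uint16_t port_max_frame, int has_port_vlan)
{
	return has_port_vlan ? port_max_frame - VLAN_HLEN : port_max_frame;
}

int main(void)
{
	uint16_t port_max = 1518;	/* 1500-byte MTU + ETH_HLEN + 4-byte FCS */

	printf("no port VLAN:   %u\n", vf_max_frame_size(port_max, 0));
	printf("with port VLAN: %u\n", vf_max_frame_size(port_max, 1));
	return 0;
}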
+ **/ +static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf) +{ + u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size; + + if (vf->port_vlan_id) + max_frame_size -= VLAN_HLEN; + + return max_frame_size; +} + /** * i40e_vc_get_vf_resources_msg * @vf: pointer to the VF info @@ -2085,6 +2127,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; + vfres->max_mtu = i40e_vc_get_max_frame_size(vf); if (vf->lan_vsi_idx) { vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; @@ -2228,7 +2271,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) } if (vf->adq_enabled) { - for (i = 0; i < I40E_MAX_VF_VSI; i++) + for (i = 0; i < vf->num_tc; i++) num_qps_all += vf->ch[i].num_qps; if (num_qps_all != qci->num_queue_pairs) { aq_ret = I40E_ERR_PARAM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index a554d0a0b09b..358bbdb58795 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -39,6 +39,7 @@ enum i40e_vf_states { I40E_VF_STATE_MC_PROMISC, I40E_VF_STATE_UC_PROMISC, I40E_VF_STATE_PRE_ENABLE, + I40E_VF_STATE_RESETTING }; /* VF capabilities */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 75e4a698c3db..7f1226123629 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -9,14 +9,6 @@ #include "i40e_txrx_common.h" #include "i40e_xsk.h" -int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring) -{ - unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count; - - rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL); - return rx_ring->rx_bi_zc ? 0 : -ENOMEM; -} - void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring) { memset(rx_ring->rx_bi_zc, 0, @@ -28,6 +20,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) return &rx_ring->rx_bi_zc[idx]; } +/** + * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer + * @rx_ring: Current rx ring + * @pool_present: whether an XSK buffer pool is present + * + * Try to allocate the new SW ring and return -ENOMEM if the allocation fails. + * If the allocation succeeds, substitute the buffer with the newly allocated one. + * Returns 0 on success, negative on failure + */ +static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present) +{ + size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) : + sizeof(*rx_ring->rx_bi); + void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL); + + if (!sw_ring) + return -ENOMEM; + + if (pool_present) { + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + rx_ring->rx_bi_zc = sw_ring; + } else { + kfree(rx_ring->rx_bi_zc); + rx_ring->rx_bi_zc = NULL; + rx_ring->rx_bi = sw_ring; + } + return 0; +} + +/** + * i40e_realloc_rx_bi_zc - reallocate rx SW rings + * @vsi: Current VSI + * @zc: whether zero-copy (XSK) mode is set + * + * Reallocate buffer for rx_rings that might be used by XSK. + * XDP requires more memory than rx_buf provides.
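i40e_realloc_rx_xdp_bi() above orders the swap carefully: the replacement ring is allocated first, and the old one is freed only once the new memory is in hand, so an -ENOMEM leaves the original ring fully usable. A generic userspace sketch of the same allocate-then-swap move (names invented):

#include <stdlib.h>

/* Swap an array of 'count' elements for one with elements of a
 * different size; on failure the old array survives untouched.
 */
static int realloc_sw_ring(void **ring, size_t count, size_t new_elem_size)
{
	void *fresh = calloc(count, new_elem_size);

	if (!fresh)
		return -1;	/* caller keeps the old ring */

	free(*ring);
	*ring = fresh;
	return 0;
}

int main(void)
{
	void *ring = calloc(16, sizeof(int));

	if (!ring)
		return 1;
	if (realloc_sw_ring(&ring, 16, sizeof(long)) != 0) {
		free(ring);
		return 1;
	}
	free(ring);
	return 0;
}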
+ * Returns 0 on success, negative on failure + */ +int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc) +{ + struct i40e_ring *rx_ring; + unsigned long q; + + for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) { + rx_ring = vsi->rx_rings[q]; + if (i40e_realloc_rx_xdp_bi(rx_ring, zc)) + return -ENOMEM; + } + return 0; +} + /** * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a * certain ring/qid @@ -68,6 +112,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi, if (err) return err; + err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true); + if (err) + return err; + err = i40e_queue_pair_enable(vsi, qid); if (err) return err; @@ -112,6 +160,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid) xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR); if (if_running) { + err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false); + if (err) + return err; err = i40e_queue_pair_enable(vsi, qid); if (err) return err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index 7adfd8539247..36f5b6d20601 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -17,7 +17,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget); bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring); int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); -int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring); +int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc); void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring); #endif /* _I40E_XSK_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index ce1e2fb22e09..a994a2970ab2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -86,6 +86,7 @@ struct iavf_vsi { #define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4) #define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4) #define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ +#define IAVF_MBPS_QUANTA 50 #define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \ (IAVF_MAX_VF_VSI * \ diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c index 9fa3fa99b4c2..897b349cdaf1 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c +++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c @@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw) static enum iavf_status iavf_init_asq(struct iavf_hw *hw) { enum iavf_status ret_code = 0; + int i; if (hw->aq.asq.count > 0) { /* queue already initialized */ @@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw) /* initialize base registers */ ret_code = iavf_config_asq_regs(hw); if (ret_code) - goto init_adminq_free_rings; + goto init_free_asq_bufs; /* success! 
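The iavf_init_asq()/iavf_init_arq() hunks above add a deeper unwind target: once per-entry DMA buffers exist, a failure in register setup must free every buffer before falling through to freeing the ring itself. A compact userspace model of such cascading cleanup labels, with hypothetical names and plain malloc() in place of DMA allocations:

#include <stdio.h>
#include <stdlib.h>

#define NUM_ENTRIES 8

/* Hypothetical stand-in for the register-setup step; forced to fail. */
static int config_regs(void) { return -1; }

static int init_queue(void)
{
	void *bufs[NUM_ENTRIES] = { 0 };
	void *ring;
	int err, i;

	ring = malloc(1024);		/* the descriptor ring itself */
	if (!ring)
		return -1;

	for (i = 0; i < NUM_ENTRIES; i++) {
		bufs[i] = malloc(64);	/* per-entry buffers */
		if (!bufs[i]) {
			err = -1;
			goto free_bufs;
		}
	}

	err = config_regs();
	if (err)
		goto free_bufs;	/* deepest failure: buffers exist, free them too */

	return 0;	/* on success the queue keeps ring and buffers */

free_bufs:
	for (i = 0; i < NUM_ENTRIES; i++)
		free(bufs[i]);	/* free(NULL) is a harmless no-op */
	free(ring);
	return err;
}

int main(void)
{
	if (init_queue())
		fprintf(stderr, "init failed, resources unwound\n");
	return 0;
}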
*/ hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; +init_free_asq_bufs: + for (i = 0; i < hw->aq.num_asq_entries; i++) + iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + iavf_free_virt_mem(hw, &hw->aq.asq.dma_head); + init_adminq_free_rings: iavf_free_adminq_asq(hw); @@ -383,6 +389,7 @@ init_adminq_exit: static enum iavf_status iavf_init_arq(struct iavf_hw *hw) { enum iavf_status ret_code = 0; + int i; if (hw->aq.arq.count > 0) { /* queue already initialized */ @@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw) /* initialize base registers */ ret_code = iavf_config_arq_regs(hw); if (ret_code) - goto init_adminq_free_rings; + goto init_free_arq_bufs; /* success! */ hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; +init_free_arq_bufs: + for (i = 0; i < hw->aq.num_arq_entries; i++) + iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + iavf_free_virt_mem(hw, &hw->aq.arq.dma_head); init_adminq_free_rings: iavf_free_adminq_arq(hw); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index bd1fb3774769..ae96b552a3bb 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1318,7 +1318,6 @@ static void iavf_fill_rss_lut(struct iavf_adapter *adapter) static int iavf_init_rss(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; - int ret; if (!RSS_PF(adapter)) { /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ @@ -1334,9 +1333,8 @@ static int iavf_init_rss(struct iavf_adapter *adapter) iavf_fill_rss_lut(adapter); netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); - ret = iavf_config_rss(adapter); - return ret; + return iavf_config_rss(adapter); } /** @@ -2578,6 +2576,7 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter, struct tc_mqprio_qopt_offload *mqprio_qopt) { u64 total_max_rate = 0; + u32 tx_rate_rem = 0; int i, num_qps = 0; u64 tx_rate = 0; int ret = 0; @@ -2592,12 +2591,32 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter, return -EINVAL; if (mqprio_qopt->min_rate[i]) { dev_err(&adapter->pdev->dev, - "Invalid min tx rate (greater than 0) specified\n"); + "Invalid min tx rate (greater than 0) specified for TC%d\n", + i); return -EINVAL; } - /*convert to Mbps */ + + /* convert to Mbps */ tx_rate = div_u64(mqprio_qopt->max_rate[i], IAVF_MBPS_DIVISOR); + + if (mqprio_qopt->max_rate[i] && + tx_rate < IAVF_MBPS_QUANTA) { + dev_err(&adapter->pdev->dev, + "Invalid max tx rate for TC%d, minimum %dMbps\n", + i, IAVF_MBPS_QUANTA); + return -EINVAL; + } + + (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem); + + if (tx_rate_rem != 0) { + dev_err(&adapter->pdev->dev, + "Invalid max tx rate for TC%d, not divisible by %d\n", + i, IAVF_MBPS_QUANTA); + return -EINVAL; + } + total_max_rate += tx_rate; num_qps += mqprio_qopt->qopt.count[i]; } @@ -4019,7 +4038,11 @@ static int __init iavf_init_module(void) pr_err("%s: Failed to create workqueue\n", iavf_driver_name); return -ENOMEM; } + ret = pci_register_driver(&iavf_driver); + if (ret) + destroy_workqueue(iavf_wq); + return ret; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 256fa07d54d5..d481a922f018 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw) { u32 head, tail; + /* underlying hardware might not allow access 
and/or always return + * 0 for the head/tail registers so just use the cached values + */ head = ring->next_to_clean; - tail = readl(ring->tail); + tail = ring->next_to_use; if (head != tail) return (head < tail) ? @@ -1263,11 +1266,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, { struct iavf_rx_buffer *rx_buffer; - if (!size) - return NULL; - rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; prefetchw(rx_buffer->page); + if (!size) + return rx_buffer; /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, @@ -1369,7 +1371,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, #endif struct sk_buff *skb; - if (!rx_buffer) + if (!rx_buffer || !size) return NULL; /* prefetch first cache line of first page */ va = page_address(rx_buffer->page) + rx_buffer->page_offset; @@ -1527,7 +1529,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; - if (rx_buffer) + if (rx_buffer && size) rx_buffer->pagecnt_bias++; break; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index ff479bf72144..5deee75bc436 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -241,11 +241,14 @@ out: void iavf_configure_queues(struct iavf_adapter *adapter) { struct virtchnl_vsi_queue_config_info *vqci; - struct virtchnl_queue_pair_info *vqpi; + int i, max_frame = adapter->vf_res->max_mtu; int pairs = adapter->num_active_queues; - int i, max_frame = IAVF_MAX_RXBUFFER; + struct virtchnl_queue_pair_info *vqpi; size_t len; + if (max_frame > IAVF_MAX_RXBUFFER || !max_frame) + max_frame = IAVF_MAX_RXBUFFER; + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 6a57b41ddb54..7794703c1359 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -498,7 +498,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi) { - return !!vsi->xdp_prog; + return !!READ_ONCE(vsi->xdp_prog); } static inline void ice_set_ring_xdp(struct ice_ring *ring) diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 421fc707f80a..7f1bf71844bc 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -652,7 +652,8 @@ static int ice_lbtest_receive_frames(struct ice_ring *rx_ring) rx_desc = ICE_RX_DESC(rx_ring, i); if (!(rx_desc->wb.status_error0 & - cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS))) + (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) | + cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))))) continue; rx_buf = &rx_ring->rx_buf[i]; @@ -2174,6 +2175,42 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks, return err; } +/** + * ice_set_phy_type_from_speed - set phy_types based on speeds + * and advertised modes + * @ks: ethtool link ksettings struct + * @phy_type_low: pointer to the lower part of phy_type + * @phy_type_high: pointer to the higher part of phy_type + * @adv_link_speed: targeted link speeds bitmap + */ +static void 
+ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks, + u64 *phy_type_low, u64 *phy_type_high, + u16 adv_link_speed) +{ + /* Handle 1000M speed in a special way because ice_update_phy_type + * enables all link modes, but having mixed copper and optical + * standards is not supported. + */ + adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Full)) + *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T | + ICE_PHY_TYPE_LOW_1G_SGMII; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseKX_Full)) + *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full)) + *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX | + ICE_PHY_TYPE_LOW_1000BASE_LX; + + ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed); +} + /** * ice_set_link_ksettings - Set Speed and Duplex * @netdev: network interface device structure @@ -2310,7 +2347,8 @@ ice_set_link_ksettings(struct net_device *netdev, adv_link_speed = curr_link_speed; /* Convert the advertise link speeds to their corresponded PHY_TYPE */ - ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed); + ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high, + adv_link_speed); if (!autoneg_changed && adv_link_speed == curr_link_speed) { netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 52ac6cc08e83..ea8d868c8f30 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1265,6 +1265,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->vsi = vsi; ring->dev = dev; ring->count = vsi->num_tx_desc; + ring->txq_teid = ICE_INVAL_TEID; WRITE_ONCE(vsi->tx_rings[i], ring); } @@ -2667,6 +2668,8 @@ int ice_vsi_release(struct ice_vsi *vsi) } } + if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) + ice_clear_dflt_vsi(pf->first_sw); ice_fltr_remove_all(vsi); ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); ice_vsi_delete(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 20c9d55f3adc..f193709c8efc 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2475,8 +2475,10 @@ free_qmap: for (i = 0; i < vsi->num_xdp_txq; i++) if (vsi->xdp_rings[i]) { - if (vsi->xdp_rings[i]->desc) + if (vsi->xdp_rings[i]->desc) { + synchronize_rcu(); ice_free_tx_ring(vsi->xdp_rings[i]); + } kfree_rcu(vsi->xdp_rings[i], rcu); vsi->xdp_rings[i] = NULL; } @@ -3402,7 +3404,7 @@ static int ice_init_pf(struct ice_pf *pf) pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); if (!pf->avail_rxqs) { - devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); + bitmap_free(pf->avail_txqs); pf->avail_txqs = NULL; return -ENOMEM; } @@ -5201,10 +5203,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi) if (vsi->netdev) { ice_set_rx_mode(vsi->netdev); - err = ice_vsi_vlan_setup(vsi); + if (vsi->type != ICE_VSI_LB) { + err = ice_vsi_vlan_setup(vsi); - if (err) - return err; + if (err) + return err; + } } ice_vsi_cfg_dcb_rings(vsi); @@ -5269,9 +5273,10 @@ static int ice_up_complete(struct ice_vsi *vsi) netif_carrier_on(vsi->netdev); } - /* clear this now, and the first stats read will be used as baseline */ - vsi->stat_offsets_loaded = false; - + /* Perform an initial read of the statistics registers now to + * set the baseline so counters are ready when 
interface is up + */ + ice_update_eth_stats(vsi); ice_service_task_schedule(pf); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 5ce8590cdb37..0155c45d9d7f 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -2590,7 +2590,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, else status = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vlan_id); - if (status) + if (status && status != -EEXIST) break; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 5134342ff70f..a980d337861d 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -2723,9 +2723,9 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - /* Skip queue if not enabled */ if (!test_bit(vf_q_id, vf->txq_ena)) - continue; + dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n", + vf_q_id, vsi->vsi_num); ice_fill_txq_meta(vsi, ring, &txq_meta); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 9f36f8d7a985..59963b901be0 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -36,8 +36,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) { ice_clean_tx_ring(vsi->tx_rings[q_idx]); - if (ice_is_xdp_ena_vsi(vsi)) + if (ice_is_xdp_ena_vsi(vsi)) { + synchronize_rcu(); ice_clean_tx_ring(vsi->xdp_rings[q_idx]); + } ice_clean_rx_ring(vsi->rx_rings[q_idx]); } @@ -369,6 +371,19 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) bool if_running, pool_present = !!pool; int ret = 0, pool_failure = 0; + if (qid >= vsi->num_rxq || qid >= vsi->num_txq) { + netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n"); + pool_failure = -EINVAL; + goto failure; + } + + if (!is_power_of_2(vsi->rx_rings[qid]->count) || + !is_power_of_2(vsi->tx_rings[qid]->count)) { + netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n"); + pool_failure = -EINVAL; + goto failure; + } + if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi); if (if_running) { @@ -391,6 +406,7 @@ xsk_pool_if_up: netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret); } +failure: if (pool_failure) { netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n", pool_present ? 
"en" : "dis", pool_failure); diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 7bda8c5edea5..e6d2800a8abc 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -664,6 +664,8 @@ struct igb_adapter { struct igb_mac_addr *mac_table; struct vf_mac_filter vf_macs; struct vf_mac_filter *vf_mac_list; + /* lock for VF resources */ + spinlock_t vfs_lock; }; /* flags controlling PTP/1588 function */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 28baf203459a..5e3b0a5843a8 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1413,6 +1413,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) *data = 1; return -1; } + wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8); + wr32(E1000_EIMS, BIT(0)); } else if (adapter->flags & IGB_FLAG_HAS_MSI) { shared_int = false; if (request_irq(irq, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f854d41c6c94..f24f1a8ec2fb 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3638,6 +3638,7 @@ static int igb_disable_sriov(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + unsigned long flags; /* reclaim resources allocated to VFs */ if (adapter->vf_data) { @@ -3650,12 +3651,13 @@ static int igb_disable_sriov(struct pci_dev *pdev) pci_disable_sriov(pdev); msleep(500); } - + spin_lock_irqsave(&adapter->vfs_lock, flags); kfree(adapter->vf_mac_list); adapter->vf_mac_list = NULL; kfree(adapter->vf_data); adapter->vf_data = NULL; adapter->vfs_allocated_count = 0; + spin_unlock_irqrestore(&adapter->vfs_lock, flags); wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); wrfl(); msleep(100); @@ -3815,7 +3817,9 @@ static void igb_remove(struct pci_dev *pdev) igb_release_hw_control(adapter); #ifdef CONFIG_PCI_IOV + rtnl_lock(); igb_disable_sriov(pdev); + rtnl_unlock(); #endif unregister_netdev(netdev); @@ -3975,6 +3979,9 @@ static int igb_sw_init(struct igb_adapter *adapter) spin_lock_init(&adapter->nfc_lock); spin_lock_init(&adapter->stats64_lock); + + /* init spinlock to avoid concurrency of VF resources */ + spin_lock_init(&adapter->vfs_lock); #ifdef CONFIG_PCI_IOV switch (hw->mac.type) { case e1000_82576: @@ -4813,8 +4820,11 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) while (i != tx_ring->next_to_use) { union e1000_adv_tx_desc *eop_desc, *tx_desc; - /* Free all the Tx ring sk_buffs */ - dev_kfree_skb_any(tx_buffer->skb); + /* Free all the Tx ring sk_buffs or xdp frames */ + if (tx_buffer->type == IGB_TYPE_SKB) + dev_kfree_skb_any(tx_buffer->skb); + else + xdp_return_frame(tx_buffer->xdpf); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -5499,7 +5509,8 @@ static void igb_watchdog_task(struct work_struct *work) break; } - if (adapter->link_speed != SPEED_1000) + if (adapter->link_speed != SPEED_1000 || + !hw->phy.ops.read_reg) goto no_wait; /* wait for Remote receiver status OK */ @@ -7405,7 +7416,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - u32 reg, msgbuf[3]; + u32 reg, msgbuf[3] = {}; u8 *addr = (u8 *)(&msgbuf[1]); /* process all the same items cleared in a function level reset */ @@ -7848,8 
+7859,10 @@ unlock: static void igb_msg_task(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + unsigned long flags; u32 vf; + spin_lock_irqsave(&adapter->vfs_lock, flags); for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { /* process any reset requests */ if (!igb_check_for_rst(hw, vf)) @@ -7863,6 +7876,7 @@ static void igb_msg_task(struct igb_adapter *adapter) if (!igb_check_for_ack(hw, vf)) igb_rcv_ack_from_vf(adapter, vf); } + spin_unlock_irqrestore(&adapter->vfs_lock, flags); } /** @@ -9825,11 +9839,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) struct e1000_hw *hw = &adapter->hw; u32 dmac_thr; u16 hwm; + u32 reg; if (hw->mac.type > e1000_82580) { if (adapter->flags & IGB_FLAG_DMAC) { - u32 reg; - /* force threshold to 0. */ wr32(E1000_DMCTXTH, 0); @@ -9862,7 +9875,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) /* Disable BMC-to-OS Watchdog Enable */ if (hw->mac.type != e1000_i354) reg &= ~E1000_DMACR_DC_BMC2OSW_EN; - wr32(E1000_DMACR, reg); /* no lower threshold to disable @@ -9879,12 +9891,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) */ wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); + } - /* make low power state decision controlled - * by DMA coal - */ + if (hw->mac.type >= e1000_i210 || + (adapter->flags & IGB_FLAG_DMAC)) { reg = rd32(E1000_PCIEMISC); - reg &= ~E1000_PCIEMISC_LX_DECISION; + reg |= E1000_PCIEMISC_LX_DECISION; wr32(E1000_PCIEMISC, reg); } /* endif adapter->dmac is not disabled */ } else if (hw->mac.type == e1000_82580) { diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index fd37d2c203af..7f3523f0d196 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -187,15 +187,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) igc_check_for_copper_link(hw); - /* Verify phy id and set remaining function pointers */ - switch (phy->id) { - case I225_I_PHY_ID: - phy->type = igc_phy_i225; - break; - default: - ret_val = -IGC_ERR_PHY; - goto out; - } + phy->type = igc_phy_i225; out: return ret_val; diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 55dae7c4703f..7e29f41f70e0 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -22,6 +22,7 @@ #define IGC_DEV_ID_I220_V 0x15F7 #define IGC_DEV_ID_I225_K 0x3100 #define IGC_DEV_ID_I225_K2 0x3101 +#define IGC_DEV_ID_I226_K 0x3102 #define IGC_DEV_ID_I225_LMVP 0x5502 #define IGC_DEV_ID_I225_IT 0x0D9F #define IGC_DEV_ID_I226_LM 0x125B diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index 553d6bc78e6b..624236a4202e 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) { u32 swfw_sync; - while (igc_get_hw_semaphore_i225(hw)) - ; /* Empty */ + /* Releasing the resource requires first getting the HW semaphore. + * If we fail to get the semaphore, there is nothing we can do, + * except log an error and quit. We are not allowed to hang here + * indefinitely, as it may cause denial of service or system crash. 
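The igc_release_swfw_sync_i225() change above drops an unbounded "while (acquire()) ;" spin in favour of a single attempt that logs and returns, since hanging forever risks exactly the denial of service the comment describes. Where limited retrying is still wanted, a bounded loop is the usual compromise; a userspace sketch in which the retry count, delay and helper names are arbitrary choices:

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

static int tries;

/* Stand-in for a hardware semaphore acquire: 0 on success. */
static int get_hw_semaphore(void)
{
	return ++tries < 3 ? -1 : 0;	/* succeeds on the third try */
}

static bool acquire_bounded(int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (!get_hw_semaphore())
			return true;
		usleep(1000);	/* brief pause instead of hard spinning */
	}
	fprintf(stderr, "failed to acquire semaphore, giving up\n");
	return false;	/* never hang indefinitely */
}

int main(void)
{
	return acquire_bounded(5) ? 0 : 1;
}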
+ */ + if (igc_get_hw_semaphore_i225(hw)) { + hw_dbg("Failed to release SW_FW_SYNC.\n"); + return; + } swfw_sync = rd32(IGC_SW_FW_SYNC); swfw_sync &= ~mask; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 61cebb7df6bc..e7ffe63925fd 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -4177,20 +4178,12 @@ bool igc_has_link(struct igc_adapter *adapter) * false until the igc_check_for_link establishes link * for copper adapters ONLY */ - switch (hw->phy.media_type) { - case igc_media_type_copper: - if (!hw->mac.get_link_status) - return true; - hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; - break; - default: - case igc_media_type_unknown: - break; - } + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; - if (hw->mac.type == igc_i225 && - hw->phy.id == I225_I_PHY_ID) { + if (hw->mac.type == igc_i225) { if (!netif_carrier_ok(adapter->netdev)) { adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { @@ -4940,6 +4933,9 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg) u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); u32 value = 0; + if (IGC_REMOVED(hw_addr)) + return ~value; + value = readl(&hw_addr[reg]); /* reads should not return all F's */ @@ -5049,6 +5045,10 @@ static int igc_probe(struct pci_dev *pdev, pci_enable_pcie_error_reporting(pdev); + err = pci_enable_ptm(pdev, NULL); + if (err < 0) + dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); + pci_set_master(pdev); err = -ENOMEM; diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index e380b7a3ea63..3a103406eadb 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -249,8 +249,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw) return ret_val; } - if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && - hw->phy.id == I225_I_PHY_ID) { + if (phy->autoneg_mask & ADVERTISE_2500_FULL) { /* Read the MULTI GBT AN Control Register - reg 7.32 */ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) | @@ -390,8 +389,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw) ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); - if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && - hw->phy.id == I225_I_PHY_ID) + if (phy->autoneg_mask & ADVERTISE_2500_FULL) ret_val = phy->ops.write_reg(hw, (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) | @@ -583,7 +581,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) * the lower time out */ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { - usleep_range(500, 1000); + udelay(50); mdic = rd32(IGC_MDIC); if (mdic & IGC_MDIC_READY) break; @@ -640,7 +638,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) * the lower time out */ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { - usleep_range(500, 1000); + udelay(50); mdic = rd32(IGC_MDIC); if (mdic & IGC_MDIC_READY) break; diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index b52dd9d737e8..a273e1c33b3f 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -252,7 +252,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg); #define wr32(reg, val) \ do { \ u8 __iomem 
*hw_addr = READ_ONCE((hw)->hw_addr); \ - writel((val), &hw_addr[(reg)]); \ + if (!IGC_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ } while (0) #define rd32(reg) (igc_rd32(hw, reg)) @@ -264,4 +265,6 @@ do { \ #define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) +#define IGC_REMOVED(h) unlikely(!(h)) + #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index de0fc6ecf491..27c6f911737b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -769,6 +769,7 @@ struct ixgbe_adapter { #ifdef CONFIG_IXGBE_IPSEC struct ixgbe_ipsec *ipsec; #endif /* CONFIG_IXGBE_IPSEC */ + spinlock_t vfs_lock; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 54d47265a7ac..319620856cba 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -903,7 +903,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) /* Tx IPsec offload doesn't seem to work on this * device, so block these requests for now. */ - if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) { + sam->flags = sam->flags & ~XFRM_OFFLOAD_IPV6; + if (sam->flags != XFRM_OFFLOAD_INBOUND) { err = -EOPNOTSUPP; goto err_out; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a3a02e2f92f6..b5b8be4672aa 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6403,6 +6403,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, /* n-tuple support exists, always init our spinlock */ spin_lock_init(&adapter->fdir_perfect_lock); + /* init spinlock to avoid concurrency of VF resources */ + spin_lock_init(&adapter->vfs_lock); + #ifdef CONFIG_IXGBE_DCB ixgbe_init_dcb(adapter); #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 22a874eee2e8..8b7f30035571 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1211,7 +1211,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) struct cyclecounter cc; unsigned long flags; u32 incval = 0; - u32 tsauxc = 0; u32 fuse0 = 0; /* For some of the boards below this mask is technically incorrect. @@ -1246,18 +1245,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) case ixgbe_mac_x550em_a: case ixgbe_mac_X550: cc.read = ixgbe_ptp_read_X550; - - /* enable SYSTIME counter */ - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0); - IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); - tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, - tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME); - IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS); - IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC); - - IXGBE_WRITE_FLUSH(hw); break; case ixgbe_mac_X540: cc.read = ixgbe_ptp_read_82599; @@ -1289,6 +1276,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) spin_unlock_irqrestore(&adapter->tmreg_lock, flags); } +/** + * ixgbe_ptp_init_systime - Initialize SYSTIME registers + * @adapter: the ixgbe private board structure + * + * Initialize and start the SYSTIME registers. 
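The igc_rd32()/wr32() changes above guard every register access against surprise removal: once the cached hw_addr is NULL the device is gone, reads return all-ones by PCI convention and writes must be dropped. A userspace model of the guard using ordinary pointers in place of __iomem mappings; the names are the editor's:

#include <stdint.h>
#include <stdio.h>

#define REMOVED(addr) (!(addr))	/* models IGC_REMOVED() */

static uint32_t *mmio_base;	/* NULL once the device is gone */

static uint32_t reg_read(uint32_t reg)
{
	if (REMOVED(mmio_base))
		return ~0u;	/* PCI convention: reads return all F's */
	return mmio_base[reg / 4];
}

static void reg_write(uint32_t reg, uint32_t val)
{
	if (REMOVED(mmio_base))
		return;		/* silently drop writes to a dead device */
	mmio_base[reg / 4] = val;
}

int main(void)
{
	static uint32_t fake_bar[16];

	mmio_base = fake_bar;
	reg_write(0x8, 0x1234);
	printf("present: 0x%x\n", reg_read(0x8));

	mmio_base = NULL;	/* simulate surprise removal */
	printf("removed: 0x%x\n", reg_read(0x8));
	return 0;
}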
+ */ +static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 tsauxc; + + switch (hw->mac.type) { + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + case ixgbe_mac_X550: + tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); + + /* Reset SYSTIME registers to 0 */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); + + /* Reset interrupt settings */ + IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC); + + /* Activate the SYSTIME counter */ + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, + tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME); + break; + case ixgbe_mac_X540: + case ixgbe_mac_82599EB: + /* Reset SYSTIME registers to 0 */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); + break; + default: + /* Other devices aren't supported */ + return; + } + + IXGBE_WRITE_FLUSH(hw); +} + /** * ixgbe_ptp_reset * @adapter: the ixgbe private board structure @@ -1315,6 +1346,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) ixgbe_ptp_start_cyclecounter(adapter); + ixgbe_ptp_init_systime(adapter); + spin_lock_irqsave(&adapter->tmreg_lock, flags); timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ktime_to_ns(ktime_get_real())); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 214a38de3f41..0078ae592616 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -204,10 +204,13 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { unsigned int num_vfs = adapter->num_vfs, vf; + unsigned long flags; int rss; + spin_lock_irqsave(&adapter->vfs_lock, flags); /* set num VFs to 0 to prevent access to vfinfo */ adapter->num_vfs = 0; + spin_unlock_irqrestore(&adapter->vfs_lock, flags); /* put the reference to all of the vf devices */ for (vf = 0; vf < num_vfs; ++vf) { @@ -1157,9 +1160,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, switch (xcast_mode) { case IXGBEVF_XCAST_MODE_NONE: - disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + disable = IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; - enable = 0; + enable = IXGBE_VMOLR_BAM; break; case IXGBEVF_XCAST_MODE_MULTI: disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; @@ -1181,9 +1184,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, return -EPERM; } - disable = 0; + disable = IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE; break; default: return -EOPNOTSUPP; @@ -1305,8 +1308,10 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) void ixgbe_msg_task(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + unsigned long flags; u32 vf; + spin_lock_irqsave(&adapter->vfs_lock, flags); for (vf = 0; vf < adapter->num_vfs; vf++) { /* process any reset requests */ if (!ixgbe_check_for_rst(hw, vf)) @@ -1320,6 +1325,7 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter) if (!ixgbe_check_for_ack(hw, vf)) ixgbe_rcv_ack_from_vf(adapter, vf); } + spin_unlock_irqrestore(&adapter->vfs_lock, flags); } void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 2d6ac61d7a3e..4510a84514fa 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4878,6 +4878,8 @@ static struct pci_driver ixgbevf_driver = { **/ static int __init ixgbevf_init_module(void) { + int err; + pr_info("%s\n", ixgbevf_driver_string); pr_info("%s\n", ixgbevf_copyright); ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name); @@ -4886,7 +4888,13 @@ static int __init ixgbevf_init_module(void) return -ENOMEM; } - return pci_register_driver(&ixgbevf_driver); + err = pci_register_driver(&ixgbevf_driver); + if (err) { + destroy_workqueue(ixgbevf_wq); + return err; + } + + return 0; } module_init(ixgbevf_init_module); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 2d0c52f7106b..5ea626b1e578 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -466,7 +466,6 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { - dev_kfree_skb_any(skb); netdev_err(dev, "tx ring full\n"); netif_tx_stop_queue(txq); return NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 90e6111ce534..735b76effc49 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2471,6 +2471,7 @@ out_free: for (i = 0; i < mp->rxq_count; i++) rxq_deinit(mp->rxq + i); out: + napi_disable(&mp->napi); free_irq(dev->irq, dev); return err; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 74e266c0b8e1..f5567d485e91 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4140,7 +4140,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp) /* Use the cpu associated to the rxq when it is online, in all * the other cases, use the cpu 0 which can't be offline. 
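The lantiq_etop fix above rests on the ndo_start_xmit() ownership contract: returning NETDEV_TX_BUSY asks the core to requeue the very same skb later, so the driver must not have freed it; ownership only transfers on NETDEV_TX_OK. A toy model of that contract with stand-in types, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

enum tx_result { TX_OK, TX_BUSY };
struct pkt { char data[64]; };

static int ring_full = 1;

static enum tx_result start_xmit(struct pkt *p)
{
	if (ring_full) {
		/* Do NOT free p: the caller will retry with it later. */
		return TX_BUSY;
	}
	/* ...queue p to hardware; the driver now owns and frees it. */
	free(p);
	return TX_OK;
}

int main(void)
{
	struct pkt *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	while (start_xmit(p) == TX_BUSY) {
		puts("ring full, retrying with the same packet");
		ring_full = 0;	/* pretend the ring drained */
	}
	return 0;
}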
*/ - if (cpu_online(pp->rxq_def)) + if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def)) elected_cpu = pp->rxq_def; max_cpu = num_present_cpus(); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index d825eb021b22..e999ac2de34e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1434,6 +1434,7 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset); void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name); void mvpp2_dbgfs_cleanup(struct mvpp2 *priv); +void mvpp2_dbgfs_exit(void); #ifdef CONFIG_MVPP2_PTP int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c index 4a3baa7e0142..75e83ea2a926 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -691,6 +691,13 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent, return 0; } +static struct dentry *mvpp2_root; + +void mvpp2_dbgfs_exit(void) +{ + debugfs_remove(mvpp2_root); +} + void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) { debugfs_remove_recursive(priv->dbgfs_dir); @@ -700,10 +707,9 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) { - struct dentry *mvpp2_dir, *mvpp2_root; + struct dentry *mvpp2_dir; int ret, i; - mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL); if (!mvpp2_root) mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 542cd6f2c9bd..68c5ed8716c8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -7155,7 +7155,18 @@ static struct platform_driver mvpp2_driver = { }, }; -module_platform_driver(mvpp2_driver); +static int __init mvpp2_driver_init(void) +{ + return platform_driver_register(&mvpp2_driver); +} +module_init(mvpp2_driver_init); + +static void __exit mvpp2_driver_exit(void) +{ + platform_driver_unregister(&mvpp2_driver); + mvpp2_dbgfs_exit(); +} +module_exit(mvpp2_driver_exit); MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); MODULE_AUTHOR("Marcin Wojtas "); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c index 2a13c318048c..59a3ea02b8ad 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c @@ -771,6 +771,7 @@ tx_done: int prestera_rxtx_switch_init(struct prestera_switch *sw) { struct prestera_rxtx *rxtx; + int err; rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL); if (!rxtx) @@ -778,7 +779,11 @@ int prestera_rxtx_switch_init(struct prestera_switch *sw) sw->rxtx = rxtx; - return prestera_sdma_switch_init(sw); + err = prestera_sdma_switch_init(sw); + if (err) + kfree(rxtx); + + return err; } void prestera_rxtx_switch_fini(struct prestera_switch *sw) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 7d7dc0754a3a..217dc67c48fa 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -806,6 +806,17 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd, rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); } +static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask) +{ + unsigned int size = 
mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH); + unsigned long data; + + data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN, + get_order(size)); + + return (void *)data; +} + /* the qdma core needs scratch memory to be setup */ static int mtk_init_fq_dma(struct mtk_eth *eth) { @@ -1303,7 +1314,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto release_desc; /* alloc new buffer */ - new_data = napi_alloc_frag(ring->frag_size); + if (ring->frag_size <= PAGE_SIZE) + new_data = napi_alloc_frag(ring->frag_size); + else + new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC); if (unlikely(!new_data)) { netdev->stats.rx_dropped++; goto release_desc; @@ -1700,7 +1714,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return -ENOMEM; for (i = 0; i < rx_dma_size; i++) { - ring->data[i] = netdev_alloc_frag(ring->frag_size); + if (ring->frag_size <= PAGE_SIZE) + ring->data[i] = netdev_alloc_frag(ring->frag_size); + else + ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL); if (!ring->data[i]) return -ENOMEM; } @@ -1966,6 +1983,9 @@ static int mtk_hwlro_get_fdir_entry(struct net_device *dev, struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip)) + return -EINVAL; + /* only tcp dst ipv4 is meaningful, others are meaningless */ fsp->flow_type = TCP_V4_FLOW; fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]); @@ -2279,7 +2299,10 @@ static int mtk_open(struct net_device *dev) int err = mtk_start_dma(eth); - if (err) + if (err) { + phylink_disconnect_phy(mac->phylink); return err; + } mtk_gdm_config(eth, MTK_GDMA_TO_PDMA); diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c index 32d83421226a..5897940a418b 100644 --- a/drivers/net/ethernet/mediatek/mtk_sgmii.c +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -26,6 +26,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) break; ss->regmap[i] = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(ss->regmap[i])) return PTR_ERR(ss->regmap[i]); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 01275c376721..962851000ace 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -2099,7 +2099,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev, en_err(priv, "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", i, offset, ee->len - i, ret); - return 0; + return ret; } i += ret; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 427e7a31862c..d7f2890c254f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -697,7 +697,8 @@ static int mlx4_create_zones(struct mlx4_dev *dev, err = mlx4_bitmap_init(*bitmap + k, 1, MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0, 0); - mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0); + if (!err) + mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0); } if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 94426d29025e..39c17e903915 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -959,6 +959,7 @@ static void cmd_work_handler(struct work_struct *work) cmd_ent_get(ent); set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); + cmd_ent_get(ent); /* for the _real_ FW
event on completion */ /* Skip sending command to fw if internal error */ if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) { u8 status = 0; @@ -972,7 +973,6 @@ static void cmd_work_handler(struct work_struct *work) return; } - cmd_ent_get(ent); /* for the _real_ FW event on completion */ /* ring doorbell after the descriptor is valid */ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); wmb(); @@ -1422,8 +1422,8 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, return -EFAULT; err = sscanf(outlen_str, "%d", &outlen); - if (err < 0) - return err; + if (err != 1) + return -EINVAL; ptr = kzalloc(outlen, GFP_KERNEL); if (!ptr) @@ -1586,8 +1586,8 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force cmd_ent_put(ent); /* timeout work was canceled */ if (!forced || /* Real FW completion */ - pci_channel_offline(dev->pdev) || /* FW is inaccessible */ - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + mlx5_cmd_is_down(dev) || /* No real FW completion is expected */ + !opcode_allowed(cmd, ent->op)) cmd_ent_put(ent); ent->ts2 = ktime_get_ns(); @@ -1687,12 +1687,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev) struct mlx5_cmd *cmd = &dev->cmd; int i; - for (i = 0; i < cmd->max_reg_cmds; i++) - while (down_trylock(&cmd->sem)) + for (i = 0; i < cmd->max_reg_cmds; i++) { + while (down_trylock(&cmd->sem)) { mlx5_cmd_trigger_completions(dev); + cond_resched(); + } + } - while (down_trylock(&cmd->pages_sem)) + while (down_trylock(&cmd->pages_sem)) { mlx5_cmd_trigger_completions(dev); + cond_resched(); + } /* Unlock cmdif */ up(&cmd->pages_sem); @@ -1853,7 +1858,7 @@ void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, ctx->dev = dev; /* Starts at 1 to avoid doing wake_up if we are not cleaning up */ atomic_set(&ctx->num_inflight, 1); - init_waitqueue_head(&ctx->wait); + init_completion(&ctx->inflight_done); } EXPORT_SYMBOL(mlx5_cmd_init_async_ctx); @@ -1867,8 +1872,8 @@ EXPORT_SYMBOL(mlx5_cmd_init_async_ctx); */ void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx) { - atomic_dec(&ctx->num_inflight); - wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0); + if (!atomic_dec_and_test(&ctx->num_inflight)) + wait_for_completion(&ctx->inflight_done); } EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx); @@ -1879,7 +1884,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work) work->user_callback(status, work); if (atomic_dec_and_test(&ctx->num_inflight)) - wake_up(&ctx->wait); + complete(&ctx->inflight_done); } int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, @@ -1895,7 +1900,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, ret = cmd_exec(ctx->dev, in, in_size, out, out_size, mlx5_cmd_exec_cb_handler, work, false); if (ret && atomic_dec_and_test(&ctx->num_inflight)) - wake_up(&ctx->wait); + complete(&ctx->inflight_done); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 857be86b4a11..f800e1ca5ba6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -638,7 +638,7 @@ static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer, trace_timestamp = (timestamp_event.timestamp & MASK_52_7) | (str_frmt->timestamp & MASK_6_0); else - trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) | + trace_timestamp = ((timestamp_event.timestamp - 1) & MASK_52_7) 
| (str_frmt->timestamp & MASK_6_0); mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp); @@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) if (!tracer->owner) return; + if (unlikely(!tracer->str_db.loaded)) + goto arm; + block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE; start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; @@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) &tmp_trace_block[TRACES_PER_BLOCK - 1]); } +arm: mlx5_fw_tracer_arm(dev); } @@ -1138,8 +1142,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void queue_work(tracer->work_queue, &tracer->ownership_change_work); break; case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE: - if (likely(tracer->str_db.loaded)) - queue_work(tracer->work_queue, &tracer->handle_traces_work); + queue_work(tracer->work_queue, &tracer->handle_traces_work); break; default: mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c index ed4fb79b4db7..75b6060f7a9a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c @@ -31,6 +31,7 @@ static const char *const mlx5_rsc_sgmt_name[] = { struct mlx5_rsc_dump { u32 pdn; struct mlx5_core_mkey mkey; + u32 number_of_menu_items; u16 fw_segment_type[MLX5_SGMT_TYPE_NUM]; }; @@ -50,21 +51,37 @@ static int mlx5_rsc_dump_sgmt_get_by_name(char *name) return -EINVAL; } -static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page) +#define MLX5_RSC_DUMP_MENU_HEADER_SIZE (MLX5_ST_SZ_BYTES(resource_dump_info_segment) + \ + MLX5_ST_SZ_BYTES(resource_dump_command_segment) + \ + MLX5_ST_SZ_BYTES(resource_dump_menu_segment)) + +static int mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page, + int read_size, int start_idx) { void *data = page_address(page); enum mlx5_sgmt_type sgmt_idx; int num_of_items; char *sgmt_name; void *member; + int size = 0; void *menu; int i; - menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu); - num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records); + if (!start_idx) { + menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu); + rsc_dump->number_of_menu_items = MLX5_GET(resource_dump_menu_segment, menu, + num_of_records); + size = MLX5_RSC_DUMP_MENU_HEADER_SIZE; + data += size; + } + num_of_items = rsc_dump->number_of_menu_items; - for (i = 0; i < num_of_items; i++) { - member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]); + for (i = 0; start_idx + i < num_of_items; i++) { + size += MLX5_ST_SZ_BYTES(resource_dump_menu_record); + if (size >= read_size) + return start_idx + i; + + member = data + MLX5_ST_SZ_BYTES(resource_dump_menu_record) * i; sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name); sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name); if (sgmt_idx == -EINVAL) @@ -72,6 +89,7 @@ static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record, member, segment_type); } + return 0; } static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, @@ -168,6 +186,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev) struct mlx5_rsc_dump_cmd *cmd = NULL; struct mlx5_rsc_key key = {}; struct page *page; 
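The rsc_dump rework above makes menu parsing resumable: a menu can span several pages, so each pass returns the index of the first unread record and the next read continues from there. A generic userspace sketch of carrying a resume index across fixed-size chunks; record counts and names are invented:

#include <stdio.h>

#define TOTAL_RECORDS	10
#define PER_CHUNK	4	/* records that fit in one "page" */

/* Consume up to PER_CHUNK records starting at start_idx and return
 * the index to resume from, or 0 when everything was consumed.
 */
static int parse_chunk(const int *records, int start_idx)
{
	int i;

	for (i = 0; i < PER_CHUNK && start_idx + i < TOTAL_RECORDS; i++)
		printf("record %d = %d\n", start_idx + i,
		       records[start_idx + i]);

	return start_idx + i < TOTAL_RECORDS ? start_idx + i : 0;
}

int main(void)
{
	int records[TOTAL_RECORDS] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
	int start_idx = 0;

	do {
		start_idx = parse_chunk(records, start_idx);
	} while (start_idx > 0);

	return 0;
}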
+ int start_idx = 0; int size; int err; @@ -189,7 +208,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev) if (err < 0) goto destroy_cmd; - mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page); + start_idx = mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page, size, start_idx); } while (err > 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 73060b30fece..b0229ceae234 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -101,7 +101,7 @@ struct page_pool; #define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1)) #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS) #define MLX5E_MAX_RQ_NUM_MTTS \ - ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ + (ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ #define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024)) #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \ (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index 673f1c82d381..c9d5d8d93994 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -309,8 +309,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz, - xoff, &port_buffer, &update_buffer); + err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff, + port_buff_cell_sz, &port_buffer, &update_buffer); if (err) return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 0469f53dfb99..511c43146562 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1629,6 +1629,8 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg) static void mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) { + struct mlx5e_priv *priv; + if (!refcount_dec_and_test(&ft->refcount)) return; @@ -1638,6 +1640,8 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) rhashtable_free_and_destroy(&ft->ct_entries_ht, mlx5_tc_ct_flush_ft_entry, ct_priv); + priv = netdev_priv(ct_priv->netdev); + flush_workqueue(priv->wq); mlx5_tc_ct_free_pre_ct_tables(ft); mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id); kfree(ft); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 26f7fab109d9..d08bd22dc569 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -113,7 +113,6 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) struct xfrm_replay_state_esn *replay_esn; u32 seq_bottom = 0; u8 overlap; - u32 *esn; if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) { sa_entry->esn_state.trigger = 0; @@ -128,11 +127,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x, htonl(seq_bottom)); - esn = &sa_entry->esn_state.esn; sa_entry->esn_state.trigger = 1; if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) { - ++(*esn); sa_entry->esn_state.overlap = 0; return true; } 
else if (unlikely(!overlap && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index 1b392696280d..f824d781b99e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -15,7 +15,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, struct mlx5_core_dev *mdev = priv->mdev; int err; - if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info))) + if (!mlx5e_ktls_type_check(mdev, crypto_info)) return -EOPNOTSUPP; if (direction == TLS_OFFLOAD_CTX_DIR_TX) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index d06532d0baa4..634777fd7db9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -231,8 +231,7 @@ mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx, struct mlx5e_ktls_offload_context_rx **ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); - BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) > - TLS_OFFLOAD_CONTEXT_SIZE_RX); + BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX); *ctx = priv_rx; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index b140e13fdcc8..679747db3110 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -63,8 +63,7 @@ mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx, struct mlx5e_ktls_offload_context_tx **ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); - BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) > - TLS_OFFLOAD_CONTEXT_SIZE_TX); + BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX); *ctx = priv_tx; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index f23c67575073..7c0ae7c38eef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1210,6 +1210,16 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv) if (err) return err; + if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) { + /* + * Align the driver state with the register state. + * Temporary state change is required to enable the app list reset. 
+ */ + priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP; + mlx5e_dcbnl_delete_app(priv); + priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP; + } + mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params, priv->dcbx_dp.trust_state); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 16e98ac47624..cfc3bfcb04a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4009,6 +4009,13 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, } } + if (params->xdp_prog) { + if (features & NETIF_F_LRO) { + netdev_warn(netdev, "LRO is incompatible with XDP\n"); + features &= ~NETIF_F_LRO; + } + } + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { features &= ~NETIF_F_RXHASH; if (netdev->features & NETIF_F_RXHASH) @@ -4569,6 +4576,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) unlock: mutex_unlock(&priv->state_lock); + + /* Need to fix some features. */ + if (!err) + netdev_update_features(netdev); + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 304435e56117..b991f03c7e99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -706,6 +706,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev) params->num_tc = 1; params->tunneled_offload_en = false; + if (rep->vport != MLX5_VPORT_UPLINK) + params->vlan_strip_disable = true; mlx5_query_min_inline(mdev, &params->tx_min_inline_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 78f6a6f0a7e0..ff4f10d0f090 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -536,7 +536,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env) u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {}; struct mlx5_core_dev *mdev = priv->mdev; - if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) + if (!mlx5e_stats_grp_vnic_env_num_stats(priv)) return; MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 1ad1692a5b2d..f1bf7f6758d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2396,6 +2396,17 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, match.key->vlan_priority); *match_level = MLX5_MATCH_L2; + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) && + match.mask->vlan_eth_type && + MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, + ft_field_support.outer_second_vid, + fs_type)) { + MLX5_SET(fte_match_set_misc, misc_c, + outer_second_cvlan_tag, 1); + spec->match_criteria_enable |= + MLX5_MATCH_MISC_PARAMETERS; + } } } else if (*match_level != MLX5_MATCH_NONE) { /* cvlan_tag enabled in match criteria and
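The mlx5e en_main.c hunks above pair two mechanisms: a fix_features handler that masks LRO off while an XDP program is attached, and a netdev_update_features() call after the program is swapped so the mask is actually re-evaluated. A minimal sketch of that pattern, assuming the usual rtnl locking for these paths; demo_priv, demo_fix_features and demo_xdp_set are illustrative names, not from the patch:

	#include <linux/netdevice.h>
	#include <linux/bpf.h>

	struct demo_priv {
		struct bpf_prog *xdp_prog;
	};

	static netdev_features_t demo_fix_features(struct net_device *netdev,
						   netdev_features_t features)
	{
		struct demo_priv *priv = netdev_priv(netdev);

		/* XDP programs expect one packet per buffer; LRO-aggregated
		 * super-frames would break that assumption.
		 */
		if (priv->xdp_prog && (features & NETIF_F_LRO)) {
			netdev_warn(netdev, "LRO is incompatible with XDP\n");
			features &= ~NETIF_F_LRO;
		}
		return features;
	}

	static int demo_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
	{
		struct demo_priv *priv = netdev_priv(netdev);

		priv->xdp_prog = prog;
		/* Re-run the feature fixups now that the XDP state changed */
		netdev_update_features(netdev);
		return 0;
	}

Keeping the policy inside the fix_features callback means later ethtool feature changes are filtered by the same rule, instead of only being checked once at attach time.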
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c index 1cbb330b9f42..132ea9997676 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c @@ -30,9 +30,9 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act, sizeof(dest->vport.num), hash); hash = jhash((const void *)&dest->vport.vhca_id, sizeof(dest->vport.num), hash); - if (dest->vport.pkt_reformat) - hash = jhash(dest->vport.pkt_reformat, - sizeof(*dest->vport.pkt_reformat), + if (flow_act->pkt_reformat) + hash = jhash(flow_act->pkt_reformat, + sizeof(*flow_act->pkt_reformat), hash); return hash; } @@ -53,9 +53,11 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1, if (ret) return ret; - return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ? - memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat, - sizeof(*dest1->vport.pkt_reformat)) : 0; + if (flow_act1->pkt_reformat && flow_act2->pkt_reformat) + return memcmp(flow_act1->pkt_reformat, flow_act2->pkt_reformat, + sizeof(*flow_act1->pkt_reformat)); + + return !(flow_act1->pkt_reformat == flow_act2->pkt_reformat); } static int @@ -306,6 +308,8 @@ revert_changes: for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) { struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl; + attr->dests[curr_dest].termtbl = NULL; + /* search for the destination associated with the * current term table */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 55772f0cbbf8..4bdcceffe9d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1520,9 +1520,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte, return NULL; } -static bool check_conflicting_actions(u32 action1, u32 action2) +static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0, + const struct mlx5_fs_vlan *vlan1) { - u32 xored_actions = action1 ^ action2; + return vlan0->ethtype != vlan1->ethtype || + vlan0->vid != vlan1->vid || + vlan0->prio != vlan1->prio; +} + +static bool check_conflicting_actions(const struct mlx5_flow_act *act1, + const struct mlx5_flow_act *act2) +{ + u32 action1 = act1->action; + u32 action2 = act2->action; + u32 xored_actions; + + xored_actions = action1 ^ action2; /* if one rule only wants to count, it's ok */ if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT || @@ -1539,6 +1552,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2) MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2)) return true; + if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT && + act1->pkt_reformat != act2->pkt_reformat) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && + act1->modify_hdr != act2->modify_hdr) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH && + check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0])) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 && + check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1])) + return true; + return false; } @@ -1546,7 +1575,7 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_context *flow_context, const struct mlx5_flow_act *flow_act) { - if (check_conflicting_actions(flow_act->action, fte->action.action)) { + if (check_conflicting_actions(flow_act, &fte->action)) { mlx5_core_warn(get_dev(&fte->node), "Found two FTEs with conflicting actions\n"); return -EEXIST; @@ -2024,16 +2053,16 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) down_write_ref_node(&fte->node, false); for (i = handle->num_rules - 1; i >= 0; i--) tree_remove_node(&handle->rule[i]->node, true); - if (fte->dests_size) { - if (fte->modify_mask) - modify_fte(fte); - up_write_ref_node(&fte->node, false); - } else if (list_empty(&fte->node.children))
{ + if (list_empty(&fte->node.children)) { del_hw_fte(&fte->node); /* Avoid double call to del_hw_fte */ fte->node.del_hw_func = NULL; up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); + } else if (fte->dests_size) { + if (fte->modify_mask) + modify_fte(fte); + up_write_ref_node(&fte->node, false); } else { up_write_ref_node(&fte->node, false); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 9b472e793ee3..e29db4c39b37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -134,14 +134,19 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev) del_timer_sync(&fw_reset->timer); } -static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health) +static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + if (!test_and_clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) { + mlx5_core_warn(dev, "Reset request was already cleared\n"); + return -EALREADY; + } + mlx5_stop_sync_reset_poll(dev); - clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags); if (poll_health) mlx5_start_health_poll(dev); + return 0; } #define MLX5_RESET_POLL_INTERVAL (HZ / 10) @@ -185,13 +190,17 @@ static int mlx5_fw_reset_set_reset_sync_nack(struct mlx5_core_dev *dev) return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false); } -static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev) +static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + if (test_and_set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) { + mlx5_core_warn(dev, "Reset request was already set\n"); + return -EALREADY; + } mlx5_stop_health_poll(dev, true); - set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags); mlx5_start_sync_reset_poll(dev); + return 0; } static void mlx5_fw_live_patch_event(struct work_struct *work) @@ -225,7 +234,9 @@ static void mlx5_sync_reset_request_event(struct work_struct *work) err ? "Failed" : "Sent"); return; } - mlx5_sync_reset_set_reset_requested(dev); + if (mlx5_sync_reset_set_reset_requested(dev)) + return; + err = mlx5_fw_reset_set_reset_sync_ack(dev); if (err) mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err); @@ -325,7 +336,8 @@ static void mlx5_sync_reset_now_event(struct work_struct *work) struct mlx5_core_dev *dev = fw_reset->dev; int err; - mlx5_sync_reset_clear_reset_requested(dev, false); + if (mlx5_sync_reset_clear_reset_requested(dev, false)) + return; mlx5_core_warn(dev, "Sync Reset now. 
Device is going to reset.\n"); @@ -354,10 +366,8 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work) reset_abort_work); struct mlx5_core_dev *dev = fw_reset->dev; - if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) + if (mlx5_sync_reset_clear_reset_requested(dev, true)) return; - - mlx5_sync_reset_clear_reset_requested(dev, true); mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n"); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 11cc3ea5010a..9fb3e5ec1da6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -274,7 +274,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; - struct lag_tracker tracker; + struct lag_tracker tracker = { }; bool do_bond, roce_lag; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 839a01da110f..8ff16318e32d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -122,7 +122,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) { struct mlx5_mpfs *mpfs = dev->priv.mpfs; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!mpfs) return; WARN_ON(!hlist_empty(mpfs->hash)); @@ -137,7 +137,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) int err = 0; u32 index; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!mpfs) return 0; mutex_lock(&mpfs->lock); @@ -185,7 +185,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) int err = 0; u32 index; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!mpfs) return 0; mutex_lock(&mpfs->lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c index b599b6beb5b9..6a4b997c258a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c @@ -9,7 +9,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, struct mlx5dr_matcher *last_matcher = NULL; struct mlx5dr_htbl_connect_info info; struct mlx5dr_ste_htbl *last_htbl; - int ret; + int ret = -EOPNOTSUPP; if (action && action->action_type != DR_ACTION_TYP_FT) return -EOPNOTSUPP; @@ -68,6 +68,9 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, } } + if (ret) + goto out; + /* Release old action */ if (tbl->miss_action) refcount_dec(&tbl->miss_action->refcount); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 96c39a17d026..b227fa9ada46 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -43,11 +43,10 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns, err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action); if (err && action) { err = mlx5dr_action_destroy(action); - if (err) { - action = NULL; - mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n", - err); - } + if (err) + mlx5_core_err(ns->dev, + "Failed to destroy action (%d)\n", err); + action = NULL; } ft->fs_dr_table.miss_action = action; if (old_miss_action) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index 939b692ffc33..ce843ea91464 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client, return 0; errout: + mutex_destroy(&mlxsw_i2c->cmd.lock); i2c_set_clientdata(client, NULL); return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h index a68d931090dd..15c8d4de8350 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h @@ -8,8 +8,8 @@ #include "spectrum.h" enum mlxsw_sp_counter_sub_pool_id { - MLXSW_SP_COUNTER_SUB_POOL_FLOW, MLXSW_SP_COUNTER_SUB_POOL_RIF, + MLXSW_SP_COUNTER_SUB_POOL_FLOW, }; int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 5f92b1691360..aff6d4f35cd2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -168,8 +168,6 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev, static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev, struct dcb_app *app) { - int prio; - if (app->priority >= IEEE_8021QAZ_MAX_TCS) { netdev_err(dev, "APP entry with priority value %u is invalid\n", app->priority); @@ -183,17 +181,6 @@ static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev, app->protocol); return -EINVAL; } - - /* Warn about any DSCP APP entries with the same PID. */ - prio = fls(dcb_ieee_getapp_mask(dev, app)); - if (prio--) { - if (prio < app->priority) - netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n", - app->priority, app->protocol, prio); - else if (prio > app->priority) - netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n", - app->priority, app->protocol, prio); - } break; case IEEE_8021QAZ_APP_SEL_ETHERTYPE: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 53128382fc2e..d2887ae508bb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -4003,7 +4003,7 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp, { const struct fib_nh *nh = fib_info_nh(fi, 0); - return nh->fib_nh_scope == RT_SCOPE_LINK || + return nh->fib_nh_gw_family || mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL); } @@ -8038,13 +8038,14 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp) static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { struct net *net = mlxsw_sp_net(mlxsw_sp); - bool usp = net->ipv4.sysctl_ip_fwd_update_priority; char rgcr_pl[MLXSW_REG_RGCR_LEN]; u64 max_rifs; + bool usp; if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS)) return -EIO; max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); + usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority); mlxsw_reg_rgcr_pack(rgcr_pl, true, true); mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 433f14ade464..02ba6aa01105 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -709,7 +709,7 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = { .trap = MLXSW_SP_TRAP_CONTROL(LLDP, LLDP, TRAP), .listeners_arr = { 
MLXSW_RXL(mlxsw_sp_rx_ptp_listener, LLDP, TRAP_TO_CPU, - false, SP_LLDP, DISCARD), + true, SP_LLDP, DISCARD), }, }, { diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 9ed264ed7070..1fa16064142d 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -6923,7 +6923,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id) char banner[sizeof(version)]; struct ksz_switch *sw = NULL; - result = pci_enable_device(pdev); + result = pcim_enable_device(pdev); if (result) return result; diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c index 81a8ccca7e5e..5693784eec5b 100644 --- a/drivers/net/ethernet/microchip/encx24j600-regmap.c +++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c @@ -359,7 +359,7 @@ static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg, goto err_out; usleep_range(26, 100); - while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) && + while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) && (mistat & BUSY)) cpu_relax(); @@ -397,7 +397,7 @@ static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg, goto err_out; usleep_range(26, 100); - while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) && + while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) && (mistat & BUSY)) cpu_relax();
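The two encx24j600 hunks above fix a classic C precedence bug: != binds tighter than =, so the old condition assigned the result of the comparison to ret and threw the actual return code away. The corrected form parenthesizes the assignment and also inverts the test so the loop spins only while the read succeeds. A reduced sketch, where read_reg() is a hypothetical helper that returns 0 on success and BUSY is a stand-in for the driver's MISTAT busy bit:

	#define BUSY 0x01	/* illustrative, not the driver's definition */

	static int read_reg(unsigned int *val);	/* hypothetical, 0 on success */

	static void demo_poll_until_idle(void)
	{
		unsigned int mistat;
		int ret;

		/* Buggy: parses as ret = (read_reg(&mistat) != 0), so ret only
		 * ever holds 0 or 1, and the body runs while the read *fails*.
		 */
		while ((ret = read_reg(&mistat) != 0) && (mistat & BUSY))
			cpu_relax();

		/* Fixed: assign first, then compare; keep polling while reads
		 * succeed (return 0) and the device still reports BUSY.
		 */
		while (((ret = read_reg(&mistat)) == 0) && (mistat & BUSY))
			cpu_relax();
	}

The inverted comparison matters as much as the extra parentheses: regmap_read() returns 0 on success, so polling must continue on 0, not on non-zero.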
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index a0e1ccca505b..73aac97fb5c9 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -74,11 +74,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr) static void moxart_mac_free_memory(struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); - int i; - - for (i = 0; i < RX_DESC_NUM; i++) - dma_unmap_single(&ndev->dev, priv->rx_mapping[i], - priv->rx_buf_size, DMA_FROM_DEVICE); if (priv->tx_desc_base) dma_free_coherent(&priv->pdev->dev, @@ -147,11 +142,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) desc + RX_REG_OFFSET_DESC1); priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i; - priv->rx_mapping[i] = dma_map_single(&ndev->dev, + priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev, priv->rx_buf[i], priv->rx_buf_size, DMA_FROM_DEVICE); - if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i])) + if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i])) netdev_err(ndev, "DMA mapping error\n"); moxart_desc_write(priv->rx_mapping[i], @@ -193,6 +188,7 @@ static int moxart_mac_open(struct net_device *ndev) static int moxart_mac_stop(struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); + int i; napi_disable(&priv->napi); @@ -204,6 +200,11 @@ static int moxart_mac_stop(struct net_device *ndev) /* disable all functions */ writel(0, priv->base + REG_MAC_CTRL); + /* unmap areas mapped in moxart_mac_setup_desc_ring() */ + for (i = 0; i < RX_DESC_NUM; i++) + dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i], + priv->rx_buf_size, DMA_FROM_DEVICE); + return 0; } @@ -240,7 +241,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) if (len > RX_BUF_SIZE) len = RX_BUF_SIZE; - dma_sync_single_for_cpu(&ndev->dev, + dma_sync_single_for_cpu(&priv->pdev->dev, priv->rx_mapping[rx_head], priv->rx_buf_size, DMA_FROM_DEVICE); skb = netdev_alloc_skb_ip_align(ndev, len); @@ -294,7 +295,7 @@ static void moxart_tx_finished(struct net_device *ndev) unsigned int tx_tail = priv->tx_tail; while (tx_tail != tx_head) { - dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail], + dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail], priv->tx_len[tx_tail], DMA_TO_DEVICE); ndev->stats.tx_packets++; @@ -358,9 +359,9 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb, len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len; - priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data, + priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data, len, DMA_TO_DEVICE); - if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) { + if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) { netdev_err(ndev, "DMA mapping error\n"); goto out_unlock; } @@ -379,7 +380,7 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb, len = ETH_ZLEN; } - dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head], + dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head], priv->tx_buf_size, DMA_TO_DEVICE); txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK); @@ -494,7 +495,7 @@ static int moxart_mac_probe(struct platform_device *pdev) priv->tx_buf_size = TX_BUF_SIZE; priv->rx_buf_size = RX_BUF_SIZE; - priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE * + priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE * TX_DESC_NUM, &priv->tx_base, GFP_DMA | GFP_KERNEL); if (!priv->tx_desc_base) { @@ -502,7 +503,7 @@ goto init_fail; } - priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE * + priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE * RX_DESC_NUM, &priv->rx_base, GFP_DMA | GFP_KERNEL); if (!priv->rx_desc_base) { diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index a06466ecca12..a55861ea4206 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1593,8 +1593,12 @@ int ocelot_init(struct ocelot *ocelot) ocelot_write_rix(ocelot, ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)), ANA_PGID_PGID, PGID_MC); - ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4); - ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6); + ocelot_write_rix(ocelot, + ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)), + ANA_PGID_PGID, PGID_MCIPV4); + ocelot_write_rix(ocelot, + ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)), + ANA_PGID_PGID, PGID_MCIPV6); /* Allow manual injection via DEVCPU_QS registers, and byte swap these * registers endianness.
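The moxart hunks above all enforce one DMA-API rule: the struct device handed to dma_map_single(), dma_sync_single_for_cpu()/for_device() and dma_unmap_single() must be the physical device that masters the bus (here the platform device), not the net_device's embedded device, and every call for a given buffer must name the same device, handle, size and direction. A condensed sketch of the invariant, with illustrative names:

	#include <linux/dma-mapping.h>
	#include <linux/platform_device.h>

	struct demo_priv {
		struct platform_device *pdev;
		void *rx_buf;
		size_t rx_buf_size;
		dma_addr_t rx_mapping;
	};

	static int demo_map_rx(struct demo_priv *priv)
	{
		struct device *dma_dev = &priv->pdev->dev;	/* the DMA-capable device */

		priv->rx_mapping = dma_map_single(dma_dev, priv->rx_buf,
						  priv->rx_buf_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, priv->rx_mapping))
			return -ENOMEM;
		return 0;
	}

	static void demo_unmap_rx(struct demo_priv *priv)
	{
		/* Same device, handle, size and direction as at map time */
		dma_unmap_single(&priv->pdev->dev, priv->rx_mapping,
				 priv->rx_buf_size, DMA_FROM_DEVICE);
	}

Mapping through the net_device's embedded device can appear to work on some platforms, but it breaks as soon as the architecture needs real DMA ops or an IOMMU is involved, since that device carries no DMA configuration.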
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index c4c4649b2088..b221b83ec5a6 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -206,9 +206,10 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, filter->type = OCELOT_VCAP_FILTER_OFFLOAD; break; case FLOW_ACTION_TRAP: - if (filter->block_id != VCAP_IS2) { + if (filter->block_id != VCAP_IS2 || + filter->lookup != 0) { NL_SET_ERR_MSG_MOD(extack, - "Trap action can only be offloaded to VCAP IS2"); + "Trap action can only be offloaded to VCAP IS2 lookup 0"); return -EOPNOTSUPP; } if (filter->goto_target != -1) { diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index d8c778ee6f1b..118572590607 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -373,7 +373,6 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, OCELOT_VCAP_BIT_0); vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0, ~filter->ingress_port_mask); - vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_ANY); vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH, OCELOT_VCAP_BIT_ANY); vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc); @@ -1143,6 +1142,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot, struct ocelot_vcap_filter *tmp; tmp = ocelot_vcap_block_find_filter_by_index(block, i); + /* Read back the filter's counters before moving it */ + vcap_entry_get(ocelot, i - 1, tmp); vcap_entry_set(ocelot, i, tmp); } @@ -1181,7 +1182,11 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot, struct ocelot_vcap_filter del_filter; int i, index; + /* Need to inherit the block_id so that vcap_entry_set() + * does not get confused and knows where to install it. 
+ */ memset(&del_filter, 0, sizeof(del_filter)); + del_filter.block_id = filter->block_id; /* Gets index of the filter */ index = ocelot_vcap_block_get_filter_index(block, filter); @@ -1196,6 +1201,8 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot, struct ocelot_vcap_filter *tmp; tmp = ocelot_vcap_block_find_filter_by_index(block, i); + /* Read back the filter's counters before moving it */ + vcap_entry_get(ocelot, i + 1, tmp); vcap_entry_set(ocelot, i, tmp); } diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index fc99ad8e4a38..1664e9184c9c 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -2895,11 +2895,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, status = myri10ge_xmit(curr, dev); if (status != 0) { dev_kfree_skb_any(curr); - if (segs != NULL) { - curr = segs; - segs = next; + skb_list_walk_safe(next, curr, next) { curr->next = NULL; - dev_kfree_skb_any(segs); + dev_kfree_skb_any(curr); } goto drop; } diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 3cae8449fadb..8a30be698f99 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -7114,9 +7114,8 @@ static int s2io_card_up(struct s2io_nic *sp) if (ret) { DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", dev->name); - s2io_reset(sp); - free_rx_buffers(sp); - return -ENOMEM; + ret = -ENOMEM; + goto err_fill_buff; } DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, ring->rx_bufs_left); @@ -7154,18 +7153,16 @@ static int s2io_card_up(struct s2io_nic *sp) /* Enable Rx Traffic and interrupts on the NIC */ if (start_nic(sp)) { DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name); - s2io_reset(sp); - free_rx_buffers(sp); - return -ENODEV; + ret = -ENODEV; + goto err_out; } /* Add interrupt service routine */ if (s2io_add_isr(sp) != 0) { if (sp->config.intr_type == MSI_X) s2io_rem_isr(sp); - s2io_reset(sp); - free_rx_buffers(sp); - return -ENODEV; + ret = -ENODEV; + goto err_out; } timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0); @@ -7185,6 +7182,20 @@ static int s2io_card_up(struct s2io_nic *sp) } return 0; + +err_out: + if (config->napi) { + if (config->intr_type == MSI_X) { + for (i = 0; i < sp->config.rx_ring_num; i++) + napi_disable(&sp->mac_control.rings[i].napi); + } else { + napi_disable(&sp->napi); + } + } +err_fill_buff: + s2io_reset(sp); + free_rx_buffers(sp); + return ret; } /** diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index 2643ea5948f4..154399c5453f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -196,7 +196,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg, } reg->dst_lmextn = swreg_lmextn(dst); - reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg); + reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg); return 0; } @@ -277,7 +277,7 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg, } reg->dst_lmextn = swreg_lmextn(dst); - reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg); + reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 7a8187458724..24578c48f075 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -363,7 
+363,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) return ret; attrs.split = eth_port.is_split; - attrs.splittable = !attrs.split; + attrs.splittable = eth_port.port_lanes > 1 && !attrs.split; attrs.lanes = eth_port.port_lanes; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = eth_port.label_port; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index dfc1f32cda2b..5ab230aab2cd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3373,21 +3373,21 @@ static void nfp_net_stat64(struct net_device *netdev, unsigned int start; do { - start = u64_stats_fetch_begin(&r_vec->rx_sync); + start = u64_stats_fetch_begin_irq(&r_vec->rx_sync); data[0] = r_vec->rx_pkts; data[1] = r_vec->rx_bytes; data[2] = r_vec->rx_drops; - } while (u64_stats_fetch_retry(&r_vec->rx_sync, start)); + } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start)); stats->rx_packets += data[0]; stats->rx_bytes += data[1]; stats->rx_dropped += data[2]; do { - start = u64_stats_fetch_begin(&r_vec->tx_sync); + start = u64_stats_fetch_begin_irq(&r_vec->tx_sync); data[0] = r_vec->tx_pkts; data[1] = r_vec->tx_bytes; data[2] = r_vec->tx_errors; - } while (u64_stats_fetch_retry(&r_vec->tx_sync, start)); + } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start)); stats->tx_packets += data[0]; stats->tx_bytes += data[1]; stats->tx_errors += data[2]; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index cd0c9623f7dd..311873ff57e3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -286,8 +286,6 @@ nfp_net_get_link_ksettings(struct net_device *netdev, /* Init to unknowns */ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); - ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); - ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); cmd->base.port = PORT_OTHER; cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; @@ -295,6 +293,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev, port = nfp_port_from_netdev(netdev); eth_port = nfp_port_get_eth_port(port); if (eth_port) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; nfp_net_set_fec_link_mode(eth_port, cmd); @@ -494,7 +494,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) unsigned int start; do { - start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); + start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync); data[0] = nn->r_vecs[i].rx_pkts; tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; @@ -502,10 +502,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) tmp[3] = nn->r_vecs[i].hw_csum_rx_error; tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail; tmp[5] = nn->r_vecs[i].hw_tls_rx; - } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start)); + } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start)); do { - start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); + start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync); data[1] = nn->r_vecs[i].tx_pkts; data[2] = nn->r_vecs[i].tx_busy; tmp[6] = nn->r_vecs[i].hw_csum_tx; @@ -515,7 +515,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) tmp[10] = nn->r_vecs[i].hw_tls_tx; tmp[11] = nn->r_vecs[i].tls_tx_fallback; tmp[12] = nn->r_vecs[i].tls_tx_no_fallback; - } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); + } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start)); data += NN_RVEC_PER_Q_STATS; @@ -1225,6 +1225,11 @@ nfp_port_get_module_info(struct net_device *netdev, u8 data; port = nfp_port_from_netdev(netdev); + if (!port) + return -EOPNOTSUPP; + + /* update port state to get latest interface */ + set_bit(NFP_PORT_CHANGED, &port->flags); eth_port = nfp_port_get_eth_port(port); if (!eth_port) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index 6ef48eb3a77d..b163489489e9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -874,7 +874,6 @@ area_cache_get(struct nfp_cpp *cpp, u32 id, } /* Adjust the start address to be cache size aligned */ - cache->id = id; cache->addr = addr & ~(u64)(cache->size - 1); /* Re-init to the new ID and address */ @@ -894,6 +893,8 @@ area_cache_get(struct nfp_cpp *cpp, u32 id, return NULL; } + cache->id = id; + exit: /* Adjust offset */ *offset = addr - cache->addr; diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index a6861df9904f..07fbd329fe93 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -249,25 +249,26 @@ static void nixge_hw_dma_bd_release(struct net_device *ndev) struct sk_buff *skb; int i; - for (i = 0; i < RX_BD_NUM; i++) { - phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], - phys); + if (priv->rx_bd_v) { + for (i = 0; i < RX_BD_NUM; i++) { + phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], + phys); - dma_unmap_single(ndev->dev.parent, phys_addr, - NIXGE_MAX_JUMBO_FRAME_SIZE, - DMA_FROM_DEVICE); + dma_unmap_single(ndev->dev.parent, phys_addr, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); - skb = (struct sk_buff *)(uintptr_t) - nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], - sw_id_offset); - dev_kfree_skb(skb); - } + skb = (struct sk_buff *)(uintptr_t) + nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], + sw_id_offset); + dev_kfree_skb(skb); + } - if (priv->rx_bd_v) dma_free_coherent(ndev->dev.parent, sizeof(*priv->rx_bd_v) * RX_BD_NUM, priv->rx_bd_v, priv->rx_bd_p); + } if (priv->tx_skb) devm_kfree(ndev->dev.parent, 
priv->tx_skb); @@ -899,6 +900,7 @@ static int nixge_open(struct net_device *ndev) err_rx_irq: free_irq(priv->tx_irq, ndev); err_tx_irq: + napi_disable(&priv->napi); phy_stop(phy); phy_disconnect(phy); tasklet_kill(&priv->dma_err_tasklet); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 2942102efd48..bde32f0845ca 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1166,6 +1166,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter, buffer_info->dma = 0; buffer_info->time_stamp = 0; tx_ring->next_to_use = ring_num; + dev_kfree_skb_any(skb); return; } buffer_info->mapped = true; @@ -2481,6 +2482,7 @@ static void pch_gbe_remove(struct pci_dev *pdev) unregister_netdev(netdev); pch_gbe_phy_hw_reset(&adapter->hw); + pci_dev_put(adapter->ptp_pdev); free_netdev(netdev); } @@ -2562,7 +2564,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, /* setup the private structure */ ret = pch_gbe_sw_init(adapter); if (ret) - goto err_free_netdev; + goto err_put_dev; /* Initialize PHY */ ret = pch_gbe_init_phy(adapter); @@ -2620,6 +2622,8 @@ static int pch_gbe_probe(struct pci_dev *pdev, err_free_adapter: pch_gbe_phy_hw_reset(&adapter->hw); +err_put_dev: + pci_dev_put(adapter->ptp_pdev); err_free_netdev: free_netdev(netdev); return ret; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index b0d8499d373b..31fbe8904222 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -251,7 +251,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = ionic_map_bars(ionic); if (err) - goto err_out_pci_disable_device; + goto err_out_pci_release_regions; /* Configure the device */ err = ionic_setup(ionic); @@ -353,6 +353,7 @@ err_out_teardown: err_out_unmap_bars: ionic_unmap_bars(ionic); +err_out_pci_release_regions: pci_release_regions(pdev); err_out_pci_disable_device: pci_disable_device(pdev); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index e95c09dc2c30..cb12d0171517 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -1286,7 +1286,7 @@ static int ionic_set_nic_features(struct ionic_lif *lif, if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH) ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); - if ((vlan_flags & features) && + if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) && !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features))) dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n"); @@ -2383,11 +2383,15 @@ err_out: * than the full array, but leave the qcq shells in place */ for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { - lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; - ionic_qcq_free(lif, lif->txqcqs[i]); + if (lif->txqcqs && lif->txqcqs[i]) { + lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, lif->txqcqs[i]); + } - lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; - ionic_qcq_free(lif, lif->rxqcqs[i]); + if (lif->rxqcqs && lif->rxqcqs[i]) { + lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, lif->rxqcqs[i]); + } } return err; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c 
index e14869a2e24a..00b6985edea0 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -378,8 +378,8 @@ try_again: ionic_opcode_to_str(opcode), opcode, ionic_error_to_str(err), err); - msleep(1000); iowrite32(0, &idev->dev_cmd_regs->done); + msleep(1000); iowrite32(1, &idev->dev_cmd_regs->doorbell); goto try_again; } @@ -392,6 +392,8 @@ try_again: return ionic_error_to_errno(err); } + ionic_dev_cmd_clean(ionic); + return 0; } @@ -567,8 +569,14 @@ int ionic_port_reset(struct ionic *ionic) static int __init ionic_init_module(void) { + int ret; + ionic_debugfs_create(); - return ionic_bus_register_driver(); + ret = ionic_bus_register_driver(); + if (ret) + ionic_debugfs_destroy(); + + return ret; } static void __exit ionic_cleanup_module(void) diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 21c906200e79..d210632676d3 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -752,6 +752,9 @@ qede_build_skb(struct qede_rx_queue *rxq, buf = page_address(bd->data) + bd->page_offset; skb = build_skb(buf, rxq->rx_buf_seg_size); + if (unlikely(!skb)) + return NULL; + skb_reserve(skb, pad); skb_put(skb, len); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index c9f32fc50254..99fd35a8ca75 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2475,6 +2475,7 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb, skb_shinfo(skb)->nr_frags); if (tx_cb->seg_count == -1) { netdev_err(ndev, "%s: invalid segment count!\n", __func__); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -3628,7 +3629,8 @@ static void ql_reset_work(struct work_struct *work) qdev->mem_map_registers; unsigned long hw_flags; - if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { + if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || + test_bit(QL_RESET_START, &qdev->flags)) { clear_bit(QL_LINK_MASTER, &qdev->flags); /* diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index bd0607680329..2fd5c6fdb500 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -2991,7 +2991,7 @@ static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter) QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val); dev_info(&adapter->pdev->dev, "%s: lock recovery initiated\n", __func__); - msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY); + mdelay(QLC_83XX_DRV_LOCK_RECOVERY_DELAY); val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK); id = ((val >> 2) & 0xF); if (id == adapter->portnum) { @@ -3027,7 +3027,7 @@ int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter) if (status) break; - msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY); + mdelay(QLC_83XX_DRV_LOCK_WAIT_DELAY); i++; if (i == 1) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 8cf997afd826..28850f7af5a1 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4184,7 +4184,6 @@ static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, struct sk_buff *skb, u32 *opts) { - u32 transport_offset = (u32)skb_transport_offset(skb); struct skb_shared_info *shinfo = skb_shinfo(skb); u32 mss = shinfo->gso_size; @@ -4201,7 +4200,7 @@ 
static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, WARN_ON_ONCE(1); } - opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[0] |= skb_transport_offset(skb) << GTTCPHO_SHIFT; opts[1] |= mss << TD1_MSS_SHIFT; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 ip_protocol; @@ -4229,7 +4228,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, else WARN_ON_ONCE(1); - opts[1] |= transport_offset << TCPHO_SHIFT; + opts[1] |= skb_transport_offset(skb) << TCPHO_SHIFT; } else { unsigned int padto = rtl_quirk_packet_padto(tp, skb); @@ -4402,14 +4401,13 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { - int transport_offset = skb_transport_offset(skb); struct rtl8169_private *tp = netdev_priv(dev); if (skb_is_gso(skb)) { if (tp->mac_version == RTL_GIGA_MAC_VER_34) features = rtl8168evl_fix_tso(skb, features); - if (transport_offset > GTTCPHO_MAX && + if (skb_transport_offset(skb) > GTTCPHO_MAX && rtl_chip_supports_csum_v2(tp)) features &= ~NETIF_F_ALL_TSO; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -4420,7 +4418,7 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb, if (rtl_quirk_packet_padto(tp, skb)) features &= ~NETIF_F_CSUM_MASK; - if (transport_offset > TCPHO_MAX && + if (skb_transport_offset(skb) > TCPHO_MAX && rtl_chip_supports_csum_v2(tp)) features &= ~NETIF_F_CSUM_MASK; } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index f96eed67e1a2..9e7b85e178fd 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2364,6 +2364,7 @@ static int __maybe_unused ravb_resume(struct device *dev) ret = ravb_open(ndev); if (ret < 0) return ret; + ravb_set_rx_mode(ndev); netif_device_attach(ndev); } diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 815766620979..e4d919de7e3f 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -1273,7 +1273,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port, bool removing; int err = 0; - entry = kzalloc(sizeof(*entry), GFP_KERNEL); + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return -ENOMEM; diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 4fa72b573c17..eb1be7302082 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1916,7 +1916,10 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) efx_update_sw_stats(efx, stats); out: + /* releasing a DMA coherent buffer with BH disabled can panic */ + spin_unlock_bh(&efx->stats_lock); efx_nic_free_buffer(efx, &stats_buf); + spin_lock_bh(&efx->stats_lock); return rc; } @@ -2240,7 +2243,7 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb, * guaranteed to satisfy the second as we only attempt TSO if * inner_network_header <= 208. */ - ip_tot_len = -EFX_TSO2_MAX_HDRLEN; + ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN; EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN + (tcp->doff << 2u) > ip_tot_len); @@ -3252,6 +3255,30 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) bool was_enabled = efx->port_enabled; int rc; +#ifdef CONFIG_SFC_SRIOV + /* If this function is a VF and we have access to the parent PF, + * then use the PF control path to attempt to change the VF MAC address. 
+ */ + if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { + struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u8 mac[ETH_ALEN]; + + /* net_dev->dev_addr can be zeroed by efx_net_stop in + * efx_ef10_sriov_set_vf_mac, so pass in a copy. + */ + ether_addr_copy(mac, efx->net_dev->dev_addr); + + rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac); + if (!rc) + return 0; + + netif_dbg(efx, drv, efx->net_dev, + "Updating VF mac via PF failed (%d), setting directly\n", + rc); + } +#endif + efx_device_detach_sync(efx); efx_net_stop(efx->net_dev); @@ -3274,40 +3301,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx_net_open(efx->net_dev); efx_device_attach_if_not_resetting(efx); -#ifdef CONFIG_SFC_SRIOV - if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; - - if (rc == -EPERM) { - struct efx_nic *efx_pf; - - /* Switch to PF and change MAC address on vport */ - efx_pf = pci_get_drvdata(pci_dev_pf); - - rc = efx_ef10_sriov_set_vf_mac(efx_pf, - nic_data->vf_index, - efx->net_dev->dev_addr); - } else if (!rc) { - struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); - struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; - unsigned int i; - - /* MAC address successfully changed by VF (with MAC - * spoofing) so update the parent PF if possible. - */ - for (i = 0; i < efx_pf->vf_count; ++i) { - struct ef10_vf *vf = nic_data->vf + i; - - if (vf->efx == efx) { - ether_addr_copy(vf->mac, - efx->net_dev->dev_addr); - return 0; - } - } - } - } else -#endif if (rc == -EPERM) { netif_err(efx, drv, efx->net_dev, "Cannot change MAC address; use sfboot to enable" @@ -3563,6 +3556,11 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx) n_parts++; } + if (!n_parts) { + kfree(parts); + return 0; + } + rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); fail: if (rc) diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index 67fe44db6b61..63a44ee763be 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -200,6 +200,7 @@ static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb, skb->len, skb->data_len, channel->channel); if (!efx->n_channels || !efx->n_tx_channels || !channel) { netif_stop_queue(net_dev); + dev_kfree_skb_any(skb); goto err; } diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 84041cd587d7..b44acb6e3953 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -411,8 +411,9 @@ fail1: static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) { struct pci_dev *dev = efx->pci_dev; + struct efx_ef10_nic_data *nic_data = efx->nic_data; unsigned int vfs_assigned = pci_vfs_assigned(dev); - int rc = 0; + int i, rc = 0; if (vfs_assigned && !force) { netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " @@ -420,10 +421,13 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) return -EBUSY; } - if (!vfs_assigned) + if (!vfs_assigned) { + for (i = 0; i < efx->vf_count; i++) + nic_data->vf[i].pci_dev = NULL; pci_disable_sriov(dev); - else + } else { rc = -EBUSY; + } efx_ef10_sriov_free_vf_vswitching(efx); efx->vf_count = 0; diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 0a8799a208cf..c49168ba7a4d 100644 --- 
a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -287,6 +287,7 @@ int efx_probe_interrupts(struct efx_nic *efx) efx->n_channels = 1; efx->n_rx_channels = 1; efx->n_tx_channels = 1; + efx->tx_channel_offset = 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; rc = pci_enable_msi(efx->pci_dev); @@ -307,6 +308,7 @@ int efx_probe_interrupts(struct efx_nic *efx) efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0); efx->n_rx_channels = 1; efx->n_tx_channels = 1; + efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0; efx->n_xdp_channels = 0; efx->xdp_channel_offset = efx->n_channels; efx->legacy_irq = efx->pci_dev->irq; @@ -744,7 +746,9 @@ void efx_remove_channels(struct efx_nic *efx) int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) { - struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; + struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel, + *ptp_channel = efx_ptp_channel(efx); + struct efx_ptp_data *ptp_data = efx->ptp_data; unsigned int i, next_buffer_table = 0; u32 old_rxq_entries, old_txq_entries; int rc, rc2; @@ -797,11 +801,8 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) old_txq_entries = efx->txq_entries; efx->rxq_entries = rxq_entries; efx->txq_entries = txq_entries; - for (i = 0; i < efx->n_channels; i++) { - channel = efx->channel[i]; - efx->channel[i] = other_channel[i]; - other_channel[i] = channel; - } + for (i = 0; i < efx->n_channels; i++) + swap(efx->channel[i], other_channel[i]); /* Restart buffer table allocation */ efx->next_buffer_table = next_buffer_table; @@ -817,6 +818,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) } out: + efx->ptp_data = NULL; /* Destroy unused channel structures */ for (i = 0; i < efx->n_channels; i++) { channel = other_channel[i]; @@ -827,6 +829,7 @@ out: } } + efx->ptp_data = ptp_data; rc2 = efx_soft_enable_interrupts(efx); if (rc2) { rc = rc ? rc : rc2; @@ -843,11 +846,9 @@ rollback: /* Swap back */ efx->rxq_entries = old_rxq_entries; efx->txq_entries = old_txq_entries; - for (i = 0; i < efx->n_channels; i++) { - channel = efx->channel[i]; - efx->channel[i] = other_channel[i]; - other_channel[i] = channel; - } + for (i = 0; i < efx->n_channels; i++) + swap(efx->channel[i], other_channel[i]); + efx_ptp_update_channel(efx, ptp_channel); goto out; } @@ -859,10 +860,6 @@ int efx_set_channels(struct efx_nic *efx) int xdp_queue_number; int rc; - efx->tx_channel_offset = - efx_separate_tx_channels ? 
- efx->n_channels - efx->n_tx_channels : 0; - if (efx->xdp_tx_queue_count) { EFX_WARN_ON_PARANOID(efx->xdp_tx_queues); diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index 40b2af8bfb81..2ac3c8f1b04b 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -157,7 +157,8 @@ struct efx_filter_spec { u32 flags:6; u32 dmaq_id:12; u32 rss_context; - __be16 outer_vid __aligned(4); /* allow jhash2() of match values */ + u32 vport_id; + __be16 outer_vid; __be16 inner_vid; u8 loc_mac[ETH_ALEN]; u8 rem_mac[ETH_ALEN]; diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9f7dfdf708cf..8aecb4bd2c0d 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1522,7 +1522,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel) static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) { - return true; + return channel && channel->channel >= channel->efx->tx_channel_offset; } static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel) diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 797e51802ccb..a2b4e3befa59 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -45,6 +45,7 @@ #include "farch_regs.h" #include "tx.h" #include "nic.h" /* indirectly includes ptp.h */ +#include "efx_channels.h" /* Maximum number of events expected to make up a PTP event */ #define MAX_EVENT_FRAGS 3 @@ -541,6 +542,12 @@ struct efx_channel *efx_ptp_channel(struct efx_nic *efx) return efx->ptp_data ? efx->ptp_data->channel : NULL; } +void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel) +{ + if (efx->ptp_data) + efx->ptp_data->channel = channel; +} + static u32 last_sync_timestamp_major(struct efx_nic *efx) { struct efx_channel *channel = efx_ptp_channel(efx); @@ -1093,7 +1100,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb) tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type); if (tx_queue && tx_queue->timestamping) { + /* This code invokes normal driver TX code which is always + * protected from softirqs when called from generic TX code, + * which in turn disables preemption. Look at __dev_queue_xmit + * which uses rcu_read_lock_bh disabling preemption for RCU + * plus disabling softirqs. We do not need RCU reader + * protection here. + * + * Although it is theoretically safe for current PTP TX/RX code + * running without disabling softirqs, there are three good + * reasons for doing so: + * + * 1) The code invoked is mainly implemented for non-PTP + * packets and it is always executed with softirqs + * disabled. + * 2) This being a single PTP packet, better to not + * interrupt its processing by softirqs which can lead + * to high latencies. + * 3) netdev_xmit_more checks preemption is disabled and + * triggers a BUG_ON if not. + */ + local_bh_disable(); efx_enqueue_skb(tx_queue, skb); + local_bh_enable(); } else { WARN_ONCE(1, "PTP channel has no timestamped tx queue\n"); dev_kfree_skb_any(skb);
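The long comment added above is the justification for the local_bh_disable()/local_bh_enable() pair: the driver's enqueue path normally runs under __dev_queue_xmit(), which takes rcu_read_lock_bh() and therefore executes with bottom halves off and preemption disabled, and netdev_xmit_more() BUGs if that is not the case. A driver that injects a packet from another context, as the PTP worker does here, has to recreate that environment. A stripped-down sketch of the calling convention; demo_tx_queue and demo_enqueue_skb are hypothetical names:

	#include <linux/bottom_half.h>
	#include <linux/skbuff.h>

	struct demo_tx_queue;
	void demo_enqueue_skb(struct demo_tx_queue *txq, struct sk_buff *skb);

	static void demo_xmit_from_process_context(struct demo_tx_queue *txq,
						   struct sk_buff *skb)
	{
		/* Recreate the softirq-disabled environment the enqueue path
		 * gets for free under __dev_queue_xmit().
		 */
		local_bh_disable();
		demo_enqueue_skb(txq, skb);
		local_bh_enable();
	}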
@@ -1443,6 +1472,11 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) int rc = 0; unsigned int pos; + if (efx->ptp_data) { + efx->ptp_data->channel = channel; + return 0; + } + ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL); efx->ptp_data = ptp; if (!efx->ptp_data) @@ -2179,7 +2213,7 @@ static const struct efx_channel_type efx_ptp_channel_type = { .pre_probe = efx_ptp_probe_channel, .post_remove = efx_ptp_remove_channel, .get_name = efx_ptp_get_channel_name, - /* no copy operation; there is no need to reallocate this channel */ + .copy = efx_copy_channel, .receive_skb = efx_ptp_rx, .want_txqs = efx_ptp_want_txqs, .keep_eventq = false, diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h index 9855e8c9e544..7b1ef7002b3f 100644 --- a/drivers/net/ethernet/sfc/ptp.h +++ b/drivers/net/ethernet/sfc/ptp.h @@ -16,6 +16,7 @@ struct ethtool_ts_info; int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); void efx_ptp_defer_probe_with_channel(struct efx_nic *efx); struct efx_channel *efx_ptp_channel(struct efx_nic *efx); +void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel); void efx_ptp_remove(struct efx_nic *efx); int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr); int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr); diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index e423b17e2a14..36b46ddb6710 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -166,6 +166,9 @@ static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) struct efx_nic *efx = rx_queue->efx; int i; + if (unlikely(!rx_queue->page_ring)) + return; + /* Unmap and release the pages in the recycle ring. Remove the ring. */ for (i = 0; i <= rx_queue->page_ptr_mask; i++) { struct page *page = rx_queue->page_ring[i]; @@ -673,17 +676,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left, (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) return false; - return memcmp(&left->outer_vid, &right->outer_vid, + return memcmp(&left->vport_id, &right->vport_id, sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) == 0; + offsetof(struct efx_filter_spec, vport_id)) == 0; } u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) { - BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); - return jhash2((const u32 *)&spec->outer_vid, + BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3); + return jhash2((const u32 *)&spec->vport_id, (sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) / 4, + offsetof(struct efx_filter_spec, vport_id)) / 4, 0); } diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 1665529a7271..fcc7de8ae2bf 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -545,7 +545,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, * previous packets out.
*/ if (!netdev_xmit_more()) - efx_tx_send_pending(tx_queue->channel); + efx_tx_send_pending(efx_get_tx_channel(efx, index)); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 51cd7dca91cd..3a7d2baec6c1 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -1513,14 +1513,14 @@ static void epic_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct epic_private *ep = netdev_priv(dev); + unregister_netdev(dev); dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); - unregister_netdev(dev); pci_iounmap(pdev, ep->ioaddr); - pci_release_regions(pdev); free_netdev(dev); + pci_release_regions(pdev); pci_disable_device(pdev); /* pci_power_off(pdev, -1); */ } diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 823d9a7184fe..6c4479c31097 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2422,7 +2422,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) if (irq == -EPROBE_DEFER) { retval = -EPROBE_DEFER; goto out_0; - } else if (irq <= 0) { + } else if (irq < 0) { pr_warn("Could not allocate irq resource\n"); retval = -ENODEV; goto out_0; diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index ef3634d1b9f7..b9acee214bb6 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1958,11 +1958,13 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr) ret = PTR_ERR(priv->phydev); dev_err(priv->dev, "get_phy_device err(%d)\n", ret); priv->phydev = NULL; + mdiobus_unregister(bus); return -ENODEV; } ret = phy_device_register(priv->phydev); if (ret) { + phy_device_free(priv->phydev); mdiobus_unregister(bus); dev_err(priv->dev, "phy_device_register err(%d)\n", ret); diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c index cd478d2cd871..00f6d347eaf7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c @@ -57,10 +57,6 @@ #define TSE_PCS_USE_SGMII_ENA BIT(0) #define TSE_PCS_IF_USE_SGMII 0x03 -#define SGMII_ADAPTER_CTRL_REG 0x00 -#define SGMII_ADAPTER_DISABLE 0x0001 -#define SGMII_ADAPTER_ENABLE 0x0000 - #define AUTONEGO_LINK_TIMER 20 static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs) @@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, unsigned int speed) { void __iomem *tse_pcs_base = pcs->tse_pcs_base; - void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; u32 val; - writew(SGMII_ADAPTER_ENABLE, - sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); - pcs->autoneg = phy_dev->autoneg; if (phy_dev->autoneg == AUTONEG_ENABLE) { diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h index 442812c0a4bd..694ac25ef426 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h @@ -10,6 +10,10 @@ #include #include +#define SGMII_ADAPTER_CTRL_REG 0x00 +#define SGMII_ADAPTER_ENABLE 0x0000 +#define SGMII_ADAPTER_DISABLE 0x0001 + struct tse_pcs { struct device *dev; void __iomem *tse_pcs_base; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 2342d497348e..fd1b0cc6b5fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -363,6 +363,7 @@ bypass_clk_reset_gpio: data->fix_mac_speed = tegra_eqos_fix_speed; data->init = tegra_eqos_init; data->bsp_priv = eqos; + data->sph_disable = 1; err = tegra_eqos_init(pdev, eqos); if (err < 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index a9087dae767d..5406f5a9bbe5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -243,6 +243,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->has_gmac4 = 1; plat->force_sf_dma_mode = 0; plat->tso_en = 1; + plat->sph_disable = 1; plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; @@ -668,6 +669,7 @@ static void intel_eth_pci_remove(struct pci_dev *pdev) pci_free_irq_vectors(pdev); + clk_disable_unprepare(priv->plat->stmmac_clk); clk_unregister_fixed_rate(priv->plat->stmmac_clk); pcim_iounmap_regions(pdev, BIT(0)); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 752658ec7bee..50ef68497bce 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -261,11 +261,9 @@ static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac, if (ret) return ret; - devm_add_action_or_reset(dwmac->dev, - (void(*)(void *))clk_disable_unprepare, - dwmac->rgmii_tx_clk); - - return 0; + return devm_add_action_or_reset(dwmac->dev, + (void(*)(void *))clk_disable_unprepare, + clk); } static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index f37b6d57b2fe..142bf912011e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -18,9 +18,6 @@ #include "altr_tse_pcs.h" -#define SGMII_ADAPTER_CTRL_REG 0x00 -#define SGMII_ADAPTER_DISABLE 0x0001 - #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 @@ -62,14 +59,13 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) { struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; void __iomem *splitter_base = dwmac->splitter_base; - void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base; void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base; struct device *dev = dwmac->dev; struct net_device *ndev = dev_get_drvdata(dev); struct phy_device *phy_dev = ndev->phydev; u32 val; - if ((tse_pcs_base) && (sgmii_adapter_base)) + if (sgmii_adapter_base) writew(SGMII_ADAPTER_DISABLE, sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); @@ -93,8 +89,11 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); } - if (tse_pcs_base && sgmii_adapter_base) + if (phy_dev && sgmii_adapter_base) { + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed); + } } static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 
cad6588840d8..958bbcfc2668 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -895,6 +895,7 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv) ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn, &gmac->mux_handle, priv, priv->mii); + of_node_put(mdio_mux); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index d2d344b23f1c..b248c80b5bb3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -215,6 +215,9 @@ static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan) if (queue == 0 || queue == 4) { value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK; value |= MTL_RXQ_DMA_Q04MDMACH(chan); + } else if (queue > 4) { + value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4); + value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4); } else { value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue); value |= MTL_RXQ_DMA_QXMDMACH(chan, queue); @@ -717,6 +720,8 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, if (fc & FLOW_RX) { pr_debug("\tReceive Flow-Control ON\n"); flow |= GMAC_RX_FLOW_CTRL_RFE; + } else { + pr_debug("\tReceive Flow-Control OFF\n"); } writel(flow, ioaddr + GMAC_RX_FLOW_CTRL); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 07b1b8374cd2..53efcc9c40e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -68,9 +68,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) writel(value, ioaddr + PTP_TCR); /* wait for present system time initialize to complete */ - return readl_poll_timeout(ioaddr + PTP_TCR, value, + return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value, !(value & PTP_TCR_TSINIT), - 10000, 100000); + 10, 100000); } static int config_addend(void __iomem *ioaddr, u32 addend) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 97c7c9fec326..e8358a1a02df 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -738,19 +738,10 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) struct timespec64 now; u32 sec_inc = 0; u64 temp = 0; - int ret; if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; - ret = clk_prepare_enable(priv->plat->clk_ptp_ref); - if (ret < 0) { - netdev_warn(priv->dev, - "failed to enable PTP reference clock: %pe\n", - ERR_PTR(ret)); - return ret; - } - stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); priv->systime_flags = systime_flags; @@ -1052,8 +1043,16 @@ static void stmmac_mac_link_up(struct phylink_config *config, ctrl |= priv->hw->link.duplex; /* Flow Control operation */ - if (tx_pause && rx_pause) - stmmac_mac_flow_ctrl(priv, duplex); + if (rx_pause && tx_pause) + priv->flow_ctrl = FLOW_AUTO; + else if (rx_pause && !tx_pause) + priv->flow_ctrl = FLOW_RX; + else if (!rx_pause && tx_pause) + priv->flow_ctrl = FLOW_TX; + else + priv->flow_ctrl = FLOW_OFF; + + stmmac_mac_flow_ctrl(priv, duplex); writel(ctrl, priv->ioaddr + MAC_CTRL_REG); @@ -2766,6 +2765,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) stmmac_mmc_setup(priv); + if (ptp_register) { + ret = 
clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) + netdev_warn(priv->dev, + "failed to enable PTP reference clock: %pe\n", + ERR_PTR(ret)); + } + ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) netdev_warn(priv->dev, "PTP not supported by HW\n"); @@ -2922,6 +2929,15 @@ static int stmmac_open(struct net_device *dev) goto init_error; } + if (priv->plat->serdes_powerup) { + ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); + if (ret < 0) { + netdev_err(priv->dev, "%s: Serdes powerup failed\n", + __func__); + goto init_error; + } + } + ret = stmmac_hw_setup(dev, true); if (ret < 0) { netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); @@ -3040,6 +3056,10 @@ static int stmmac_release(struct net_device *dev) /* Disable the MAC Rx/Tx */ stmmac_mac_set(priv, priv->ioaddr, false); + /* Power down the SerDes if one is present */ + if (priv->plat->serdes_powerdown) + priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); + netif_carrier_off(dev); stmmac_release_ptp(priv); @@ -5062,7 +5082,7 @@ int stmmac_dvr_probe(struct device *device, dev_info(priv->device, "TSO feature enabled\n"); } - if (priv->dma_cap.sphen) { + if (priv->dma_cap.sphen && !priv->plat->sph_disable) { ndev->hw_features |= NETIF_F_GRO; if (!priv->plat->sph_disable) { priv->sph = true; @@ -5197,14 +5217,6 @@ int stmmac_dvr_probe(struct device *device, goto error_netdev_register; } - if (priv->plat->serdes_powerup) { - ret = priv->plat->serdes_powerup(ndev, - priv->plat->bsp_priv); - - if (ret < 0) - goto error_serdes_powerup; - } - #ifdef CONFIG_DEBUG_FS stmmac_init_fs(ndev); #endif @@ -5216,8 +5228,6 @@ int stmmac_dvr_probe(struct device *device, return ret; -error_serdes_powerup: - unregister_netdev(ndev); error_netdev_register: phylink_destroy(priv->phylink); error_phy_setup: diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 272cb47af9f2..a7a1227c9b92 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -175,7 +175,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev, return -ENOMEM; /* Enable pci device */ - ret = pci_enable_device(pdev); + ret = pcim_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__); @@ -227,8 +227,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev) pcim_iounmap_regions(pdev, BIT(i)); break; } - - pci_disable_device(pdev); } static int __maybe_unused stmmac_pci_suspend(struct device *dev) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 1c10018d3ac7..6539b9276c09 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -108,10 +108,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en"); axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm"); - axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe"); - axi->axi_fb = of_property_read_bool(np, "snps,axi_fb"); - axi->axi_mb = of_property_read_bool(np, "snps,axi_mb"); - axi->axi_rb = of_property_read_bool(np, "snps,axi_rb"); + axi->axi_kbbe = of_property_read_bool(np, "snps,kbbe"); + axi->axi_fb = of_property_read_bool(np, "snps,fb"); + axi->axi_mb = of_property_read_bool(np, "snps,mb"); + axi->axi_rb = of_property_read_bool(np, "snps,rb"); if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
axi->axi_wr_osr_lmt = 1; @@ -432,8 +432,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->phylink_node = np; /* Get max speed of operation from device tree */ - if (of_property_read_u32(np, "max-speed", &plat->max_speed)) - plat->max_speed = -1; + of_property_read_u32(np, "max-speed", &plat->max_speed); plat->bus_id = of_alias_get_id(np, "ethernet"); if (plat->bus_id < 0) @@ -818,7 +817,13 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) if (ret) return ret; - stmmac_init_tstamp_counter(priv, priv->systime_flags); + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) { + netdev_warn(priv->dev, + "failed to enable PTP reference clock: %pe\n", + ERR_PTR(ret)); + return ret; + } } return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index 0462dcc93e53..dd5c4ef92ef3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -1084,8 +1084,9 @@ static int stmmac_test_rxp(struct stmmac_priv *priv) unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00}; struct tc_cls_u32_offload cls_u32 = { }; struct stmmac_packet_attrs attr = { }; - struct tc_action **actions, *act; + struct tc_action **actions; struct tc_u32_sel *sel; + struct tcf_gact *gact; struct tcf_exts *exts; int ret, i, nk = 1; @@ -1104,14 +1105,14 @@ static int stmmac_test_rxp(struct stmmac_priv *priv) goto cleanup_sel; } - actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL); + actions = kcalloc(nk, sizeof(*actions), GFP_KERNEL); if (!actions) { ret = -ENOMEM; goto cleanup_exts; } - act = kzalloc(nk * sizeof(*act), GFP_KERNEL); - if (!act) { + gact = kcalloc(nk, sizeof(*gact), GFP_KERNEL); + if (!gact) { ret = -ENOMEM; goto cleanup_actions; } @@ -1126,9 +1127,7 @@ static int stmmac_test_rxp(struct stmmac_priv *priv) exts->nr_actions = nk; exts->actions = actions; for (i = 0; i < nk; i++) { - struct tcf_gact *gact = to_gact(&act[i]); - - actions[i] = &act[i]; + actions[i] = (struct tc_action *)&gact[i]; gact->tcf_action = TC_ACT_SHOT; } @@ -1152,7 +1151,7 @@ static int stmmac_test_rxp(struct stmmac_priv *priv) stmmac_tc_setup_cls_u32(priv, priv, &cls_u32); cleanup_act: - kfree(act); + kfree(gact); cleanup_actions: kfree(actions); cleanup_exts: diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 69fc47089e62..940db4ec5714 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2063,9 +2063,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) skb_reserve(copy_skb, 2); skb_put(copy_skb, len); - dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); - dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); + dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); /* Reuse original ring buffer. 
*/ hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 0805edef5625..059d68d48f1e 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1716,6 +1716,7 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common) if (IS_ERR(cpts)) { int ret = PTR_ERR(cpts); + of_node_put(node); if (ret == -EOPNOTSUPP) { dev_info(dev, "cpts disabled\n"); return 0; @@ -2064,9 +2065,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) if (!node) return -ENOENT; common->port_num = of_get_child_count(node); + of_node_put(node); if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS) return -ENOENT; - of_node_put(node); if (common->port_num != 1) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index b0f00b4edd94..5af0f9f8c097 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -864,6 +864,8 @@ static int cpsw_ndo_open(struct net_device *ndev) err_cleanup: if (!cpsw->usage_count) { + napi_disable(&cpsw->napi_rx); + napi_disable(&cpsw->napi_tx); cpdma_ctlr_stop(cpsw->dma); cpsw_destroy_xdp_rxqs(cpsw); } diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 31cefd6ef680..a1ee205d6a88 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1255,8 +1255,10 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw) data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM, sizeof(struct cpsw_slave_data), GFP_KERNEL); - if (!data->slave_data) + if (!data->slave_data) { + of_node_put(tmp_node); return -ENOMEM; + } /* Populate all the child nodes here... 
*/ @@ -1353,6 +1355,7 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw) err_node_put: of_node_put(port_np); + of_node_put(tmp_node); return ret; } diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index c62f474b6d08..fcebd2418dbd 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1302,12 +1302,15 @@ static int tsi108_open(struct net_device *dev) data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size, &data->rxdma, GFP_KERNEL); - if (!data->rxring) + if (!data->rxring) { + free_irq(data->irq_num, dev); return -ENOMEM; + } data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size, &data->txdma, GFP_KERNEL); if (!data->txring) { + free_irq(data->irq_num, dev); dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring, data->rxdma); return -ENOMEM; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index bbdcba88c021..3d91baf2e55a 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -2060,15 +2060,14 @@ static int axienet_probe(struct platform_device *pdev) if (ret) goto cleanup_clk; - lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); - if (lp->phy_node) { - ret = axienet_mdio_setup(lp); - if (ret) - dev_warn(&pdev->dev, - "error registering MDIO bus: %d\n", ret); - } + ret = axienet_mdio_setup(lp); + if (ret) + dev_warn(&pdev->dev, + "error registering MDIO bus: %d\n", ret); + if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { + lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!lp->phy_node) { dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); ret = -EINVAL; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 4bd44fbc6ecf..f6ea4a0ad5df 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -820,10 +820,10 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) { struct mii_bus *bus; - int rc; struct resource res; struct device_node *np = of_get_parent(lp->phy_node); struct device_node *npp; + int rc, ret; /* Don't register the MDIO bus if the phy_node or its parent node * can't be found. 
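The am65-cpsw, cpsw_new, and xemaclite hunks around this point all fix the same class of bug: a device_node returned by an of_get_*() helper carries a reference that must be dropped with of_node_put() on every exit path, error paths included. A minimal sketch of the pattern these fixes enforce follows; the example_* names are hypothetical and stand in for driver-specific code:

	#include <linux/errno.h>
	#include <linux/of.h>

	/* Hypothetical stand-in for driver-specific port setup. */
	static int example_setup_ports(struct device_node *ports)
	{
		return 0;
	}

	static int example_probe_ports(struct device_node *parent)
	{
		struct device_node *ports;
		int ret;

		/* of_get_child_by_name() takes a reference on the node. */
		ports = of_get_child_by_name(parent, "ethernet-ports");
		if (!ports)
			return -ENOENT;

		ret = example_setup_ports(ports);

		/* Drop the reference on success and failure alike; leaking
		 * it on an early error return is exactly what the hunks
		 * here are correcting.
		 */
		of_node_put(ports);
		return ret;
	}

Releasing the node immediately after its last use, rather than once per return statement, keeps later error paths from reintroducing the leak.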
@@ -833,8 +833,14 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) return -ENODEV; } npp = of_get_parent(np); - - of_address_to_resource(npp, 0, &res); + ret = of_address_to_resource(npp, 0, &res); + of_node_put(npp); + if (ret) { + dev_err(dev, "%s resource error!\n", + dev->of_node->full_name); + of_node_put(np); + return ret; + } if (lp->ndev->mem_start != res.start) { struct phy_device *phydev; phydev = of_phy_find_device(lp->phy_node); @@ -843,6 +849,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) "MDIO of the phy is not registered yet\n"); else put_device(&phydev->mdio.dev); + of_node_put(np); return 0; } @@ -855,6 +862,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) bus = mdiobus_alloc(); if (!bus) { dev_err(dev, "Failed to allocate mdiobus\n"); + of_node_put(np); return -ENOMEM; } @@ -867,6 +875,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) bus->parent = dev; rc = of_mdiobus_register(bus, np); + of_node_put(np); if (rc) { dev_err(dev, "Failed to register mdio bus.\n"); goto err_register; @@ -923,8 +932,6 @@ static int xemaclite_open(struct net_device *dev) xemaclite_disable_interrupts(lp); if (lp->phy_node) { - u32 bmcr; - lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, xemaclite_adjust_link, 0, PHY_INTERFACE_MODE_MII); @@ -935,19 +942,6 @@ static int xemaclite_open(struct net_device *dev) /* EmacLite doesn't support giga-bit speeds */ phy_set_max_speed(lp->phy_dev, SPEED_100); - - /* Don't advertise 1000BASE-T Full/Half duplex speeds */ - phy_write(lp->phy_dev, MII_CTRL1000, 0); - - /* Advertise only 10 and 100mbps full/half duplex speeds */ - phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL | - ADVERTISE_CSMA); - - /* Restart auto negotiation */ - bmcr = phy_read(lp->phy_dev, MII_BMCR); - bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); - phy_write(lp->phy_dev, MII_BMCR, bmcr); - phy_start(lp->phy_dev); } diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 5ddb2dbb8572..081939cb420b 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -772,7 +772,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct geneve_sock *gs4, struct flowi4 *fl4, const struct ip_tunnel_info *info, - __be16 dport, __be16 sport) + __be16 dport, __be16 sport, + __u8 *full_tos) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); @@ -797,6 +798,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, use_cache = false; } fl4->flowi4_tos = RT_TOS(tos); + if (full_tos) + *full_tos = tos; dst_cache = (struct dst_cache *)&info->dst_cache; if (use_cache) { @@ -850,8 +853,7 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, use_cache = false; } - fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio), - info->key.label); + fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label); dst_cache = (struct dst_cache *)&info->dst_cache; if (use_cache) { dst = dst_cache_get_ip6(dst_cache, &fl6->saddr); @@ -885,6 +887,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, const struct ip_tunnel_key *key = &info->key; struct rtable *rt; struct flowi4 fl4; + __u8 full_tos; __u8 tos, ttl; __be16 df = 0; __be16 sport; @@ -895,7 +898,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, - geneve->cfg.info.key.tp_dst, sport); + 
geneve->cfg.info.key.tp_dst, sport, &full_tos); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -939,7 +942,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; } else { - tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb); + tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb); if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); else @@ -1121,7 +1124,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 1, USHRT_MAX, true); rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, - geneve->cfg.info.key.tp_dst, sport); + geneve->cfg.info.key.tp_dst, sport, NULL); if (IS_ERR(rt)) return PTR_ERR(rt); diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 02d6f3ad9aca..83dc1c2c3b84 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -311,7 +311,6 @@ static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. */ dev->netdev_ops = &sp_netdev_ops; - dev->needs_free_netdev = true; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &ax25_header_ops; @@ -679,9 +678,11 @@ static void sixpack_close(struct tty_struct *tty) del_timer_sync(&sp->tx_t); del_timer_sync(&sp->resync_t); - /* Free all 6pack frame buffers. */ + /* Free all 6pack frame buffers after unreg. */ kfree(sp->rbuff); kfree(sp->xbuff); + + free_netdev(sp->dev); } /* Perform I/O control on an active 6pack channel. */ diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 1ad6085994b1..5c17c92add8a 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -533,7 +533,7 @@ static int bpq_device_event(struct notifier_block *this, if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; - if (!dev_is_ethdev(dev)) + if (!dev_is_ethdev(dev) && !bpq_get_ax25_dev(dev)) return NOTIFY_DONE; switch (event) { diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 22010384c4a3..b9646b369f8e 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1353,7 +1353,9 @@ static int rr_close(struct net_device *dev) rrpriv->fw_running = 0; + spin_unlock_irqrestore(&rrpriv->lock, flags); del_timer_sync(&rrpriv->timer); + spin_lock_irqsave(&rrpriv->lock, flags); writel(0, ®s->TxPi); writel(0, ®s->IpRxPi); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index a0f338cf1424..367878493e70 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -977,7 +977,8 @@ struct net_device_context { u32 vf_alloc; /* Serial number of the VF to team with */ u32 vf_serial; - + /* completion variable to confirm vf association */ + struct completion vf_add; /* Is the current data path through the VF NIC? */ bool data_path_is_vf; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 6a7ab930ef70..d15da8287df3 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1327,6 +1327,10 @@ static void netvsc_send_vf(struct net_device *ndev, net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; + + if (net_device_ctx->vf_alloc) + complete(&net_device_ctx->vf_add); + netdev_info(ndev, "VF slot %u %s\n", net_device_ctx->vf_serial, net_device_ctx->vf_alloc ? 
"added" : "removed"); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index e3676386d0ee..f2020be43cfe 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2290,6 +2290,7 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) { struct device *parent = vf_netdev->dev.parent; struct net_device_context *ndev_ctx; + struct net_device *ndev; struct pci_dev *pdev; u32 serial; @@ -2316,6 +2317,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) return hv_get_drvdata(ndev_ctx->device_ctx); } + /* Fallback path to check synthetic vf with + * help of mac addr + */ + list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { + ndev = hv_get_drvdata(ndev_ctx->device_ctx); + if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) { + netdev_notice(vf_netdev, + "falling back to mac addr based matching\n"); + return ndev; + } + } + netdev_notice(vf_netdev, "no netdev found for vf serial:%u\n", serial); return NULL; @@ -2406,6 +2419,11 @@ static int netvsc_vf_changed(struct net_device *vf_netdev) return NOTIFY_OK; net_device_ctx->data_path_is_vf = vf_is_up; + if (vf_is_up && !net_device_ctx->vf_alloc) { + netdev_info(ndev, "Waiting for the VF association from host\n"); + wait_for_completion(&net_device_ctx->vf_add); + } + netvsc_switch_datapath(ndev, vf_is_up); netdev_info(ndev, "Data path switched %s VF: %s\n", vf_is_up ? "to" : "from", vf_netdev->name); @@ -2429,6 +2447,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) netvsc_vf_setxdp(vf_netdev, NULL); + reinit_completion(&net_device_ctx->vf_add); netdev_rx_handler_unregister(vf_netdev); netdev_upper_dev_unlink(vf_netdev, ndev); RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); @@ -2466,6 +2485,7 @@ static int netvsc_probe(struct hv_device *dev, INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); + init_completion(&net_device_ctx->vf_add); spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); @@ -2629,7 +2649,10 @@ static int netvsc_suspend(struct hv_device *dev) /* Save the current config info */ ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev); - + if (!ndev_ctx->saved_netvsc_dev_info) { + ret = -ENOMEM; + goto out; + } ret = netvsc_detach(net, nvdev); out: rtnl_unlock(); diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 7db9cbd0f5de..07adbeec1978 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1310,10 +1310,11 @@ static int adf7242_remove(struct spi_device *spi) debugfs_remove_recursive(lp->debugfs_root); + ieee802154_unregister_hw(lp->hw); + cancel_delayed_work_sync(&lp->work); destroy_workqueue(lp->wqueue); - ieee802154_unregister_hw(lp->hw); mutex_destroy(&lp->bmux); ieee802154_free_hw(lp->hw); diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index fd9f33c833fa..95ef3b6f98dd 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -926,7 +926,7 @@ static int ca8210_spi_transfer( dev_dbg(&spi->dev, "%s called\n", __func__); - cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC); + cas_ctl = kzalloc(sizeof(*cas_ctl), GFP_ATOMIC); if (!cas_ctl) return -ENOMEM; diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index 89c046b204e0..a8369bfa4050 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ 
-504,6 +504,7 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb) goto err_tx; if (status & CC2520_STATUS_TX_UNDERFLOW) { + rc = -EINVAL; dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n"); goto err_tx; } @@ -969,7 +970,7 @@ static int cc2520_hw_init(struct cc2520_private *priv) if (timeout-- <= 0) { dev_err(&priv->spi->dev, "oscillator start failed!\n"); - return ret; + return -ETIMEDOUT; } udelay(1); } while (!(status & CC2520_STATUS_XOSC32M_STABLE)); diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 2a65efd3e8da..64b12e462765 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -1209,9 +1209,10 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index) struct gsi_event *event_done; struct gsi_event *event; struct gsi_trans *trans; + u32 trans_count = 0; u32 byte_count = 0; - u32 old_index; u32 event_avail; + u32 old_index; trans_info = &channel->trans_info; @@ -1232,6 +1233,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index) do { trans->len = __le16_to_cpu(event->len); byte_count += trans->len; + trans_count++; /* Move on to the next event and transaction */ if (--event_avail) @@ -1243,26 +1245,24 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index) /* We record RX bytes when they are received */ channel->byte_count += byte_count; - channel->trans_count++; + channel->trans_count += trans_count; } /* Initialize a ring, including allocating DMA memory for its entries */ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) { - size_t size = count * GSI_RING_ELEMENT_SIZE; + u32 size = count * GSI_RING_ELEMENT_SIZE; struct device *dev = gsi->dev; dma_addr_t addr; - /* Hardware requires a 2^n ring size, with alignment equal to size */ + /* Hardware requires a 2^n ring size, with alignment equal to size. + * The DMA address returned by dma_alloc_coherent() is guaranteed to + * be a power-of-2 number of pages, which satisfies the requirement. + */ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); - if (ring->virt && addr % size) { - dma_free_coherent(dev, size, ring->virt, addr); - dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n", - size); - return -EINVAL; /* Not a good error value, but distinct */ - } else if (!ring->virt) { + if (!ring->virt) return -ENOMEM; - } + ring->addr = addr; ring->count = count; diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h index 1785c9d3344d..d58dce46e061 100644 --- a/drivers/net/ipa/gsi_private.h +++ b/drivers/net/ipa/gsi_private.h @@ -14,7 +14,7 @@ struct gsi_trans; struct gsi_ring; struct gsi_channel; -#define GSI_RING_ELEMENT_SIZE 16 /* bytes */ +#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */ /* Return the entry that follows one provided in a transaction pool */ void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element); diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index 6c3ed5b17b80..70c2b585f98d 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -153,11 +153,10 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, size = __roundup_pow_of_two(size); total_size = (count + max_alloc - 1) * size; - /* The allocator will give us a power-of-2 number of pages. But we - * can't guarantee that, so request it. That way we won't waste any - * memory that would be available beyond the required space. 
- * - * Note that gsi_trans_pool_exit_dma() assumes the total allocated + /* The allocator will give us a power-of-2 number of pages + * sufficient to satisfy our request. Round up our requested + * size to avoid any unused space in the allocation. This way + * gsi_trans_pool_exit_dma() can assume the total allocated * size is exactly (count * size). */ total_size = get_order(total_size) << PAGE_SHIFT; diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index a47378b7d9b2..dc94ce035655 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -154,7 +154,7 @@ static void ipa_cmd_validate_build(void) * of entries, as and IPv4 and IPv6 route tables have the same number * of entries. */ -#define TABLE_SIZE (TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE) +#define TABLE_SIZE (TABLE_COUNT_MAX * sizeof(__le64)) #define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX) BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK)); BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK)); diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index 7fc1058a5ca9..ba05e26c3c60 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -72,8 +72,8 @@ * that can be included in a single transaction. */ struct gsi_channel_data { - u16 tre_count; - u16 event_count; + u16 tre_count; /* must be a power of 2 */ + u16 event_count; /* must be a power of 2 */ u8 tlv_count; }; diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 621648ce750b..7e1208446e04 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -610,12 +610,14 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) if (endpoint->data->aggregation) { if (!endpoint->toward_ipa) { + u32 buffer_size; u32 limit; val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK); val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); - limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE); + buffer_size = IPA_RX_BUFFER_SIZE - NET_SKB_PAD; + limit = ipa_aggr_size_kb(buffer_size); val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK); limit = IPA_AGGR_TIME_LIMIT_DEFAULT; @@ -882,7 +884,7 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint) err_trans_free: gsi_trans_free(trans); err_free_pages: - __free_pages(page, get_order(IPA_RX_BUFFER_SIZE)); + put_page(page); return -ENOMEM; } @@ -1177,7 +1179,7 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint, struct page *page = trans->data; if (page) - __free_pages(page, get_order(IPA_RX_BUFFER_SIZE)); + put_page(page); } } diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c index a78d66051a17..25a8d029f207 100644 --- a/drivers/net/ipa/ipa_mem.c +++ b/drivers/net/ipa/ipa_mem.c @@ -414,7 +414,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size) } /* Align the address down and the size up to a page boundary */ - addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK; + addr = qcom_smem_virt_to_phys(virt); phys = addr & PAGE_MASK; size = PAGE_ALIGN(size + addr - phys); iova = phys; /* We just want a direct mapping */ diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index 1a87a49538c5..880ec353f958 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) mem = &ipa->mem[IPA_MEM_V4_ROUTE]; req.v4_route_tbl_info_valid = 1; req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v4_route_tbl_info.count = 
mem->size / IPA_TABLE_ENTRY_SIZE; + req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; mem = &ipa->mem[IPA_MEM_V6_ROUTE]; req.v6_route_tbl_info_valid = 1; req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v6_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE; + req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; mem = &ipa->mem[IPA_MEM_V4_FILTER]; req.v4_filter_tbl_start_valid = 1; @@ -352,8 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) req.v4_hash_route_tbl_info_valid = 1; req.v4_hash_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v4_hash_route_tbl_info.count = - mem->size / IPA_TABLE_ENTRY_SIZE; + req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; } mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]; @@ -361,8 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) req.v6_hash_route_tbl_info_valid = 1; req.v6_hash_route_tbl_info.start = ipa->mem_offset + mem->offset; - req.v6_hash_route_tbl_info.count = - mem->size / IPA_TABLE_ENTRY_SIZE; + req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; } mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED]; diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c index 73413371e3d3..ecf9f863c842 100644 --- a/drivers/net/ipa/ipa_qmi_msg.c +++ b/drivers/net/ipa/ipa_qmi_msg.c @@ -271,7 +271,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x12, .offset = offsetof(struct ipa_init_modem_driver_req, v4_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, @@ -292,7 +292,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x13, .offset = offsetof(struct ipa_init_modem_driver_req, v6_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, @@ -456,7 +456,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x1b, .offset = offsetof(struct ipa_init_modem_driver_req, v4_hash_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, @@ -477,7 +477,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { .tlv_type = 0x1c, .offset = offsetof(struct ipa_init_modem_driver_req, v6_hash_route_tbl_info), - .ei_array = ipa_mem_array_ei, + .ei_array = ipa_mem_bounds_ei, }, { .data_type = QMI_OPT_FLAG, diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h index cfac456cea0c..58de425bb8e6 100644 --- a/drivers/net/ipa/ipa_qmi_msg.h +++ b/drivers/net/ipa/ipa_qmi_msg.h @@ -82,9 +82,11 @@ enum ipa_platform_type { IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */ }; -/* This defines the start and end offset of a range of memory. Both - * fields are offsets relative to the start of IPA shared memory. - * The end value is the last addressable byte *within* the range. +/* This defines the start and end offset of a range of memory. The start + * value is a byte offset relative to the start of IPA shared memory. The + * end value is the last addressable unit *within* the range. Typically + * the end value is in units of bytes, however it can also be a maximum + * array index value. */ struct ipa_mem_bounds { u32 start; @@ -125,18 +127,19 @@ struct ipa_init_modem_driver_req { u8 hdr_tbl_info_valid; struct ipa_mem_bounds hdr_tbl_info; - /* Routing table information. These define the location and size of - * non-hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + /* Routing table information. 
These define the location and maximum + *index* (not byte) for the modem portion of non-hashable IPv4 and + IPv6 routing tables. The start values are byte offsets relative + to the start of IPA shared memory. */ u8 v4_route_tbl_info_valid; - struct ipa_mem_array v4_route_tbl_info; + struct ipa_mem_bounds v4_route_tbl_info; u8 v6_route_tbl_info_valid; - struct ipa_mem_array v6_route_tbl_info; + struct ipa_mem_bounds v6_route_tbl_info; /* Filter table information. These define the location of the * non-hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + * byte offsets relative to the start of IPA shared memory. */ u8 v4_filter_tbl_start_valid; u32 v4_filter_tbl_start; @@ -177,18 +180,20 @@ struct ipa_init_modem_driver_req { u8 zip_tbl_info_valid; struct ipa_mem_bounds zip_tbl_info; - /* Routing table information. These define the location and size - * of hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + /* Routing table information. These define the location and maximum + * *index* (not byte) for the modem portion of hashable IPv4 and IPv6 + * routing tables (if supported by hardware). The start values are + * byte offsets relative to the start of IPA shared memory. */ u8 v4_hash_route_tbl_info_valid; - struct ipa_mem_array v4_hash_route_tbl_info; + struct ipa_mem_bounds v4_hash_route_tbl_info; u8 v6_hash_route_tbl_info_valid; - struct ipa_mem_array v6_hash_route_tbl_info; + struct ipa_mem_bounds v6_hash_route_tbl_info; /* Filter table information. These define the location and size - * of hashable IPv4 and IPv6 filter tables. The start values are - * offsets relative to the start of IPA shared memory. + * of hashable IPv4 and IPv6 filter tables (if supported by hardware). + * The start values are byte offsets relative to the start of IPA + * shared memory. */ u8 v4_hash_filter_tbl_start_valid; u32 v4_hash_filter_tbl_start; diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index 0747866d60ab..02c192837414 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -27,28 +27,38 @@ /** * DOC: IPA Filter and Route Tables * - * The IPA has tables defined in its local shared memory that define filter - * and routing rules. Each entry in these tables contains a 64-bit DMA - * address that refers to DRAM (system memory) containing a rule definition. + * The IPA has tables defined in its local (IPA-resident) memory that define + * filter and routing rules. An entry in either of these tables is a little + * endian 64-bit "slot" that holds the address of a rule definition. (The + * size of these slots is 64 bits regardless of the host DMA address size.) + * + * Separate tables (both filter and route) are used for IPv4 and IPv6. There + * is normally another set of "hashed" filter and route tables, which are + * used with a hash of message metadata. Hashed operation is not supported + * by all IPA hardware (IPA v4.2 doesn't support hashed tables). + * + * Rules can be in local memory or in DRAM (system memory). The offset of + * an object (such as a route or filter table) in IPA-resident memory must + * be 128-byte aligned. An object in system memory (such as a route or filter + * rule) must be at an 8-byte aligned address. We currently only place + * route or filter rules in system memory. + * * A rule consists of a contiguous block of 32-bit values terminated with * 32 zero bits.
A special "zero entry" rule consisting of 64 zero bits * represents "no filtering" or "no routing," and is the reset value for - * filter or route table rules. Separate tables (both filter and route) - * used for IPv4 and IPv6. Additionally, there can be hashed filter or - * route tables, which are used when a hash of message metadata matches. - * Hashed operation is not supported by all IPA hardware. + * filter or route table rules. * * Each filter rule is associated with an AP or modem TX endpoint, though - * not all TX endpoints support filtering. The first 64-bit entry in a + * not all TX endpoints support filtering. The first 64-bit slot in a * filter table is a bitmap indicating which endpoints have entries in * the table. The low-order bit (bit 0) in this bitmap represents a * special global filter, which applies to all traffic. This is not * used in the current code. Bit 1, if set, indicates that there is an - * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the - * table. Bit 2, if set, indicates there is an entry for endpoint 1, - * and so on. Space is set aside in IPA local memory to hold as many - * filter table entries as might be required, but typically they are not - * all used. + * entry (i.e. slot containing a system address referring to a rule) for + * endpoint 0 in the table. Bit 3, if set, indicates there is an entry + * for endpoint 2, and so on. Space is set aside in IPA local memory to + * hold as many filter table entries as might be required, but typically + * they are not all used. * * The AP initializes all entries in a filter table to refer to a "zero" * entry. Once initialized the modem and AP update the entries for @@ -96,13 +106,8 @@ * ---------------------- */ -/* IPA hardware constrains filter and route tables alignment */ -#define IPA_TABLE_ALIGN 128 /* Minimum table alignment */ - /* Assignment of route table entries to the modem and AP */ #define IPA_ROUTE_MODEM_MIN 0 -#define IPA_ROUTE_MODEM_COUNT 8 - #define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT #define IPA_ROUTE_AP_COUNT \ (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT) @@ -118,21 +123,14 @@ /* Check things that can be validated at build time. */ static void ipa_table_validate_build(void) { - /* IPA hardware accesses memory 128 bytes at a time. Addresses - * referred to by entries in filter and route tables must be - * aligned on 128-byte byte boundaries. The only rule address - * ever use is the "zero rule", and it's aligned at the base - * of a coherent DMA allocation. + /* Filter and route tables contain DMA addresses that refer + * to filter or route rules. But the size of a table entry + * is 64 bits regardless of what the size of an AP DMA address + * is. A fixed constant defines the size of an entry, and + * code in ipa_table_init() uses a pointer to __le64 to + * initialize tables. */ - BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN); - - /* Filter and route tables contain DMA addresses that refer to - * filter or route rules. We use a fixed constant to represent - * the size of either type of table entry. Code in ipa_table_init() - * uses a pointer to __le64 to initialize table entriews. - */ - BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t)); - BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64)); + BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64)); /* A "zero rule" is used to represent no filtering or no routing. * It is a 64-bit block of zeroed memory. 
Code in ipa_table_init() @@ -163,7 +161,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed) else mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED] : &ipa->mem[IPA_MEM_V4_ROUTE]; - size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE; + size = IPA_ROUTE_COUNT_MAX * sizeof(__le64); } else { if (ipv6) mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED] @@ -171,7 +169,7 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed) else mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED] : &ipa->mem[IPA_MEM_V4_FILTER]; - size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE; + size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64); } if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed)) @@ -270,8 +268,8 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter, if (filter) first++; /* skip over bitmap */ - offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE; - size = count * IPA_TABLE_ENTRY_SIZE; + offset = mem->offset + first * sizeof(__le64); + size = count * sizeof(__le64); addr = ipa_table_addr(ipa, false, count); ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true); @@ -455,11 +453,11 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter, count = 1 + hweight32(ipa->filter_map); hash_count = hash_mem->size ? count : 0; } else { - count = mem->size / IPA_TABLE_ENTRY_SIZE; - hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE; + count = mem->size / sizeof(__le64); + hash_count = hash_mem->size / sizeof(__le64); } - size = count * IPA_TABLE_ENTRY_SIZE; - hash_size = hash_count * IPA_TABLE_ENTRY_SIZE; + size = count * sizeof(__le64); + hash_size = hash_count * sizeof(__le64); addr = ipa_table_addr(ipa, filter, count); hash_addr = ipa_table_addr(ipa, filter, hash_count); @@ -662,7 +660,13 @@ int ipa_table_init(struct ipa *ipa) ipa_table_validate_build(); - size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE; + /* The IPA hardware requires route and filter table rules to be + * aligned on a 128-byte boundary. We put the "zero rule" at the + * base of the table area allocated here. The DMA address returned + * by dma_alloc_coherent() is guaranteed to be a power-of-2 number + * of pages, which satisfies the rule alignment requirement. 
+ */ + size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64); virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); if (!virt) return -ENOMEM; @@ -694,7 +698,7 @@ void ipa_table_exit(struct ipa *ipa) struct device *dev = &ipa->pdev->dev; size_t size; - size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE; + size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64); dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr); ipa->table_addr = 0; diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h index 78038d14fcea..35e519cef25d 100644 --- a/drivers/net/ipa/ipa_table.h +++ b/drivers/net/ipa/ipa_table.h @@ -10,12 +10,12 @@ struct ipa; -/* The size of a filter or route table entry */ -#define IPA_TABLE_ENTRY_SIZE sizeof(__le64) /* Holds a physical address */ - /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */ #define IPA_FILTER_COUNT_MAX 14 +/* The number of route table entries allotted to the modem */ +#define IPA_ROUTE_MODEM_COUNT 8 + /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */ #define IPA_ROUTE_COUNT_MAX 15 diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 8801d093135c..a33149ee0ddc 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -496,7 +496,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) static int ipvlan_process_outbound(struct sk_buff *skb) { - struct ethhdr *ethh = eth_hdr(skb); int ret = NET_XMIT_DROP; /* The ipvlan is a pseudo-L2 device, so the packets that we receive @@ -506,6 +505,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb) if (skb_mac_header_was_set(skb)) { /* In this mode we dont care about * multicast and broadcast traffic */ + struct ethhdr *ethh = eth_hdr(skb); + if (is_multicast_ether_addr(ethh->h_dest)) { pr_debug_ratelimited( "Dropped {multi|broad}cast of type=[%x]\n", @@ -590,7 +591,7 @@ out: static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) { const struct ipvl_dev *ipvlan = netdev_priv(dev); - struct ethhdr *eth = eth_hdr(skb); + struct ethhdr *eth = skb_eth_hdr(skb); struct ipvl_addr *addr; void *lyr3h; int addr_type; @@ -620,6 +621,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) return dev_forward_skb(ipvlan->phy_dev, skb); } else if (is_multicast_ether_addr(eth->h_dest)) { + skb_reset_mac_header(skb); ipvlan_skb_crossing_ns(skb, NULL); ipvlan_multicast_enqueue(ipvlan->port, skb, true); return NET_XMIT_SUCCESS; diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c index 1cedb634f4f7..f01078b2581c 100644 --- a/drivers/net/ipvlan/ipvtap.c +++ b/drivers/net/ipvlan/ipvtap.c @@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = { .notifier_call = ipvtap_device_event, }; -static int ipvtap_init(void) +static int __init ipvtap_init(void) { int err; @@ -228,7 +228,7 @@ out1: } module_init(ipvtap_init); -static void ipvtap_exit(void) +static void __exit ipvtap_exit(void) { rtnl_link_unregister(&ipvtap_link_ops); unregister_netdevice_notifier(&ipvtap_notifier_block); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index a1c77cc00416..498e5c8013ef 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -208,7 +208,7 @@ static __net_init int loopback_net_init(struct net *net) int err; err = -ENOMEM; - dev = alloc_netdev(0, "lo", NET_NAME_UNKNOWN, loopback_setup); + dev = alloc_netdev(0, "lo", NET_NAME_PREDICTABLE, loopback_setup); if (!dev) goto 
out; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 789a124809e3..eb029456b594 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -240,6 +240,7 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) #define DEFAULT_SEND_SCI true #define DEFAULT_ENCRYPT false #define DEFAULT_ENCODING_SA 0 +#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1)) static bool send_sci(const struct macsec_secy *secy) { @@ -1389,7 +1390,8 @@ static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) return NULL; } -static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) +static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci, + bool active) { struct macsec_rx_sc *rx_sc; struct macsec_dev *macsec; @@ -1413,7 +1415,7 @@ static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) } rx_sc->sci = sci; - rx_sc->active = true; + rx_sc->active = active; refcount_set(&rx_sc->refcnt, 1); secy = &macsec_priv(dev)->secy; @@ -1694,7 +1696,7 @@ static bool validate_add_rxsa(struct nlattr **attrs) return false; if (attrs[MACSEC_SA_ATTR_PN] && - *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0) + nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) return false; if (attrs[MACSEC_SA_ATTR_ACTIVE]) { @@ -1750,7 +1752,8 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) } pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; - if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { + if (tb_sa[MACSEC_SA_ATTR_PN] && + nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n", nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); rtnl_unlock(); @@ -1766,7 +1769,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) { pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n", nla_len(tb_sa[MACSEC_SA_ATTR_SALT]), - MACSEC_SA_ATTR_SALT); + MACSEC_SALT_LEN); rtnl_unlock(); return -EINVAL; } @@ -1821,6 +1824,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) secy->key_len); err = macsec_offload(ops->mdo_add_rxsa, &ctx); + memzero_explicit(ctx.sa.key, secy->key_len); if (err) goto cleanup; } @@ -1839,7 +1843,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) return 0; cleanup: - kfree(rx_sa); + macsec_rxsa_put(rx_sa); rtnl_unlock(); return err; } @@ -1865,7 +1869,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) struct macsec_rx_sc *rx_sc; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; struct macsec_secy *secy; - bool was_active; + bool active = true; int ret; if (!attrs[MACSEC_ATTR_IFINDEX]) @@ -1887,16 +1891,15 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) secy = &macsec_priv(dev)->secy; sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); - rx_sc = create_rx_sc(dev, sci); + if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) + active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); + + rx_sc = create_rx_sc(dev, sci, active); if (IS_ERR(rx_sc)) { rtnl_unlock(); return PTR_ERR(rx_sc); } - was_active = rx_sc->active; - if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) - rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); - if (macsec_is_offloaded(netdev_priv(dev))) { const struct macsec_ops *ops; struct macsec_context ctx; @@ -1920,7 +1923,8 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) return 0; cleanup: - rx_sc->active = was_active; + del_rx_sc(secy, sci); + 
free_rx_sc(rx_sc); rtnl_unlock(); return ret; } @@ -1936,7 +1940,7 @@ static bool validate_add_txsa(struct nlattr **attrs) if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) return false; - if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) + if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) return false; if (attrs[MACSEC_SA_ATTR_ACTIVE]) { @@ -2008,7 +2012,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) { pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n", nla_len(tb_sa[MACSEC_SA_ATTR_SALT]), - MACSEC_SA_ATTR_SALT); + MACSEC_SALT_LEN); rtnl_unlock(); return -EINVAL; } @@ -2063,6 +2067,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) secy->key_len); err = macsec_offload(ops->mdo_add_txsa, &ctx); + memzero_explicit(ctx.sa.key, secy->key_len); if (err) goto cleanup; } @@ -2082,7 +2087,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) cleanup: secy->operational = was_operational; - kfree(tx_sa); + macsec_txsa_put(tx_sa); rtnl_unlock(); return err; } @@ -2290,7 +2295,7 @@ static bool validate_upd_sa(struct nlattr **attrs) if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) return false; - if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) + if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) return false; if (attrs[MACSEC_SA_ATTR_ACTIVE]) { @@ -2559,7 +2564,7 @@ static bool macsec_is_configured(struct macsec_dev *macsec) struct macsec_tx_sc *tx_sc = &secy->tx_sc; int i; - if (secy->n_rx_sc > 0) + if (secy->rx_sc) return true; for (i = 0; i < MACSEC_NUM_AN; i++) @@ -2643,11 +2648,6 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) if (ret) goto rollback; - /* Force features update, since they are different for SW MACSec and - * HW offloading cases. 
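A note on the validate_add_rxsa()/validate_upd_sa() changes in the macsec hunks above: netlink attribute payloads are only guaranteed 4-byte alignment, so the old *(u64 *)nla_data(...) dereference can fault on strict-alignment architectures, whereas nla_get_u64() copies the value out byte-safely. A minimal standalone sketch of that unaligned-read pattern (the buffer layout below is invented purely for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a 64-bit value from a possibly misaligned location.
 * memcpy() compiles down to a plain load where alignment allows,
 * and to safe byte accesses where it does not.
 */
static uint64_t read_u64_unaligned(const void *p)
{
        uint64_t v;

        memcpy(&v, p, sizeof(v));
        return v;
}

int main(void)
{
        unsigned char payload[12] = { 0 };
        uint64_t pn = 0x1122334455667788ULL;

        memcpy(payload + 2, &pn, sizeof(pn));   /* offset 2: misaligned */
        printf("pn=%llx\n",
               (unsigned long long)read_u64_unaligned(payload + 2));
        return 0;
}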
- */ - netdev_update_features(dev); - rtnl_unlock(); return 0; @@ -3415,16 +3415,9 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, return ret; } -#define SW_MACSEC_FEATURES \ +#define MACSEC_FEATURES \ (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) -/* If h/w offloading is enabled, use real device features save for - * VLAN_FEATURES - they require additional ops - * HW_MACSEC - no reason to report it - */ -#define REAL_DEV_FEATURES(dev) \ - ((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC)) - static int macsec_dev_init(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); @@ -3441,12 +3434,8 @@ static int macsec_dev_init(struct net_device *dev) return err; } - if (macsec_is_offloaded(macsec)) { - dev->features = REAL_DEV_FEATURES(real_dev); - } else { - dev->features = real_dev->features & SW_MACSEC_FEATURES; - dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; - } + dev->features = real_dev->features & MACSEC_FEATURES; + dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; dev->needed_headroom = real_dev->needed_headroom + MACSEC_NEEDED_HEADROOM; @@ -3475,10 +3464,7 @@ static netdev_features_t macsec_fix_features(struct net_device *dev, struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; - if (macsec_is_offloaded(macsec)) - return REAL_DEV_FEATURES(real_dev); - - features &= (real_dev->features & SW_MACSEC_FEATURES) | + features &= (real_dev->features & MACSEC_FEATURES) | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; features |= NETIF_F_LLTX; @@ -3694,6 +3680,7 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, + [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 }, }; static void macsec_free_netdev(struct net_device *dev) @@ -3737,9 +3724,6 @@ static int macsec_changelink_common(struct net_device *dev, secy->operational = tx_sa && tx_sa->active; } - if (data[IFLA_MACSEC_WINDOW]) - secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); - if (data[IFLA_MACSEC_ENCRYPT]) tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); @@ -3785,6 +3769,16 @@ static int macsec_changelink_common(struct net_device *dev, } } + if (data[IFLA_MACSEC_WINDOW]) { + secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); + + /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window + * for XPN cipher suites */ + if (secy->xpn && + secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW) + return -EINVAL; + } + return 0; } @@ -3814,13 +3808,12 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], ret = macsec_changelink_common(dev, data); if (ret) - return ret; + goto cleanup; /* If h/w offloading is available, propagate to the device */ if (macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; - int ret; ops = macsec_get_ops(netdev_priv(dev), &ctx); if (!ops) { diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index c8d803d3616c..5869bc2c3aa7 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -139,7 +139,7 @@ static struct macvlan_source_entry *macvlan_hash_lookup_source( u32 idx = macvlan_eth_hash(addr); struct hlist_head *h = &vlan->port->vlan_source_hash[idx]; - hlist_for_each_entry_rcu(entry, h, hlist) { + hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) { if (ether_addr_equal_64bits(entry->addr, addr) && entry->vlan == vlan) return entry; @@ -1176,7 +1176,7 @@ 
void macvlan_common_setup(struct net_device *dev) { ether_setup(dev); - dev->min_mtu = 0; + /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */ dev->max_mtu = ETH_MAX_MTU; dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); @@ -1509,8 +1509,10 @@ destroy_macvlan_port: /* the macvlan port may be freed by macvlan_uninit when fail to register. * so we destroy the macvlan port only when it's valid. */ - if (create && macvlan_port_get_rtnl(lowerdev)) + if (create && macvlan_port_get_rtnl(lowerdev)) { + macvlan_flush_sources(port, vlan); macvlan_port_destroy(port->dev); + } return err; } EXPORT_SYMBOL_GPL(macvlan_common_newlink); @@ -1612,7 +1614,7 @@ static int macvlan_fill_info_macaddr(struct sk_buff *skb, struct hlist_head *h = &vlan->port->vlan_source_hash[i]; struct macvlan_source_entry *entry; - hlist_for_each_entry_rcu(entry, h, hlist) { + hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) { if (entry->vlan != vlan) continue; if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr)) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 694e2f5dbbe5..39801c31e507 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -133,11 +133,17 @@ static void macvtap_setup(struct net_device *dev) dev->tx_queue_len = TUN_READQ_SIZE; } +static struct net *macvtap_link_net(const struct net_device *dev) +{ + return dev_net(macvlan_dev_real_dev(dev)); +} + static struct rtnl_link_ops macvtap_link_ops __read_mostly = { .kind = "macvtap", .setup = macvtap_setup, .newlink = macvtap_newlink, .dellink = macvtap_dellink, + .get_link_net = macvtap_link_net, .priv_size = sizeof(struct macvtap_dev), }; diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c index fbd36891ee64..5d171e7f118d 100644 --- a/drivers/net/mdio/mdio-bcm-unimac.c +++ b/drivers/net/mdio/mdio-bcm-unimac.c @@ -5,20 +5,18 @@ * Copyright (C) 2014-2017 Broadcom */ +#include +#include +#include #include +#include +#include +#include +#include #include +#include #include #include -#include -#include -#include -#include - -#include -#include -#include - -#include #define MDIO_CMD 0x00 #define MDIO_START_BUSY (1 << 29) diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c index 5136275c8e73..99588192cc78 100644 --- a/drivers/net/mdio/mdio-bitbang.c +++ b/drivers/net/mdio/mdio-bitbang.c @@ -14,10 +14,10 @@ * Vitaly Bordug */ -#include -#include -#include #include +#include +#include +#include #define MDIO_READ 2 #define MDIO_WRITE 1 diff --git a/drivers/net/mdio/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c index 1afd6fc1a351..95ce274c1be1 100644 --- a/drivers/net/mdio/mdio-cavium.c +++ b/drivers/net/mdio/mdio-cavium.c @@ -4,9 +4,9 @@ */ #include +#include #include #include -#include #include "mdio-cavium.h" diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c index 1b00235d7dc5..56c8f914f893 100644 --- a/drivers/net/mdio/mdio-gpio.c +++ b/drivers/net/mdio/mdio-gpio.c @@ -17,15 +17,15 @@ * Vitaly Bordug */ -#include -#include +#include #include -#include -#include #include #include -#include +#include #include +#include +#include +#include struct mdio_gpio_info { struct mdiobb_ctrl ctrl; diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 25c25ea6da66..9cd71d896963 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -3,10 +3,10 @@ /* Copyright (c) 2020 Sartura Ltd. 
*/ #include -#include -#include #include #include +#include +#include #include #include #include diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c index f0a6bfa61645..49d4e9aa30bb 100644 --- a/drivers/net/mdio/mdio-ipq8064.c +++ b/drivers/net/mdio/mdio-ipq8064.c @@ -7,12 +7,12 @@ #include #include +#include #include -#include #include #include #include -#include +#include /* MII address register definitions */ #define MII_ADDR_REG_ADDR 0x10 diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 11f583fd4611..037649bef92e 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -6,14 +6,14 @@ * Copyright (c) 2017 Microsemi Corporation */ -#include -#include -#include -#include #include #include #include +#include +#include #include +#include +#include #define MSCC_MIIM_REG_STATUS 0x0 #define MSCC_MIIM_STATUS_STAT_PENDING BIT(2) @@ -76,6 +76,9 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum) u32 val; int ret; + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + ret = mscc_miim_wait_pending(bus); if (ret) goto out; @@ -105,6 +108,9 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id, struct mscc_miim_dev *miim = bus->priv; int ret; + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + ret = mscc_miim_wait_pending(bus); if (ret < 0) goto out; diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c index 42fb5f166136..641cfa41f492 100644 --- a/drivers/net/mdio/mdio-mux-bcm-iproc.c +++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c @@ -3,14 +3,14 @@ * Copyright 2016 Broadcom */ #include -#include -#include -#include -#include -#include -#include #include +#include #include +#include +#include +#include +#include +#include #define MDIO_RATE_ADJ_EXT_OFFSET 0x000 #define MDIO_RATE_ADJ_INT_OFFSET 0x004 diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c index 10a758fdc9e6..3c7f16f06b45 100644 --- a/drivers/net/mdio/mdio-mux-gpio.c +++ b/drivers/net/mdio/mdio-mux-gpio.c @@ -3,13 +3,13 @@ * Copyright (C) 2011, 2012 Cavium, Inc. */ -#include #include -#include -#include -#include -#include #include +#include +#include +#include +#include +#include #define DRV_VERSION "1.1" #define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c index d1a8780e24d8..c02fb2a067ee 100644 --- a/drivers/net/mdio/mdio-mux-mmioreg.c +++ b/drivers/net/mdio/mdio-mux-mmioreg.c @@ -7,13 +7,13 @@ * Copyright 2012 Freescale Semiconductor, Inc. */ -#include #include +#include +#include #include #include -#include #include -#include +#include struct mdio_mux_mmioreg_state { void *mux_handle; diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c index d6564381aa3e..527acfc3c045 100644 --- a/drivers/net/mdio/mdio-mux-multiplexer.c +++ b/drivers/net/mdio/mdio-mux-multiplexer.c @@ -4,10 +4,10 @@ * Copyright 2019 NXP */ -#include #include #include #include +#include struct mdio_mux_multiplexer_state { struct mux_control *muxc; diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c index ccb3ee704eb1..3dde0c2b3e09 100644 --- a/drivers/net/mdio/mdio-mux.c +++ b/drivers/net/mdio/mdio-mux.c @@ -3,12 +3,12 @@ * Copyright (C) 2011, 2012 Cavium, Inc. 
*/ -#include -#include -#include #include +#include #include +#include #include +#include #define DRV_DESCRIPTION "MDIO bus multiplexer driver" diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c index 6faf39314ac9..e096e68ac667 100644 --- a/drivers/net/mdio/mdio-octeon.c +++ b/drivers/net/mdio/mdio-octeon.c @@ -3,13 +3,13 @@ * Copyright (C) 2009-2015 Cavium, Inc. */ -#include +#include +#include +#include #include #include -#include -#include #include -#include +#include #include "mdio-cavium.h" diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c index dd7430c998a2..822d2cdd2f35 100644 --- a/drivers/net/mdio/mdio-thunder.c +++ b/drivers/net/mdio/mdio-thunder.c @@ -3,14 +3,14 @@ * Copyright (C) 2009-2016 Cavium, Inc. */ +#include +#include +#include +#include #include #include -#include -#include -#include -#include -#include #include +#include #include "mdio-cavium.h" diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c index 461207cdf5d6..7ab4e26db08c 100644 --- a/drivers/net/mdio/mdio-xgene.c +++ b/drivers/net/mdio/mdio-xgene.c @@ -13,11 +13,11 @@ #include #include #include -#include -#include #include -#include +#include +#include #include +#include #include static bool xgene_mdio_status; diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index 4daf94bb56a5..5bae47f3da40 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -8,17 +8,17 @@ * out of the OpenFirmware device tree and using it to populate an mii_bus. */ -#include #include -#include #include -#include -#include +#include +#include +#include #include #include #include #include -#include +#include +#include #define DEFAULT_GPIO_RESET_DELAY 10 /* in microseconds */ @@ -332,6 +332,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) return 0; unregister: + of_node_put(child); mdiobus_unregister(mdio); return rc; } diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index a43820212932..50854265864d 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -351,10 +351,12 @@ nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx) { struct nsim_bpf_bound_map *nmap = offmap->dev_priv; - nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER); + nmap->entry[idx].key = kmalloc(offmap->map.key_size, + GFP_KERNEL_ACCOUNT | __GFP_NOWARN); if (!nmap->entry[idx].key) return -ENOMEM; - nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER); + nmap->entry[idx].value = kmalloc(offmap->map.value_size, + GFP_KERNEL_ACCOUNT | __GFP_NOWARN); if (!nmap->entry[idx].value) { kfree(nmap->entry[idx].key); nmap->entry[idx].key = NULL; @@ -496,7 +498,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap) if (offmap->map.map_flags) return -EINVAL; - nmap = kzalloc(sizeof(*nmap), GFP_USER); + nmap = kzalloc(sizeof(*nmap), GFP_KERNEL_ACCOUNT); if (!nmap) return -ENOMEM; diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index ad6dbf011052..4fb0638a55b4 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) unsigned int start; do { - start = u64_stats_fetch_begin(&ns->syncp); + start = u64_stats_fetch_begin_irq(&ns->syncp); stats->tx_bytes = ns->tx_bytes; stats->tx_packets = ns->tx_packets; - } while (u64_stats_fetch_retry(&ns->syncp, start)); + } while 
(u64_stats_fetch_retry_irq(&ns->syncp, start)); } static int diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index a5bab614ff84..1b7d588ff3c5 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -484,7 +484,14 @@ static int __init ntb_netdev_init_module(void) rc = ntb_transport_register_client_dev(KBUILD_MODNAME); if (rc) return rc; - return ntb_transport_register_client(&ntb_netdev_client); + + rc = ntb_transport_register_client(&ntb_netdev_client); + if (rc) { + ntb_transport_unregister_client_dev(KBUILD_MODNAME); + return rc; + } + + return 0; } module_init(ntb_netdev_init_module); diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c index 41e7c1432497..7045595f8d7d 100644 --- a/drivers/net/phy/aquantia_main.c +++ b/drivers/net/phy/aquantia_main.c @@ -34,6 +34,8 @@ #define MDIO_AN_VEND_PROV 0xc400 #define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15) #define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14) +#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11) +#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10) #define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4) #define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0) #define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4 @@ -87,6 +89,9 @@ #define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8) #define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0) +#define VEND1_GLOBAL_GEN_STAT2 0xc831 +#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15) + #define VEND1_GLOBAL_RSVD_STAT1 0xc885 #define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4) #define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0) @@ -121,6 +126,12 @@ #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1) #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0) +/* Sleep and timeout for checking if the Processor-Intensive + * MDIO operation is finished + */ +#define AQR107_OP_IN_PROG_SLEEP 1000 +#define AQR107_OP_IN_PROG_TIMEOUT 100000 + struct aqr107_hw_stat { const char *name; int reg; @@ -230,9 +241,20 @@ static int aqr_config_aneg(struct phy_device *phydev) phydev->advertising)) reg |= MDIO_AN_VEND_PROV_1000BASET_HALF; + /* Handle the case when the 2.5G and 5G speeds are not advertised */ + if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->advertising)) + reg |= MDIO_AN_VEND_PROV_2500BASET_FULL; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + phydev->advertising)) + reg |= MDIO_AN_VEND_PROV_5000BASET_FULL; + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV, MDIO_AN_VEND_PROV_1000BASET_HALF | - MDIO_AN_VEND_PROV_1000BASET_FULL, reg); + MDIO_AN_VEND_PROV_1000BASET_FULL | + MDIO_AN_VEND_PROV_2500BASET_FULL | + MDIO_AN_VEND_PROV_5000BASET_FULL, reg); if (ret < 0) return ret; if (ret > 0) @@ -556,16 +578,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev) phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n"); } +static int aqr107_wait_processor_intensive_op(struct phy_device *phydev) +{ + int val, err; + + /* The datasheet notes to wait at least 1ms after issuing a + * processor intensive operation before checking. + * We cannot use the 'sleep_before_read' parameter of read_poll_timeout + * because that just determines the maximum time slept, not the minimum. 
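The AQR107_OP_IN_PROG_SLEEP/TIMEOUT constants defined above drive the poll loop that follows below, which must sleep a guaranteed minimum interval before the first status read. A userspace sketch of that shape, with op_in_progress() as a hypothetical stand-in for the MDIO status read (not driver code):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Pretend the operation completes on the third poll. */
static bool op_in_progress(void)
{
        static int polls;

        return ++polls < 3;
}

/* Sleep a guaranteed minimum first, then poll against a hard deadline. */
static int wait_op_done(long min_delay_us, long poll_us, long timeout_us)
{
        struct timespec start, now;

        usleep(min_delay_us);           /* a minimum, not a maximum */
        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (!op_in_progress())
                        return 0;
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000000L +
                    (now.tv_nsec - start.tv_nsec) / 1000L > timeout_us)
                        return -1;      /* timed out */
                usleep(poll_us);
        }
}

int main(void)
{
        printf("%s\n", wait_op_done(1000, 1000, 100000) ? "timeout" : "done");
        return 0;
}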
+	 */
+	usleep_range(1000, 5000);
+
+	err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+					VEND1_GLOBAL_GEN_STAT2, val,
+					!(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
+					AQR107_OP_IN_PROG_SLEEP,
+					AQR107_OP_IN_PROG_TIMEOUT, false);
+	if (err) {
+		phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
+		return err;
+	}
+
+	return 0;
+}
+
 static int aqr107_suspend(struct phy_device *phydev)
 {
-	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
-				MDIO_CTRL1_LPOWER);
+	int err;
+
+	err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+			       MDIO_CTRL1_LPOWER);
+	if (err)
+		return err;
+
+	return aqr107_wait_processor_intensive_op(phydev);
 }

 static int aqr107_resume(struct phy_device *phydev)
 {
-	return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
-				  MDIO_CTRL1_LPOWER);
+	int err;
+
+	err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+				 MDIO_CTRL1_LPOWER);
+	if (err)
+		return err;
+
+	return aqr107_wait_processor_intensive_op(phydev);
 }

 static int aqr107_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 3d75b98f3051..db651649e0b8 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -243,9 +243,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
 		if (misr_status < 0)
 			return misr_status;

-		misr_status |= (DP83822_RX_ERR_HF_INT_EN |
-				DP83822_FALSE_CARRIER_HF_INT_EN |
-				DP83822_LINK_STAT_INT_EN |
+		misr_status |= (DP83822_LINK_STAT_INT_EN |
 				DP83822_ENERGY_DET_INT_EN |
 				DP83822_LINK_QUAL_INT_EN);

@@ -270,8 +268,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
 			       DP83822_EEE_ERROR_CHANGE_INT_EN);

 		if (!dp83822->fx_enabled)
-			misr_status |= DP83822_MDI_XOVER_INT_EN |
-				       DP83822_ANEG_ERR_INT_EN |
+			misr_status |= DP83822_ANEG_ERR_INT_EN |
 				       DP83822_WOL_PKT_INT_EN;

 		err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c716074fdef0..c8031e297faf 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -137,6 +137,7 @@
 #define DP83867_DOWNSHIFT_2_COUNT	2
 #define DP83867_DOWNSHIFT_4_COUNT	4
 #define DP83867_DOWNSHIFT_8_COUNT	8
+#define DP83867_SGMII_AUTONEG_EN	BIT(7)

 /* CFG3 bits */
 #define DP83867_CFG3_INT_OE		BIT(7)
@@ -756,6 +757,14 @@ static int dp83867_config_init(struct phy_device *phydev)
 		else
 			val &= ~DP83867_SGMII_TYPE;
 		phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
+
+		/* This is a SW workaround for link instability if RX_CTRL is
+		 * not strapped to mode 3 or 4 in HW. This is required for SGMII
+		 * in addition to clearing bit 7, handled above.
+		 */
+		if (dp83867->rxctrl_strap_quirk)
+			phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+					 BIT(8));
 	}

 	val = phy_read(phydev, DP83867_CFG3);
@@ -802,6 +811,32 @@ static int dp83867_phy_reset(struct phy_device *phydev)
 			  DP83867_PHYCR_FORCE_LINK_GOOD, 0);
 }

+static void dp83867_link_change_notify(struct phy_device *phydev)
+{
+	/* There is a limitation in DP83867 PHY device where SGMII AN is
+	 * only triggered once after the device is booted up. Even after the
+	 * PHY TPI is down and up again, SGMII AN is not triggered and
+	 * hence no new in-band message from PHY to MAC side SGMII.
+	 * This could cause an issue during power up, when PHY is up prior
+	 * to MAC. At this condition, once MAC side SGMII is up, MAC side
+	 * SGMII wouldn't receive new in-band message from TI PHY with
+	 * correct link status, speed and duplex info.
+	 * Thus, a SW solution is implemented here to retrigger SGMII Auto-Neg
+	 * whenever there is a link change.
+	 */
+	if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+		int val = 0;
+
+		val = phy_clear_bits(phydev, DP83867_CFG2,
+				     DP83867_SGMII_AUTONEG_EN);
+		if (val < 0)
+			return;
+
+		phy_set_bits(phydev, DP83867_CFG2,
+			     DP83867_SGMII_AUTONEG_EN);
+	}
+}
+
 static struct phy_driver dp83867_driver[] = {
 	{
 		.phy_id		= DP83867_PHY_ID,
@@ -826,6 +861,8 @@ static struct phy_driver dp83867_driver[] = {

 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
+
+		.link_change_notify = dp83867_link_change_notify,
 	},
 };

 module_phy_driver(dp83867_driver);
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index b1bb9b8e1e4e..2b64318efdba 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -650,7 +650,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev)

 	cssr1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSSR1);
 	if (cssr1 < 0)
-		return val;
+		return cssr1;

 	/* If the link settings are not resolved, mark the link down */
 	if (!(cssr1 & MV_PCS_CSSR1_RESOLVED)) {
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index c416ab1d2b00..77ba6c3c7a09 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -574,7 +574,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
 	}

 	for (i = 0; i < PHY_MAX_ADDR; i++) {
-		if ((bus->phy_mask & (1 << i)) == 0) {
+		if ((bus->phy_mask & BIT(i)) == 0) {
 			struct phy_device *phydev;

 			phydev = mdiobus_scan(bus, i);
@@ -1008,7 +1008,6 @@ int __init mdio_bus_init(void)

 	return ret;
 }
-EXPORT_SYMBOL_GPL(mdio_bus_init);

 #if IS_ENABLED(CONFIG_PHYLIB)
 void mdio_bus_exit(void)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 92e94ac94a34..bbbe198f83e8 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -283,7 +283,7 @@ static int kszphy_config_reset(struct phy_device *phydev)
 		}
 	}

-	if (priv->led_mode >= 0)
+	if (priv->type && priv->led_mode >= 0)
 		kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);

 	return 0;
@@ -299,10 +299,10 @@ static int kszphy_config_init(struct phy_device *phydev)

 	type = priv->type;

-	if (type->has_broadcast_disable)
+	if (type && type->has_broadcast_disable)
 		kszphy_broadcast_disable(phydev);

-	if (type->has_nand_tree_disable)
+	if (type && type->has_nand_tree_disable)
 		kszphy_nand_tree_disable(phydev);

 	return kszphy_config_reset(phydev);
@@ -1112,7 +1112,7 @@ static int kszphy_probe(struct phy_device *phydev)

 	priv->type = type;

-	if (type->led_mode_reg) {
+	if (type && type->led_mode_reg) {
 		ret = of_property_read_u32(np, "micrel,led-mode",
 					   &priv->led_mode);
 		if (ret)
@@ -1133,7 +1133,8 @@ static int kszphy_probe(struct phy_device *phydev)
 		unsigned long rate = clk_get_rate(clk);
 		bool rmii_ref_clk_sel_25_mhz;

-		priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
+		if (type)
+			priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
 		rmii_ref_clk_sel_25_mhz = of_property_read_bool(np,
 				"micrel,rmii-reference-clock-select-25-mhz");
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index b7b2521c73fb..c00eef457b85 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -632,6 +632,7 @@ static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,

 	list_del(&flow->list);
 	clear_bit(flow->index, bitmap);
+	memzero_explicit(flow->key, sizeof(flow->key));
 	kfree(flow);
 }

diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index
db7866b6f752..18e67eb6d8b4 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -124,10 +124,15 @@ EXPORT_SYMBOL(phy_print_status); */ static int phy_clear_interrupt(struct phy_device *phydev) { - if (phydev->drv->ack_interrupt) - return phydev->drv->ack_interrupt(phydev); + int ret = 0; - return 0; + if (phydev->drv->ack_interrupt) { + mutex_lock(&phydev->lock); + ret = phydev->drv->ack_interrupt(phydev); + mutex_unlock(&phydev->lock); + } + + return ret; } /** @@ -981,6 +986,36 @@ int phy_disable_interrupts(struct phy_device *phydev) return phy_clear_interrupt(phydev); } +/** + * phy_did_interrupt - Checks if the PHY generated an interrupt + * @phydev: target phy_device struct + */ +static int phy_did_interrupt(struct phy_device *phydev) +{ + int ret; + + mutex_lock(&phydev->lock); + ret = phydev->drv->did_interrupt(phydev); + mutex_unlock(&phydev->lock); + + return ret; +} + +/** + * phy_handle_interrupt - Handle PHY interrupt + * @phydev: target phy_device struct + */ +static irqreturn_t phy_handle_interrupt(struct phy_device *phydev) +{ + irqreturn_t ret; + + mutex_lock(&phydev->lock); + ret = phydev->drv->handle_interrupt(phydev); + mutex_unlock(&phydev->lock); + + return ret; +} + /** * phy_interrupt - PHY interrupt handler * @irq: interrupt line @@ -994,9 +1029,9 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) struct phy_driver *drv = phydev->drv; if (drv->handle_interrupt) - return drv->handle_interrupt(phydev); + return phy_handle_interrupt(phydev); - if (drv->did_interrupt && !drv->did_interrupt(phydev)) + if (drv->did_interrupt && !phy_did_interrupt(phydev)) return IRQ_NONE; /* reschedule state queue work to run as soon as possible */ diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8789a1a0c80b..e83428c92b33 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1481,6 +1481,7 @@ error: error_module_put: module_put(d->driver->owner); + d->driver = NULL; error_put_device: put_device(d); if (ndev_owner != bus->owner) diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index a05d8372669c..850915a37f4c 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -74,6 +74,12 @@ static const struct sfp_quirk sfp_quirks[] = { .vendor = "HUAWEI", .part = "MA5671A", .modes = sfp_quirk_2500basex, + }, { + // Lantech 8330-262D-E can operate at 2500base-X, but + // incorrectly report 2500MBd NRZ in their EEPROM + .vendor = "Lantech", + .part = "8330-262D-E", + .modes = sfp_quirk_2500basex, }, { .vendor = "UBNT", .part = "UF-INSTANT", diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index efffa65f8214..dcbe278086dc 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -249,6 +249,7 @@ struct sfp { struct sfp_eeprom_id id; unsigned int module_power_mW; unsigned int module_t_start_up; + bool tx_fault_ignore; #if IS_ENABLED(CONFIG_HWMON) struct sfp_diag diag; @@ -1893,6 +1894,12 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) else sfp->module_t_start_up = T_START_UP; + if (!memcmp(id.base.vendor_name, "HUAWEI ", 16) && + !memcmp(id.base.vendor_pn, "MA5671A ", 16)) + sfp->tx_fault_ignore = true; + else + sfp->tx_fault_ignore = false; + return 0; } @@ -2320,7 +2327,10 @@ static void sfp_check_state(struct sfp *sfp) mutex_lock(&sfp->st_mutex); state = sfp_get_state(sfp); changed = state ^ sfp->state; - changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; + if (sfp->tx_fault_ignore) + changed &= SFP_F_PRESENT | SFP_F_LOS; + else + changed &= 
SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; for (i = 0; i < GPIO_MAX; i++) if (changed & BIT(i)) @@ -2417,7 +2427,7 @@ static int sfp_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sfp); - err = devm_add_action(sfp->dev, sfp_cleanup, sfp); + err = devm_add_action_or_reset(sfp->dev, sfp_cleanup, sfp); if (err < 0) return err; diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index 4406b353123e..22f7db87ed21 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -444,12 +444,12 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, } rcv->state = PLIP_PK_DONE; if (rcv->skb) { - kfree_skb(rcv->skb); + dev_kfree_skb_irq(rcv->skb); rcv->skb = NULL; } snd->state = PLIP_PK_DONE; if (snd->skb) { - dev_kfree_skb(snd->skb); + dev_consume_skb_irq(snd->skb); snd->skb = NULL; } spin_unlock_irq(&nl->lock); @@ -1103,7 +1103,7 @@ plip_open(struct net_device *dev) /* Any address will do - we take the first. We already have the first two bytes filled with 0xfc, from plip_init_dev(). */ - const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list); + const struct in_ifaddr *ifa = rtnl_dereference(in_dev->ifa_list); if (ifa != NULL) { memcpy(dev->dev_addr+2, &ifa->ifa_local, 4); } diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index f81fb0b13a94..369bd30fed35 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -468,7 +468,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue) spin_lock(&sl->lock); if (netif_queue_stopped(dev)) { - if (!netif_running(dev)) + if (!netif_running(dev) || !sl->tty) goto out; /* May be we must check transmitter timeout here ? diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index 291fa449993f..45f295403cb5 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c @@ -454,6 +454,7 @@ static int bcm5421_init(struct mii_phy* phy) int can_low_power = 1; if (np == NULL || of_get_property(np, "no-autolowpower", NULL)) can_low_power = 0; + of_node_put(np); if (can_low_power) { /* Enable automatic low-power */ sungem_phy_write(phy, 0x1c, 0x9002); diff --git a/drivers/net/tap.c b/drivers/net/tap.c index f549d3a8e59c..8f7bb15206e9 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1202,7 +1202,8 @@ static int tap_sendmsg(struct socket *sock, struct msghdr *m, struct xdp_buff *xdp; int i; - if (ctl && (ctl->type == TUN_MSG_PTR)) { + if (m->msg_controllen == sizeof(struct tun_msg_ctl) && + ctl && ctl->type == TUN_MSG_PTR) { for (i = 0; i < ctl->num; i++) { xdp = &((struct xdp_buff *)ctl->ptr)[i]; tap_get_user_xdp(q, xdp); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 615f3776b4be..7117d559a32e 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1270,10 +1270,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev, } } - netif_addr_lock_bh(dev); - dev_uc_sync_multiple(port_dev, dev); - dev_mc_sync_multiple(port_dev, dev); - netif_addr_unlock_bh(dev); + if (dev->flags & IFF_UP) { + netif_addr_lock_bh(dev); + dev_uc_sync_multiple(port_dev, dev); + dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev); + } port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); @@ -1344,8 +1346,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev) netdev_rx_handler_unregister(port_dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); - dev_uc_unsync(port_dev, dev); - dev_mc_unsync(port_dev, dev); + if (dev->flags & IFF_UP) { + 
dev_uc_unsync(port_dev, dev); + dev_mc_unsync(port_dev, dev); + } dev_close(port_dev); team_port_leave(team, port); @@ -1695,6 +1699,14 @@ static int team_open(struct net_device *dev) static int team_close(struct net_device *dev) { + struct team *team = netdev_priv(dev); + struct team_port *port; + + list_for_each_entry(port, &team->port_list, list) { + dev_uc_unsync(port->dev, dev); + dev_mc_unsync(port->dev, dev); + } + return 0; } diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index 3160443ef3b9..5d96dc1b00b3 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1343,12 +1343,21 @@ static int __init tbnet_init(void) TBNET_MATCH_FRAGS_ID); ret = tb_register_property_dir("network", tbnet_dir); - if (ret) { - tb_property_free_dir(tbnet_dir); - return ret; - } + if (ret) + goto err_free_dir; - return tb_register_service_driver(&tbnet_driver); + ret = tb_register_service_driver(&tbnet_driver); + if (ret) + goto err_unregister; + + return 0; + +err_unregister: + tb_unregister_property_dir("network", tbnet_dir); +err_free_dir: + tb_property_free_dir(tbnet_dir); + + return ret; } module_init(tbnet_init); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index ffbc7eda95ee..67ce7b779af6 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -220,6 +220,9 @@ struct tun_struct { struct tun_prog __rcu *steering_prog; struct tun_prog __rcu *filter_prog; struct ethtool_link_ksettings link_ksettings; + /* init args */ + struct file *file; + struct ifreq *ifr; }; struct veth { @@ -227,6 +230,9 @@ struct veth { __be16 h_vlan_TCI; }; +static void tun_flow_init(struct tun_struct *tun); +static void tun_flow_uninit(struct tun_struct *tun); + static int tun_napi_receive(struct napi_struct *napi, int budget) { struct tun_file *tfile = container_of(napi, struct tun_file, napi); @@ -279,6 +285,12 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, } } +static void tun_napi_enable(struct tun_file *tfile) +{ + if (tfile->napi_enabled) + napi_enable(&tfile->napi); +} + static void tun_napi_disable(struct tun_file *tfile) { if (tfile->napi_enabled) @@ -640,7 +652,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun = rtnl_dereference(tfile->tun); if (tun && clean) { - tun_napi_disable(tfile); + if (!tfile->detached) + tun_napi_disable(tfile); tun_napi_del(tfile); } @@ -659,8 +672,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean) if (clean) { RCU_INIT_POINTER(tfile->tun, NULL); sock_put(&tfile->sk); - } else + } else { tun_disable_queue(tun, tfile); + tun_napi_disable(tfile); + } synchronize_net(); tun_flow_delete_by_queue(tun, tun->numqueues + 1); @@ -683,7 +698,6 @@ static void __tun_detach(struct tun_file *tfile, bool clean) if (tun) xdp_rxq_info_unreg(&tfile->xdp_rxq); ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); - sock_put(&tfile->sk); } } @@ -699,6 +713,9 @@ static void tun_detach(struct tun_file *tfile, bool clean) if (dev) netdev_state_change(dev); rtnl_unlock(); + + if (clean) + sock_put(&tfile->sk); } static void tun_detach_all(struct net_device *dev) @@ -733,6 +750,7 @@ static void tun_detach_all(struct net_device *dev) sock_put(&tfile->sk); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { + tun_napi_del(tfile); tun_enable_queue(tfile); tun_queue_purge(tfile); xdp_rxq_info_unreg(&tfile->xdp_rxq); @@ -813,6 +831,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, if (tfile->detached) { tun_enable_queue(tfile); + tun_napi_enable(tfile); } else { sock_hold(&tfile->sk); 
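The ntb_netdev and tbnet_init() fixes in the hunks above follow the same rule: when a later registration step fails, every earlier step must be undone in reverse order before the error is returned. A compact standalone sketch of that unwind ladder (register_a()/register_b() are stand-ins, with register_b() forced to fail):

#include <stdio.h>

static int register_a(void) { return 0; }
static void unregister_a(void) { printf("unwound a\n"); }
static int register_b(void) { return -1; }      /* simulated failure */

static int init_sketch(void)
{
        int ret;

        ret = register_a();
        if (ret)
                return ret;

        ret = register_b();
        if (ret)
                goto err_unregister_a;

        return 0;

err_unregister_a:
        unregister_a();
        return ret;
}

int main(void)
{
        printf("init: %d\n", init_sketch());
        return 0;
}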
tun_napi_init(tun, tfile, napi, napi_frags); @@ -964,6 +983,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) static const struct ethtool_ops tun_ethtool_ops; +static int tun_net_init(struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + struct ifreq *ifr = tun->ifr; + int err; + + tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); + if (!tun->pcpu_stats) + return -ENOMEM; + + spin_lock_init(&tun->lock); + + err = security_tun_dev_alloc_security(&tun->security); + if (err < 0) { + free_percpu(tun->pcpu_stats); + return err; + } + + tun_flow_init(tun); + + dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | + TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX; + dev->features = dev->hw_features | NETIF_F_LLTX; + dev->vlan_features = dev->features & + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + tun->flags = (tun->flags & ~TUN_FEATURES) | + (ifr->ifr_flags & TUN_FEATURES); + + INIT_LIST_HEAD(&tun->disabled); + err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI, + ifr->ifr_flags & IFF_NAPI_FRAGS, false); + if (err < 0) { + tun_flow_uninit(tun); + security_tun_dev_free_security(tun->security); + free_percpu(tun->pcpu_stats); + return err; + } + return 0; +} + /* Net device detach from fd. */ static void tun_net_uninit(struct net_device *dev) { @@ -1205,6 +1267,7 @@ static int tun_net_change_carrier(struct net_device *dev, bool new_carrier) } static const struct net_device_ops tun_netdev_ops = { + .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, @@ -1285,6 +1348,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) } static const struct net_device_ops tap_netdev_ops = { + .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, @@ -1325,7 +1389,7 @@ static void tun_flow_uninit(struct tun_struct *tun) #define MAX_MTU 65535 /* Initialize net device. */ -static void tun_net_init(struct net_device *dev) +static void tun_net_initialize(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); @@ -1413,7 +1477,8 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, int err; int i; - if (it->nr_segs > MAX_SKB_FRAGS + 1) + if (it->nr_segs > MAX_SKB_FRAGS + 1 || + len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN)) return ERR_PTR(-EMSGSIZE); local_bh_disable(); @@ -1923,17 +1988,25 @@ drop: skb_headlen(skb)); if (unlikely(headlen > skb_headlen(skb))) { + WARN_ON_ONCE(1); + err = -ENOMEM; this_cpu_inc(tun->pcpu_stats->rx_dropped); +napi_busy: napi_free_frags(&tfile->napi); rcu_read_unlock(); mutex_unlock(&tfile->napi_mutex); - WARN_ON(1); - return -ENOMEM; + return err; } - local_bh_disable(); - napi_gro_frags(&tfile->napi); - local_bh_enable(); + if (likely(napi_schedule_prep(&tfile->napi))) { + local_bh_disable(); + napi_gro_frags(&tfile->napi); + napi_complete(&tfile->napi); + local_bh_enable(); + } else { + err = -EBUSY; + goto napi_busy; + } mutex_unlock(&tfile->napi_mutex); } else if (tfile->napi_enabled) { struct sk_buff_head *queue = &tfile->sk.sk_write_queue; @@ -2257,10 +2330,6 @@ static void tun_free_netdev(struct net_device *dev) BUG_ON(!(list_empty(&tun->disabled))); free_percpu(tun->pcpu_stats); - /* We clear pcpu_stats so that tun_set_iff() can tell if - * tun_free_netdev() has been called from register_netdevice(). 
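The napi_schedule_prep()/napi_complete() pairing added around napi_gro_frags() above is essentially a try-lock on the NAPI context: the writer may feed frames only while it owns the instance, and must back off with -EBUSY otherwise. A rough userspace analogue of that ownership handshake, with invented names:

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag ctx_owned = ATOMIC_FLAG_INIT;

/* Try to take exclusive ownership; mirrors napi_schedule_prep(). */
static bool ctx_acquire(void)
{
        return !atomic_flag_test_and_set(&ctx_owned);
}

/* Drop ownership; mirrors napi_complete(). */
static void ctx_release(void)
{
        atomic_flag_clear(&ctx_owned);
}

static int feed_frame(void)
{
        if (!ctx_acquire())
                return -EBUSY;  /* someone else is polling */
        /* ... process the frame while owning the context ... */
        ctx_release();
        return 0;
}

int main(void)
{
        printf("feed: %d\n", feed_frame());
        return 0;
}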
- */ - tun->pcpu_stats = NULL; tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); @@ -2499,7 +2568,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) if (!tun) return -EBADFD; - if (ctl && (ctl->type == TUN_MSG_PTR)) { + if (m->msg_controllen == sizeof(struct tun_msg_ctl) && + ctl && ctl->type == TUN_MSG_PTR) { struct tun_page tpage; int n = ctl->num; int flush = 0; @@ -2772,41 +2842,16 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->rx_batched = 0; RCU_INIT_POINTER(tun->steering_prog, NULL); - tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); - if (!tun->pcpu_stats) { - err = -ENOMEM; - goto err_free_dev; - } + tun->ifr = ifr; + tun->file = file; - spin_lock_init(&tun->lock); - - err = security_tun_dev_alloc_security(&tun->security); - if (err < 0) - goto err_free_stat; - - tun_net_init(dev); - tun_flow_init(tun); - - dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | - TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX; - dev->features = dev->hw_features | NETIF_F_LLTX; - dev->vlan_features = dev->features & - ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX); - - tun->flags = (tun->flags & ~TUN_FEATURES) | - (ifr->ifr_flags & TUN_FEATURES); - - INIT_LIST_HEAD(&tun->disabled); - err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, - ifr->ifr_flags & IFF_NAPI_FRAGS, false); - if (err < 0) - goto err_free_flow; + tun_net_initialize(dev); err = register_netdevice(tun->dev); - if (err < 0) - goto err_detach; + if (err < 0) { + free_netdev(dev); + return err; + } /* free_netdev() won't check refcnt, to aovid race * with dev_put() we need publish tun after registration. */ @@ -2823,24 +2868,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) strcpy(ifr->ifr_name, tun->dev->name); return 0; - -err_detach: - tun_detach_all(dev); - /* We are here because register_netdevice() has failed. - * If register_netdevice() already called tun_free_netdev() - * while dealing with the error, tun->pcpu_stats has been cleared. - */ - if (!tun->pcpu_stats) - goto err_free_dev; - -err_free_flow: - tun_flow_uninit(tun); - security_tun_dev_free_security(tun->security); -err_free_stat: - free_percpu(tun->pcpu_stats); -err_free_dev: - free_netdev(dev); - return err; } static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 0717c18015c9..c9c409518174 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if (start_of_descs != desc_offset) goto err; - /* self check desc_offset from header*/ - if (desc_offset >= skb_len) + /* self check desc_offset from header and make sure that the + * bounds of the metadata array are inside the SKB + */ + if (pkt_count * 2 + desc_offset >= skb_len) goto err; + /* Packets must not overlap the metadata array */ + skb_trim(skb, desc_offset); + if (pkt_count == 0) goto err; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 0b0cbcee1920..79a53fe245e5 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1471,6 +1471,42 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) * are bundled into this buffer and where we can find an array of * per-packet metadata (which contains elements encoded into u16). 
 */
+
+	/* SKB contents for current firmware:
+	 *   <packet 1> <padding>
+	 *   ...
+	 *   <packet N> <padding>
+	 *   <per-packet header N> ... <per-packet header 1>
+	 *   <padding2> <rx_hdr>
+	 *
+	 * where:
+	 *   <packet N> contains pkt_len bytes:
+	 *		2 bytes of IP alignment pseudo header
+	 *		packet received
+	 *   <per-packet header N> contains 4 bytes:
+	 *		pkt_len and fields AX_RXHDR_*
+	 *   <padding> 0-7 bytes to terminate at
+	 *		8 bytes boundary (64-bit).
+	 *   <padding2> 4 bytes to make rx_hdr terminate at
+	 *		8 bytes boundary (64-bit)
+	 *   <dummy header> contains 4 bytes:
+	 *		pkt_len=0 and AX_RXHDR_DROP_ERR
+	 *   <rx_hdr> contains 4 bytes:
+	 *		pkt_cnt and hdr_off (offset of
+	 *			<per-packet header 1>)
+	 *
+	 * pkt_cnt is the number of entries in the per-packet metadata.
+	 * In current firmware there are 2 entries per packet.
+	 * The first points to the packet and the
+	 *  second is a dummy header.
+	 * This was probably done to align fields in 64-bit and
+	 *  maintain compatibility with old firmware.
+	 * This code assumes that <dummy header> and <padding2> are
+	 *  optional.
+	 */
+
 	if (skb->len < 4)
 		return 0;
 	skb_trim(skb, skb->len - 4);
@@ -1484,51 +1520,66 @@
 	/* Make sure that the bounds of the metadata array are inside the SKB
 	 * (and in front of the counter at the end).
 	 */
-	if (pkt_cnt * 2 + hdr_off > skb->len)
+	if (pkt_cnt * 4 + hdr_off > skb->len)
 		return 0;
 	pkt_hdr = (u32 *)(skb->data + hdr_off);

 	/* Packets must not overlap the metadata array */
 	skb_trim(skb, hdr_off);

-	for (; ; pkt_cnt--, pkt_hdr++) {
+	for (; pkt_cnt > 0; pkt_cnt--, pkt_hdr++) {
+		u16 pkt_len_plus_padd;
 		u16 pkt_len;

 		le32_to_cpus(pkt_hdr);
 		pkt_len = (*pkt_hdr >> 16) & 0x1fff;
+		pkt_len_plus_padd = (pkt_len + 7) & 0xfff8;

-		if (pkt_len > skb->len)
+		/* Skip dummy header used for alignment
+		 */
+		if (pkt_len == 0)
+			continue;
+
+		if (pkt_len_plus_padd > skb->len)
 			return 0;

 		/* Check CRC or runt packet */
-		if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
-		    pkt_len >= 2 + ETH_HLEN) {
-			bool last = (pkt_cnt == 0);
-
-			if (last) {
-				ax_skb = skb;
-			} else {
-				ax_skb = skb_clone(skb, GFP_ATOMIC);
-				if (!ax_skb)
-					return 0;
-			}
-			ax_skb->len = pkt_len;
-			/* Skip IP alignment pseudo header */
-			skb_pull(ax_skb, 2);
-			skb_set_tail_pointer(ax_skb, ax_skb->len);
-			ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
-			ax88179_rx_checksum(ax_skb, pkt_hdr);
-
-			if (last)
-				return 1;
-
-			usbnet_skb_return(dev, ax_skb);
+		if ((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) ||
+		    pkt_len < 2 + ETH_HLEN) {
+			dev->net->stats.rx_errors++;
+			skb_pull(skb, pkt_len_plus_padd);
+			continue;
 		}

-		/* Trim this packet away from the SKB */
-		if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
+		/* last packet */
+		if (pkt_len_plus_padd == skb->len) {
+			skb_trim(skb, pkt_len);
+
+			/* Skip IP alignment pseudo header */
+			skb_pull(skb, 2);
+
+			skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
+			ax88179_rx_checksum(skb, pkt_hdr);
+			return 1;
+		}
+
+		ax_skb = skb_clone(skb, GFP_ATOMIC);
+		if (!ax_skb)
 			return 0;
+		skb_trim(ax_skb, pkt_len);
+
+		/* Skip IP alignment pseudo header */
+		skb_pull(ax_skb, 2);
+
+		skb->truesize = pkt_len_plus_padd +
+				SKB_DATA_ALIGN(sizeof(struct sk_buff));
+		ax88179_rx_checksum(ax_skb, pkt_hdr);
+		usbnet_skb_return(dev, ax_skb);
+
+		skb_pull(skb, pkt_len_plus_padd);
 	}
+
+	return 0;
 }

 static struct sk_buff *
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 43ddbe61dc58..935cd296887f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -763,6 +763,13 @@ static const struct usb_device_id products[] = {
 },
 #endif

+/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054,
USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* ThinkPad USB-C Dock (based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 597766d14563..bce151e3706a 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1024,6 +1024,7 @@ static const struct usb_device_id products[] = { {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */ + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0801)}, /* Quectel RM520N */ /* 3. Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ @@ -1291,8 +1292,11 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ @@ -1329,6 +1333,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81c2, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ @@ -1347,6 +1352,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/ {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */ + {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */ /* 4. 
Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 0bb5b1c78654..f9a79d67d6d4 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1689,7 +1689,9 @@ static void intr_callback(struct urb *urb) "Stop submitting intr, status %d\n", status); return; case -EOVERFLOW: - netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); + if (net_ratelimit()) + netif_info(tp, intr, tp->netdev, + "intr status -EOVERFLOW\n"); goto resubmit; /* -EPIPE: should clear the halt */ default: @@ -6868,6 +6870,7 @@ static const struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)}, {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082)}, diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index e5b744851146..e1cd4c2de2d3 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -61,6 +61,7 @@ struct smsc95xx_priv { u8 suspend_flags; struct mii_bus *mdiobus; struct phy_device *phydev; + struct task_struct *pm_task; }; static bool turbo_mode = true; @@ -70,13 +71,14 @@ MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data, int in_pm) { + struct smsc95xx_priv *pdata = dev->driver_priv; u32 buf; int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16); BUG_ON(!dev); - if (!in_pm) + if (current != pdata->pm_task) fn = usbnet_read_cmd; else fn = usbnet_read_cmd_nopm; @@ -100,13 +102,14 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index, static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data, int in_pm) { + struct smsc95xx_priv *pdata = dev->driver_priv; u32 buf; int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16); BUG_ON(!dev); - if (!in_pm) + if (current != pdata->pm_task) fn = usbnet_write_cmd; else fn = usbnet_write_cmd_nopm; @@ -564,16 +567,12 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev) return smsc95xx_write_reg(dev, AFC_CFG, afc_cfg); } -static int smsc95xx_link_reset(struct usbnet *dev) +static void smsc95xx_mac_update_fullduplex(struct usbnet *dev) { struct smsc95xx_priv *pdata = dev->driver_priv; unsigned long flags; int ret; - ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); - if (ret < 0) - return ret; - spin_lock_irqsave(&pdata->mac_cr_lock, flags); if (pdata->phydev->duplex != DUPLEX_FULL) { pdata->mac_cr &= ~MAC_CR_FDPX_; @@ -585,14 +584,16 @@ static int smsc95xx_link_reset(struct usbnet *dev) spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); - if (ret < 0) - return ret; + if (ret < 0) { + if (ret != -ENODEV) + netdev_warn(dev->net, + "Error updating MAC full duplex mode\n"); + return; + } ret = smsc95xx_phy_update_flowcontrol(dev); if (ret < 0) netdev_warn(dev->net, "Error updating PHY flow control\n"); - - return ret; } static void smsc95xx_status(struct usbnet *dev, struct urb *urb) @@ -609,7 +610,7 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb) netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata); if (intdata & INT_ENP_PHY_INT_) - usbnet_defer_kevent(dev, EVENT_LINK_RESET); + 
; else netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n", intdata); @@ -1066,6 +1067,7 @@ static void smsc95xx_handle_link_change(struct net_device *net) struct usbnet *dev = netdev_priv(net); phy_print_status(net->phydev); + smsc95xx_mac_update_fullduplex(dev); usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); } @@ -1469,9 +1471,12 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) u32 val, link_up; int ret; + pdata->pm_task = current; + ret = usbnet_suspend(intf, message); if (ret < 0) { netdev_warn(dev->net, "usbnet_suspend error\n"); + pdata->pm_task = NULL; return ret; } @@ -1718,6 +1723,7 @@ done: if (ret && PMSG_IS_AUTO(message)) usbnet_resume(intf); + pdata->pm_task = NULL; return ret; } @@ -1738,29 +1744,31 @@ static int smsc95xx_resume(struct usb_interface *intf) /* do this first to ensure it's cleared even in error case */ pdata->suspend_flags = 0; + pdata->pm_task = current; + if (suspend_flags & SUSPEND_ALLMODES) { /* clear wake-up sources */ ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) - return ret; + goto done; val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_); ret = smsc95xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) - return ret; + goto done; /* clear wake-up status */ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val); if (ret < 0) - return ret; + goto done; val &= ~PM_CTL_WOL_EN_; val |= PM_CTL_WUPS_; ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val); if (ret < 0) - return ret; + goto done; } ret = usbnet_resume(intf); @@ -1768,15 +1776,21 @@ static int smsc95xx_resume(struct usb_interface *intf) netdev_warn(dev->net, "usbnet_resume error\n"); phy_init_hw(pdata->phydev); + +done: + pdata->pm_task = NULL; return ret; } static int smsc95xx_reset_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); + struct smsc95xx_priv *pdata = dev->driver_priv; int ret; + pdata->pm_task = current; ret = smsc95xx_reset(dev); + pdata->pm_task = NULL; if (ret < 0) return ret; @@ -1972,7 +1986,6 @@ static const struct driver_info smsc95xx_info = { .description = "smsc95xx USB 2.0 Ethernet", .bind = smsc95xx_bind, .unbind = smsc95xx_unbind, - .link_reset = smsc95xx_link_reset, .reset = smsc95xx_reset, .check_connect = smsc95xx_start_phy, .stop = smsc95xx_stop, diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 402390b1a66b..43d70348343b 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -830,13 +830,11 @@ int usbnet_stop (struct net_device *net) mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags); - /* deferred work (task, timer, softirq) must also stop. - * can't flush_scheduled_work() until we drop rtnl (later), - * else workers could deadlock; so make workers a NOP. 
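The pm_task field introduced in the smsc95xx hunks above replaces the explicit in_pm flag threaded through every call chain: the suspend/resume path records its own task, and the register accessors compare against current to pick the _nopm USB primitives. A loose pthread analogue of that thread-identity dispatch (all names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_t pm_task;
static int pm_task_set;

static int reg_read_normal(void) { return 1; }
static int reg_read_nopm(void)   { return 2; }

/* Route only the thread inside suspend/resume to the _nopm variant. */
static int reg_read(void)
{
        if (pm_task_set && pthread_equal(pm_task, pthread_self()))
                return reg_read_nopm();
        return reg_read_normal();
}

int main(void)
{
        printf("outside pm path: %d\n", reg_read());
        pm_task = pthread_self();
        pm_task_set = 1;
        printf("inside pm path:  %d\n", reg_read());
        pm_task_set = 0;
        return 0;
}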
- */ + /* deferred work (timer, softirq, task) must also stop */ dev->flags = 0; del_timer_sync (&dev->delay); tasklet_kill (&dev->bh); + cancel_work_sync(&dev->kevent); if (!pm) usb_autopm_put_interface(dev->intf); @@ -1569,6 +1567,7 @@ void usbnet_disconnect (struct usb_interface *intf) struct usbnet *dev; struct usb_device *xdev; struct net_device *net; + struct urb *urb; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); @@ -1585,9 +1584,11 @@ void usbnet_disconnect (struct usb_interface *intf) net = dev->net; unregister_netdev (net); - cancel_work_sync(&dev->kevent); - - usb_scuttle_anchored_urbs(&dev->deferred); + while ((urb = usb_get_from_anchor(&dev->deferred))) { + dev_kfree_skb(urb->context); + kfree(urb->sg); + usb_free_urb(urb); + } if (dev->driver_info->unbind) dev->driver_info->unbind (dev, intf); @@ -1969,7 +1970,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, cmd, reqtype, value, index, size); if (size) { - buf = kmalloc(size, GFP_KERNEL); + buf = kmalloc(size, GFP_NOIO); if (!buf) goto out; } @@ -2001,7 +2002,7 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, cmd, reqtype, value, index, size); if (data) { - buf = kmemdup(data, size, GFP_KERNEL); + buf = kmemdup(data, size, GFP_NOIO); if (!buf) goto out; } else { @@ -2102,7 +2103,7 @@ static void usbnet_async_cmd_cb(struct urb *urb) int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, const void *data, u16 size) { - struct usb_ctrlrequest *req = NULL; + struct usb_ctrlrequest *req; struct urb *urb; int err = -ENOMEM; void *buf = NULL; @@ -2120,7 +2121,7 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, if (!buf) { netdev_err(dev->net, "Error allocating buffer" " in %s!\n", __func__); - goto fail_free; + goto fail_free_urb; } } @@ -2144,14 +2145,21 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, if (err < 0) { netdev_err(dev->net, "Error submitting the control" " message: status=%d\n", err); - goto fail_free; + goto fail_free_all; } return 0; +fail_free_all: + kfree(req); fail_free_buf: kfree(buf); -fail_free: - kfree(req); + /* + * avoid a double free + * needed because the flag can be set only + * after filling the URB + */ + urb->transfer_flags = 0; +fail_free_urb: usb_free_urb(urb); fail: return err; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index f7e3eb309a26..5be8ed910553 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -292,7 +292,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) rcu_read_lock(); rcv = rcu_dereference(priv->peer); - if (unlikely(!rcv)) { + if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) { kfree_skb(skb); goto drop; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 1888876969f2..ca699366442e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -213,9 +213,15 @@ struct virtnet_info { /* Packet virtio header size */ u8 hdr_len; - /* Work struct for refilling if we run low on memory. */ + /* Work struct for delayed refilling if we run low on memory. */ struct delayed_work refill; + /* Is delayed refill enabled? 
*/ + bool refill_enabled; + + /* The lock to synchronize the access to refill_enabled */ + spinlock_t refill_lock; + /* Work struct for config space updates */ struct work_struct config_work; @@ -319,6 +325,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) return p; } +static void enable_delayed_refill(struct virtnet_info *vi) +{ + spin_lock_bh(&vi->refill_lock); + vi->refill_enabled = true; + spin_unlock_bh(&vi->refill_lock); +} + +static void disable_delayed_refill(struct virtnet_info *vi) +{ + spin_lock_bh(&vi->refill_lock); + vi->refill_enabled = false; + spin_unlock_bh(&vi->refill_lock); +} + static void virtqueue_napi_schedule(struct napi_struct *napi, struct virtqueue *vq) { @@ -948,8 +968,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, case XDP_TX: stats->xdp_tx++; xdpf = xdp_convert_buff_to_frame(&xdp); - if (unlikely(!xdpf)) + if (unlikely(!xdpf)) { + if (unlikely(xdp_page != page)) + put_page(xdp_page); goto err_xdp; + } err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); if (unlikely(err < 0)) { trace_xdp_exception(vi->dev, xdp_prog, act); @@ -1403,8 +1426,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget, } if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { - if (!try_fill_recv(vi, rq, GFP_ATOMIC)) - schedule_delayed_work(&vi->refill, 0); + if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { + spin_lock(&vi->refill_lock); + if (vi->refill_enabled) + schedule_delayed_work(&vi->refill, 0); + spin_unlock(&vi->refill_lock); + } } u64_stats_update_begin(&rq->stats.syncp); @@ -1523,6 +1550,8 @@ static int virtnet_open(struct net_device *dev) struct virtnet_info *vi = netdev_priv(dev); int i, err; + enable_delayed_refill(vi); + for (i = 0; i < vi->max_queue_pairs; i++) { if (i < vi->curr_queue_pairs) /* Make sure we have some buffers: if oom use wq. */ @@ -1893,6 +1922,8 @@ static int virtnet_close(struct net_device *dev) struct virtnet_info *vi = netdev_priv(dev); int i; + /* Make sure NAPI doesn't schedule refill work */ + disable_delayed_refill(vi); /* Make sure refill_work doesn't re-enable napi! 
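The refill_enabled flag being introduced here is only read and written under refill_lock, so clearing it and then cancelling the work leaves no window in which NAPI can re-arm the refill. A compilable analog with a mutex-guarded flag; work_pending stands in for the kernel's delayed work item:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t refill_lock = PTHREAD_MUTEX_INITIALIZER;
static bool refill_enabled;
static bool work_pending;

/* Hot path (NAPI in the driver): only schedules the deferred work while
 * refills are enabled, with the flag checked under the lock. */
static void maybe_schedule_refill(void)
{
        pthread_mutex_lock(&refill_lock);
        if (refill_enabled)
                work_pending = true;
        pthread_mutex_unlock(&refill_lock);
}

static void disable_delayed_refill(void)
{
        pthread_mutex_lock(&refill_lock);
        refill_enabled = false;
        pthread_mutex_unlock(&refill_lock);
        /* Only after the flag is clear is it safe to cancel: nothing can
         * re-arm the work behind our back (cancel_delayed_work_sync() in
         * the driver). */
        work_pending = false;
}

int main(void)
{
        refill_enabled = true;
        maybe_schedule_refill();
        disable_delayed_refill();
        maybe_schedule_refill();        /* no-op: disabled */
        printf("pending=%d\n", work_pending);
        return 0;
}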
*/ cancel_delayed_work_sync(&vi->refill); @@ -2366,7 +2397,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = { static void virtnet_freeze_down(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - int i; /* Make sure no work handler is accessing the device */ flush_work(&vi->config_work); @@ -2374,14 +2404,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) netif_tx_lock_bh(vi->dev); netif_device_detach(vi->dev); netif_tx_unlock_bh(vi->dev); - cancel_delayed_work_sync(&vi->refill); - - if (netif_running(vi->dev)) { - for (i = 0; i < vi->max_queue_pairs; i++) { - napi_disable(&vi->rq[i].napi); - virtnet_napi_tx_disable(&vi->sq[i].napi); - } - } + if (netif_running(vi->dev)) + virtnet_close(vi->dev); } static int init_vqs(struct virtnet_info *vi); @@ -2389,7 +2413,7 @@ static int init_vqs(struct virtnet_info *vi); static int virtnet_restore_up(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - int err, i; + int err; err = init_vqs(vi); if (err) @@ -2397,16 +2421,12 @@ static int virtnet_restore_up(struct virtio_device *vdev) virtio_device_ready(vdev); - if (netif_running(vi->dev)) { - for (i = 0; i < vi->curr_queue_pairs; i++) - if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); + enable_delayed_refill(vi); - for (i = 0; i < vi->max_queue_pairs; i++) { - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); - virtnet_napi_tx_enable(vi, vi->sq[i].vq, - &vi->sq[i].napi); - } + if (netif_running(vi->dev)) { + err = virtnet_open(vi->dev); + if (err) + return err; } netif_tx_lock_bh(vi->dev); @@ -3108,6 +3128,7 @@ static int virtnet_probe(struct virtio_device *vdev) vdev->priv = vi; INIT_WORK(&vi->config_work, virtnet_config_changed_work); + spin_lock_init(&vi->refill_lock); /* If we can receive ANY GSO packets, we must allocate large ones. 
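The freeze/restore hunks above stop duplicating the NAPI and refill teardown and simply call virtnet_close()/virtnet_open(), so suspend goes through the same ordered shutdown as ifdown. A toy sketch of that shape, with the ordering constraint in comments; struct nic and the function names are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct nic { bool running; bool refill_enabled; bool was_running; };

static void nic_open(struct nic *n)
{
        n->refill_enabled = true;   /* enable before RX can run */
        n->running = true;
}

static void nic_close(struct nic *n)
{
        n->refill_enabled = false;  /* disable before tearing RX down */
        n->running = false;
}

/* Suspend/resume reuse the one open/close pair instead of open-coding a
 * second, subtly different teardown. */
static void nic_freeze(struct nic *n)
{
        n->was_running = n->running;
        if (n->running)
                nic_close(n);
}

static void nic_restore(struct nic *n)
{
        if (n->was_running)
                nic_open(n);
}

int main(void)
{
        struct nic n = { 0 };

        nic_open(&n);
        nic_freeze(&n);
        nic_restore(&n);
        printf("running=%d refill=%d\n", n.running, n.refill_enabled);
        return 0;
}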
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || @@ -3187,14 +3208,20 @@ static int virtnet_probe(struct virtio_device *vdev) } } - err = register_netdev(dev); + /* serialize netdev register + virtio_device_ready() with ndo_open() */ + rtnl_lock(); + + err = register_netdevice(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); + rtnl_unlock(); goto free_failover; } virtio_device_ready(vdev); + rtnl_unlock(); + err = virtnet_cpu_notif_add(vi); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 932a39945cc6..43a4bcdd92c1 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -595,6 +595,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, if (dma_mapping_error(&adapter->pdev->dev, rbi->dma_addr)) { dev_kfree_skb_any(rbi->skb); + rbi->skb = NULL; rq->stats.rx_buf_alloc_failure++; break; } @@ -619,6 +620,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, if (dma_mapping_error(&adapter->pdev->dev, rbi->dma_addr)) { put_page(rbi->page); + rbi->page = NULL; rq->stats.rx_buf_alloc_failure++; break; } @@ -1354,6 +1356,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, }; u32 num_pkts = 0; bool skip_page_frags = false; + bool encap_lro = false; struct Vmxnet3_RxCompDesc *rcd; struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; u16 segCnt = 0, mss = 0; @@ -1494,13 +1497,18 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, if (VMXNET3_VERSION_GE_2(adapter) && rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) { struct Vmxnet3_RxCompDescExt *rcdlro; + union Vmxnet3_GenericDesc *gdesc; + rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; + gdesc = (union Vmxnet3_GenericDesc *)rcd; segCnt = rcdlro->segCnt; WARN_ON_ONCE(segCnt == 0); mss = rcdlro->mss; if (unlikely(segCnt <= 1)) segCnt = 0; + encap_lro = (le32_to_cpu(gdesc->dword[0]) & + (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)); } else { segCnt = 0; } @@ -1568,7 +1576,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, vmxnet3_rx_csum(adapter, skb, (union Vmxnet3_GenericDesc *)rcd); skb->protocol = eth_type_trans(skb, adapter->netdev); - if (!rcd->tcp || + if ((!rcd->tcp && !encap_lro) || !(adapter->netdev->features & NETIF_F_LRO)) goto not_lro; @@ -1577,7 +1585,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, SKB_GSO_TCPV4 : SKB_GSO_TCPV6; skb_shinfo(skb)->gso_size = mss; skb_shinfo(skb)->gso_segs = segCnt; - } else if (segCnt != 0 || skb->len > mtu) { + } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) { u32 hlen; hlen = vmxnet3_get_hdr_len(adapter, skb, @@ -1606,6 +1614,7 @@ not_lro: napi_gro_receive(&rq->napi, skb); ctx->skb = NULL; + encap_lro = false; num_pkts++; } @@ -1654,6 +1663,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, u32 i, ring_idx; struct Vmxnet3_RxDesc *rxd; + /* ring has already been cleaned up */ + if (!rq->rx_ring[0].base) + return; + for (ring_idx = 0; ring_idx < 2; ring_idx++) { for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { #ifdef __BIG_ENDIAN_BITFIELD diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 48fbdce6a70e..72d670667f64 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -710,11 +710,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, rd = kmalloc(sizeof(*rd), GFP_ATOMIC); if (rd == NULL) - return -ENOBUFS; + return -ENOMEM; if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) { kfree(rd); - return -ENOBUFS; + return -ENOMEM; } rd->remote_ip = *ip; diff --git 
a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index f6562a343cb4..b965eb6a4bb1 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -403,7 +403,7 @@ static int lapbeth_device_event(struct notifier_block *this, if (dev_net(dev) != &init_net) return NOTIFY_DONE; - if (!dev_is_ethdev(dev)) + if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev)) return NOTIFY_DONE; switch (event) { diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c index 9a4c8ff32d9d..5bf7822c53f1 100644 --- a/drivers/net/wireguard/allowedips.c +++ b/drivers/net/wireguard/allowedips.c @@ -6,6 +6,8 @@ #include "allowedips.h" #include "peer.h" +enum { MAX_ALLOWEDIPS_BITS = 128 }; + static struct kmem_cache *node_cache; static void swap_endian(u8 *dst, const u8 *src, u8 bits) @@ -40,7 +42,8 @@ static void push_rcu(struct allowedips_node **stack, struct allowedips_node __rcu *p, unsigned int *len) { if (rcu_access_pointer(p)) { - WARN_ON(IS_ENABLED(DEBUG) && *len >= 128); + if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_BITS)) + return; stack[(*len)++] = rcu_dereference_raw(p); } } @@ -52,7 +55,7 @@ static void node_free_rcu(struct rcu_head *rcu) static void root_free_rcu(struct rcu_head *rcu) { - struct allowedips_node *node, *stack[128] = { + struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { container_of(rcu, struct allowedips_node, rcu) }; unsigned int len = 1; @@ -65,7 +68,7 @@ static void root_free_rcu(struct rcu_head *rcu) static void root_remove_peer_lists(struct allowedips_node *root) { - struct allowedips_node *node, *stack[128] = { root }; + struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { root }; unsigned int len = 1; while (len > 0 && (node = stack[--len])) { diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index e189eb95678d..e0693cd965ec 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -152,7 +153,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) goto err_peer; } - mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; + mtu = skb_valid_dst(skb) ? 
dst_mtu(skb_dst(skb)) : dev->mtu; __skb_queue_head_init(&packets); if (!skb_is_gso(skb)) { diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c index d0f3b6d7f408..5c804bcabfe6 100644 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs) if (attrs[WGPEER_A_ENDPOINT]) { struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]); size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]); + struct endpoint endpoint = { { { 0 } } }; - if ((len == sizeof(struct sockaddr_in) && - addr->sa_family == AF_INET) || - (len == sizeof(struct sockaddr_in6) && - addr->sa_family == AF_INET6)) { - struct endpoint endpoint = { { { 0 } } }; - - memcpy(&endpoint.addr, addr, len); + if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) { + endpoint.addr4 = *(struct sockaddr_in *)addr; + wg_socket_set_peer_endpoint(peer, &endpoint); + } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) { + endpoint.addr6 = *(struct sockaddr_in6 *)addr; wg_socket_set_peer_endpoint(peer, &endpoint); } } diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index c0cfd9b36c0b..720952b92e78 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -302,6 +302,41 @@ void wg_noise_set_static_identity_private_key( static_identity->static_public, private_key); } +static void hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen) +{ + struct blake2s_state state; + u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 }; + u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32)); + int i; + + if (keylen > BLAKE2S_BLOCK_SIZE) { + blake2s_init(&state, BLAKE2S_HASH_SIZE); + blake2s_update(&state, key, keylen); + blake2s_final(&state, x_key); + } else + memcpy(x_key, key, keylen); + + for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) + x_key[i] ^= 0x36; + + blake2s_init(&state, BLAKE2S_HASH_SIZE); + blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); + blake2s_update(&state, in, inlen); + blake2s_final(&state, i_hash); + + for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) + x_key[i] ^= 0x5c ^ 0x36; + + blake2s_init(&state, BLAKE2S_HASH_SIZE); + blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); + blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE); + blake2s_final(&state, i_hash); + + memcpy(out, i_hash, BLAKE2S_HASH_SIZE); + memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE); + memzero_explicit(i_hash, BLAKE2S_HASH_SIZE); +} + /* This is Hugo Krawczyk's HKDF: * - https://eprint.iacr.org/2010/264.pdf * - https://tools.ietf.org/html/rfc5869 @@ -322,14 +357,14 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, ((third_len || third_dst) && (!second_len || !second_dst)))); /* Extract entropy from data into secret */ - blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN); + hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN); if (!first_dst || !first_len) goto out; /* Expand first key: key = secret, data = 0x1 */ output[0] = 1; - blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE); + hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE); memcpy(first_dst, output, first_len); if (!second_dst || !second_len) @@ -337,8 +372,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, /* Expand second key: key = secret, data = first-key || 0x2 */ output[BLAKE2S_HASH_SIZE] = 2; - blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, - 
BLAKE2S_HASH_SIZE); + hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE); memcpy(second_dst, output, second_len); if (!third_dst || !third_len) @@ -346,8 +380,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, /* Expand third key: key = secret, data = second-key || 0x3 */ output[BLAKE2S_HASH_SIZE] = 3; - blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, - BLAKE2S_HASH_SIZE); + hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE); memcpy(third_dst, output, third_len); out: diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c index e173204ae7d7..41db10f9be49 100644 --- a/drivers/net/wireguard/selftest/allowedips.c +++ b/drivers/net/wireguard/selftest/allowedips.c @@ -593,10 +593,10 @@ bool __init wg_allowedips_selftest(void) wg_allowedips_remove_by_peer(&t, a, &mutex); test_negative(4, a, 192, 168, 0, 1); - /* These will hit the WARN_ON(len >= 128) in free_node if something - * goes wrong. + /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node + * if something goes wrong. */ - for (i = 0; i < 128; ++i) { + for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) { part = cpu_to_be64(~(1LLU << (i % 64))); memset(&ip, 0xff, 16); memcpy((u8 *)&ip + (i < 64) * 8, &part, 8); diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c index 007cd4457c5f..d4bb40a695ab 100644 --- a/drivers/net/wireguard/selftest/ratelimiter.c +++ b/drivers/net/wireguard/selftest/ratelimiter.c @@ -167,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void) ++test; #endif - for (trials = TRIALS_BEFORE_GIVING_UP;;) { + for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) { int test_count = 0, ret; ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); @@ -176,7 +176,6 @@ bool __init wg_ratelimiter_selftest(void) test += test_count; goto err; } - msleep(500); continue; } else if (ret < 0) { test += test_count; @@ -195,7 +194,6 @@ bool __init wg_ratelimiter_selftest(void) test += test_count; goto err; } - msleep(50); continue; } test += test_count; diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index 473221aa2236..eef5911fa210 100644 --- a/drivers/net/wireguard/socket.c +++ b/drivers/net/wireguard/socket.c @@ -49,7 +49,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb, rt = dst_cache_get_ip4(cache, &fl.saddr); if (!rt) { - security_sk_classify_flow(sock, flowi4_to_flowi(&fl)); + security_sk_classify_flow(sock, flowi4_to_flowi_common(&fl)); if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, fl.saddr, RT_SCOPE_HOST))) { endpoint->src4.s_addr = 0; @@ -129,7 +129,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb, dst = dst_cache_get_ip6(cache, &fl.saddr); if (!dst) { - security_sk_classify_flow(sock, flowi6_to_flowi(&fl)); + security_sk_classify_flow(sock, flowi6_to_flowi_common(&fl)); if (unlikely(!ipv6_addr_any(&fl.saddr) && !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) { endpoint->src6 = fl.saddr = in6addr_any; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b59d482d9c23..15f02bf23e9b 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -853,11 +853,36 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) return 0; } +static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer) +{ + int peer_id, i; + + 
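The allowedips changes above give the traversal depth a name, MAX_ALLOWEDIPS_BITS, and turn the debug-only WARN_ON in push_rcu() into a guard that also refuses to push past the end of the stack array. A self-contained sketch of the same bounded explicit-stack walk; MAX_TRIE_BITS and the node layout are illustrative:

#include <stdio.h>
#include <stdlib.h>

enum { MAX_TRIE_BITS = 128 };   /* named bound instead of a bare 128 */

struct node { struct node *bit[2]; };

static void push(struct node **stack, unsigned int *len, struct node *n)
{
        if (!n)
                return;
        /* Guard and return: never index past the array even if the depth
         * invariant is broken (the WARN_ON(...) return in push_rcu()). */
        if (*len >= MAX_TRIE_BITS) {
                fprintf(stderr, "trie deeper than bound, refusing push\n");
                return;
        }
        stack[(*len)++] = n;
}

static void free_trie(struct node *root)
{
        struct node *stack[MAX_TRIE_BITS] = { root };
        unsigned int len = root ? 1 : 0;
        struct node *n;

        while (len > 0 && (n = stack[--len])) {
                push(stack, &len, n->bit[0]);
                push(stack, &len, n->bit[1]);
                free(n);
        }
}

int main(void)
{
        struct node *root = calloc(1, sizeof(*root));

        root->bit[0] = calloc(1, sizeof(*root));
        free_trie(root);
        puts("freed");
        return 0;
}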
lockdep_assert_held(&ar->conf_mutex); + + for_each_set_bit(peer_id, peer->peer_ids, + ATH10K_MAX_NUM_PEER_IDS) { + ar->peer_map[peer_id] = NULL; + } + + /* Double check that peer is properly un-referenced from + * the peer_map + */ + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { + if (ar->peer_map[i] == peer) { + ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n", + peer->addr, peer, i); + ar->peer_map[i] = NULL; + } + } + + list_del(&peer->list); + kfree(peer); + ar->num_peers--; +} + static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) { struct ath10k_peer *peer, *tmp; - int peer_id; - int i; lockdep_assert_held(&ar->conf_mutex); @@ -869,25 +894,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", peer->addr, vdev_id); - for_each_set_bit(peer_id, peer->peer_ids, - ATH10K_MAX_NUM_PEER_IDS) { - ar->peer_map[peer_id] = NULL; - } - - /* Double check that peer is properly un-referenced from - * the peer_map - */ - for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { - if (ar->peer_map[i] == peer) { - ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n", - peer->addr, peer, i); - ar->peer_map[i] = NULL; - } - } - - list_del(&peer->list); - kfree(peer); - ar->num_peers--; + ath10k_peer_map_cleanup(ar, peer); } spin_unlock_bh(&ar->data_lock); } @@ -5170,13 +5177,29 @@ err: static void ath10k_stop(struct ieee80211_hw *hw) { struct ath10k *ar = hw->priv; + u32 opt; ath10k_drain_tx(ar); mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_OFF) { - if (!ar->hw_rfkill_on) - ath10k_halt(ar); + if (!ar->hw_rfkill_on) { + /* If the current driver state is RESTARTING but not yet + * fully RESTARTED because of incoming suspend event, + * then ath10k_halt() is already called via + * ath10k_core_restart() and should not be called here. + */ + if (ar->state != ATH10K_STATE_RESTARTING) { + ath10k_halt(ar); + } else { + /* Suspending here, because when in RESTARTING + * state, ath10k_core_stop() skips + * ath10k_wait_for_suspend(). + */ + opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR; + ath10k_wait_for_suspend(ar, opt); + } + } ar->state = ATH10K_STATE_OFF; } mutex_unlock(&ar->conf_mutex); @@ -7454,10 +7477,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, /* Clean up the peer object as well since we * must have failed to do this above. 
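ath10k_peer_map_cleanup(), defined here, exists so that every peer-teardown path clears the peer_map slots the same way; the sta_state error path previously freed the peer after clearing only one slot, without the stale-entry scan, which could leave dangling map pointers. The pattern, reduced to a standalone example:

#include <stdio.h>
#include <stdlib.h>

#define MAX_PEER_IDS 8

struct peer { int id; };

/* Single helper used by every teardown path: clear every map slot that
 * still references the peer, then free it (ath10k_peer_map_cleanup()). */
static void peer_map_cleanup(struct peer **map, size_t n, struct peer *p)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (map[i] == p) {
                        fprintf(stderr, "clearing stale slot %zu\n", i);
                        map[i] = NULL;
                }
        }
        free(p);
}

int main(void)
{
        struct peer *map[MAX_PEER_IDS] = { 0 };
        struct peer *p = calloc(1, sizeof(*p));

        map[1] = p;
        map[5] = p;               /* a stale duplicate reference */
        peer_map_cleanup(map, MAX_PEER_IDS, p);
        return map[5] ? 1 : 0;    /* no dangling pointer survives */
}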
*/ - list_del(&peer->list); - ar->peer_map[i] = NULL; - kfree(peer); - ar->num_peers--; + ath10k_peer_map_cleanup(ar, peer); } } spin_unlock_bh(&ar->data_lock); diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index e5a296039f71..4870a3dab0de 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1205,13 +1205,12 @@ static void ath10k_snoc_init_napi(struct ath10k *ar) static int ath10k_snoc_request_irq(struct ath10k *ar) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); - int irqflags = IRQF_TRIGGER_RISING; int ret, id; for (id = 0; id < CE_COUNT_MAX; id++) { ret = request_irq(ar_snoc->ce_irqs[id].irq_line, - ath10k_snoc_per_engine_handler, - irqflags, ce_name[id], ar); + ath10k_snoc_per_engine_handler, 0, + ce_name[id], ar); if (ret) { ath10k_err(ar, "failed to register IRQ handler for CE %d: %d\n", diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 9ff6e6853314..190bc5712e96 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -366,6 +366,8 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab) for (j = 0; j < irq_grp->num_irq; j++) free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); + + netif_napi_del(&irq_grp->napi); } } diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 28de2c7ae899..473d92240a82 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -476,23 +476,23 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab) return ret; } - ret = ath11k_mac_register(ab); - if (ret) { - ath11k_err(ab, "failed register the radio with mac80211: %d\n", ret); - goto err_pdev_debug; - } - ret = ath11k_dp_pdev_alloc(ab); if (ret) { ath11k_err(ab, "failed to attach DP pdev: %d\n", ret); - goto err_mac_unregister; + goto err_pdev_debug; + } + + ret = ath11k_mac_register(ab); + if (ret) { + ath11k_err(ab, "failed register the radio with mac80211: %d\n", ret); + goto err_dp_pdev_free; } ret = ath11k_thermal_register(ab); if (ret) { ath11k_err(ab, "could not register thermal device: %d\n", ret); - goto err_dp_pdev_free; + goto err_mac_unregister; } ret = ath11k_spectral_init(ab); @@ -505,10 +505,10 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab) err_thermal_unregister: ath11k_thermal_unregister(ab); -err_dp_pdev_free: - ath11k_dp_pdev_free(ab); err_mac_unregister: ath11k_mac_unregister(ab); +err_dp_pdev_free: + ath11k_dp_pdev_free(ab); err_pdev_debug: ath11k_debugfs_pdev_destroy(ab); diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h index 659a275e2eb3..694ebba17fad 100644 --- a/drivers/net/wireless/ath/ath11k/debug.h +++ b/drivers/net/wireless/ath/ath11k/debug.h @@ -23,8 +23,8 @@ enum ath11k_debug_mask { ATH11K_DBG_TESTMODE = 0x00000400, ATH11k_DBG_HAL = 0x00000800, ATH11K_DBG_PCI = 0x00001000, - ATH11K_DBG_DP_TX = 0x00001000, - ATH11K_DBG_DP_RX = 0x00002000, + ATH11K_DBG_DP_TX = 0x00002000, + ATH11K_DBG_DP_RX = 0x00004000, ATH11K_DBG_ANY = 0xffffffff, }; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index cc9122f42024..67faf62999de 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -3419,6 +3419,8 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif) if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) { nsts = vht_cap & 
IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + if (nsts > (ar->num_rx_chains - 1)) + nsts = ar->num_rx_chains - 1; value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); } @@ -3459,7 +3461,7 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif) static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap) { bool subfer, subfee; - int sound_dim = 0; + int sound_dim = 0, nsts = 0; subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)); subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)); @@ -3469,6 +3471,11 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap) subfer = false; } + if (ar->num_rx_chains < 2) { + *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); + subfee = false; + } + /* If SU Beaformer is not set, then disable MU Beamformer Capability */ if (!subfer) *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE); @@ -3481,7 +3488,9 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap) sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; *vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; - /* TODO: Need to check invalid STS and Sound_dim values set by FW? */ + nsts = (*vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK); + nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + *vht_cap &= ~IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; /* Enable Sounding Dimension Field only if SU BF is enabled */ if (subfer) { @@ -3493,9 +3502,15 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap) *vht_cap |= sound_dim; } - /* Use the STS advertised by FW unless SU Beamformee is not supported*/ - if (!subfee) - *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK); + /* Enable Beamformee STS Field only if SU BF is enabled */ + if (subfee) { + if (nsts > (ar->num_rx_chains - 1)) + nsts = ar->num_rx_chains - 1; + + nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; + nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; + *vht_cap |= nsts; + } } static struct ieee80211_sta_vht_cap @@ -4008,8 +4023,8 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) } arvif = ath11k_vif_to_arvif(skb_cb->vif); - if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) && - arvif->is_started) { + mutex_lock(&ar->conf_mutex); + if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) { ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb); if (ret) { ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n", @@ -4025,6 +4040,7 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) arvif->is_started); ieee80211_free_txskb(ar->hw, skb); } + mutex_unlock(&ar->conf_mutex); } } @@ -5325,6 +5341,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_peer *peer; int ret; mutex_lock(&ar->conf_mutex); @@ -5336,9 +5353,13 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, WARN_ON(!arvif->is_started); if (ab->hw_params.vdev_start_delay && - arvif->vdev_type == WMI_VDEV_TYPE_MONITOR && - ath11k_peer_find_by_addr(ab, ar->mac_addr)) - ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); + arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find_by_addr(ab, ar->mac_addr); + spin_unlock_bh(&ab->base_lock); + if (peer) + ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); + } ret = ath11k_mac_vdev_stop(arvif); if (ret) diff --git a/drivers/net/wireless/ath/ath11k/mhi.c 
b/drivers/net/wireless/ath/ath11k/mhi.c index aded9a719d51..84db9e55c3e7 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -402,7 +402,7 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci, ret = 0; break; case ATH11K_MHI_POWER_ON: - ret = mhi_async_power_up(ab_pci->mhi_ctrl); + ret = mhi_sync_power_up(ab_pci->mhi_ctrl); break; case ATH11K_MHI_POWER_OFF: mhi_power_down(ab_pci->mhi_ctrl, true); diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c index ac2a8cfdc1c0..f5ab455ea1a2 100644 --- a/drivers/net/wireless/ath/ath11k/spectral.c +++ b/drivers/net/wireless/ath/ath11k/spectral.c @@ -214,7 +214,10 @@ static int ath11k_spectral_scan_config(struct ath11k *ar, return -ENODEV; arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED); + + spin_lock_bh(&ar->spectral.lock); ar->spectral.mode = mode; + spin_unlock_bh(&ar->spectral.lock); ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id, ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR, @@ -829,9 +832,6 @@ static inline void ath11k_spectral_ring_free(struct ath11k *ar) { struct ath11k_spectral *sp = &ar->spectral; - if (!sp->enabled) - return; - ath11k_dbring_srng_cleanup(ar, &sp->rx_ring); ath11k_dbring_buf_cleanup(ar, &sp->rx_ring); } @@ -883,15 +883,16 @@ void ath11k_spectral_deinit(struct ath11k_base *ab) if (!sp->enabled) continue; - ath11k_spectral_debug_unregister(ar); - ath11k_spectral_ring_free(ar); + mutex_lock(&ar->conf_mutex); + ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED); + mutex_unlock(&ar->conf_mutex); spin_lock_bh(&sp->lock); - - sp->mode = ATH11K_SPECTRAL_DISABLED; sp->enabled = false; - spin_unlock_bh(&sp->lock); + + ath11k_spectral_debug_unregister(ar); + ath11k_spectral_ring_free(ar); } } diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c index 1fbc2c19848f..d444b3d70ba2 100644 --- a/drivers/net/wireless/ath/ath5k/eeprom.c +++ b/drivers/net/wireless/ath/ath5k/eeprom.c @@ -746,6 +746,9 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode, } } + if (idx == AR5K_EEPROM_N_PD_CURVES) + goto err_out; + ee->ee_pd_gains[mode] = 1; pd = &chinfo[pier].pd_curves[idx]; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index b0a4ca3559fd..abed1effd95c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -5615,7 +5615,7 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, static u8 ar9003_get_eepmisc(struct ath_hw *ah) { - return ah->eeprom.map4k.baseEepHeader.eepMisc; + return ah->eeprom.ar9300_eep.baseEepHeader.opCapFlags.eepMisc; } const struct eeprom_ops eep_ar9300_ops = { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index a171dbb29fbb..ad949eb02f3d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -720,7 +720,7 @@ #define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \ (AR_SREV_9462(ah) ? 0x16290 : 0x16284)) #define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000) -#define AR_CH0_TOP2_XPABIASLVL_S 12 +#define AR_CH0_TOP2_XPABIASLVL_S (AR_SREV_9561(ah) ? 9 : 12) #define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \ ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 
0x16298 : \ diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index 0a1634238e67..e3d546ef71dd 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -281,6 +281,7 @@ struct ath9k_htc_rxbuf { struct ath9k_htc_rx { struct list_head rxbuf; spinlock_t rxbuflock; + bool initialized; }; #define ATH9K_HTC_TX_CLEANUP_INTERVAL 50 /* ms */ @@ -305,6 +306,7 @@ struct ath9k_htc_tx { DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM); struct timer_list cleanup_timer; spinlock_t tx_lock; + bool initialized; }; struct ath9k_htc_tx_ctl { @@ -325,11 +327,11 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb) } #ifdef CONFIG_ATH9K_HTC_DEBUGFS - -#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) -#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a) -#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++) -#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a) +#define __STAT_SAFE(expr) (hif_dev->htc_handle->drv_priv ? (expr) : 0) +#define TX_STAT_INC(c) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) +#define TX_STAT_ADD(c, a) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a) +#define RX_STAT_INC(c) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++) +#define RX_STAT_ADD(c, a) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a) #define CAB_STAT_INC priv->debug.tx_stats.cab_queued++ #define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index ff61ae34ecdf..07ac88fb1c57 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -944,7 +944,6 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, priv->hw = hw; priv->htc = htc_handle; priv->dev = dev; - htc_handle->drv_priv = priv; SET_IEEE80211_DEV(hw, priv->dev); ret = ath9k_htc_wait_for_target(priv); @@ -965,6 +964,8 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, if (ret) goto err_init; + htc_handle->drv_priv = priv; + return 0; err_init: diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 0bdc4dcb7b8f..43a743ec9d9e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -808,6 +808,11 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv) skb_queue_head_init(&priv->tx.data_vi_queue); skb_queue_head_init(&priv->tx.data_vo_queue); skb_queue_head_init(&priv->tx.tx_failed); + + /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */ + smp_wmb(); + priv->tx.initialized = true; + return 0; } @@ -1006,6 +1011,14 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, goto rx_next; } + if (rxstatus->rs_keyix >= ATH_KEYMAX && + rxstatus->rs_keyix != ATH9K_RXKEYIX_INVALID) { + ath_dbg(common, ANY, + "Invalid keyix, dropping (keyix: %d)\n", + rxstatus->rs_keyix); + goto rx_next; + } + /* Get the RX status information */ memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); @@ -1125,6 +1138,10 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb, struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL; unsigned long flags; + /* Check if ath9k_rx_init() completed. 
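The initialized flags added to ath9k's HTC TX and RX paths close a race where URB completions and WMI events arrive before ath9k_tx_init()/ath9k_rx_init() have finished. The writer orders its setup stores with smp_wmb() before setting the flag, and readers use a data_race() annotated plain load; the sketch below expresses the same publish/consume ordering with C11 release/acquire atomics instead, which is an assumption of this example rather than what the kernel code uses:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int rxbuf[16];
static atomic_bool initialized;

static void *rx_init(void *arg)
{
        (void)arg;
        for (int i = 0; i < 16; i++)
                rxbuf[i] = i;
        /* Publish: all stores above are visible before the flag flips
         * (smp_wmb(); priv->rx.initialized = true; in the driver). */
        atomic_store_explicit(&initialized, true, memory_order_release);
        return NULL;
}

static void rx_callback(void)
{
        /* Early completion: bail out until init has been published
         * (the !data_race(priv->rx.initialized) check). */
        if (!atomic_load_explicit(&initialized, memory_order_acquire)) {
                puts("rx before init: dropped");
                return;
        }
        printf("rx ok, rxbuf[3]=%d\n", rxbuf[3]);
}

int main(void)
{
        pthread_t t;

        rx_callback();                  /* may be dropped */
        pthread_create(&t, NULL, rx_init, NULL);
        pthread_join(t, NULL);
        rx_callback();                  /* guaranteed to see init */
        return 0;
}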
*/ + if (!data_race(priv->rx.initialized)) + goto err; + spin_lock_irqsave(&priv->rx.rxbuflock, flags); list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) { if (!tmp_buf->in_process) { @@ -1180,6 +1197,10 @@ int ath9k_rx_init(struct ath9k_htc_priv *priv) list_add_tail(&rxbuf->list, &priv->rx.rxbuf); } + /* Allow ath9k_htc_rxep() to operate. */ + smp_wmb(); + priv->rx.initialized = true; + return 0; err: diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 994ec48b2f66..ca05b07a45e6 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -364,33 +364,27 @@ ret: } static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle, - struct sk_buff *skb) + struct sk_buff *skb, u32 len) { uint32_t *pattern = (uint32_t *)skb->data; - switch (*pattern) { - case 0x33221199: - { + if (*pattern == 0x33221199 && len >= sizeof(struct htc_panic_bad_vaddr)) { struct htc_panic_bad_vaddr *htc_panic; htc_panic = (struct htc_panic_bad_vaddr *) skb->data; dev_err(htc_handle->dev, "ath: firmware panic! " "exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n", htc_panic->exccause, htc_panic->pc, htc_panic->badvaddr); - break; - } - case 0x33221299: - { + return; + } + if (*pattern == 0x33221299) { struct htc_panic_bad_epid *htc_panic; htc_panic = (struct htc_panic_bad_epid *) skb->data; dev_err(htc_handle->dev, "ath: firmware panic! " "bad epid: 0x%08x\n", htc_panic->epid); - break; - } - default: - dev_err(htc_handle->dev, "ath: unknown panic pattern!\n"); - break; + return; } + dev_err(htc_handle->dev, "ath: unknown panic pattern!\n"); } /* @@ -411,16 +405,26 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle, if (!htc_handle || !skb) return; + /* A valid message requires len >= 8. 
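ath9k_htc_rx_msg() now refuses anything shorter than an HTC frame header before touching it, and each message ID is then checked against the minimum size of the structure it is about to be cast to, as the comment continuing below spells out. A distilled version of that validate-before-cast style; the struct layouts and ID value are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr { uint8_t endpoint_id; uint8_t flags; uint16_t msg_id; };
struct ready_msg { struct hdr h; uint16_t credits; uint16_t size; };

static int rx_msg(const uint8_t *buf, size_t len)
{
        struct hdr h;

        if (len < sizeof(struct hdr))       /* reject truncated header */
                return -1;
        memcpy(&h, buf, sizeof(h));

        switch (h.msg_id) {
        case 1: /* READY */
                if (len < sizeof(struct ready_msg))
                        return -1;          /* too short for this type */
                puts("ready msg parsed");
                return 0;
        default:
                return -1;
        }
}

int main(void)
{
        uint8_t short_buf[2] = { 0 };
        struct ready_msg m = { .h = { .msg_id = 1 } };

        printf("%d\n", rx_msg(short_buf, sizeof(short_buf)));   /* -1 */
        printf("%d\n", rx_msg((uint8_t *)&m, sizeof(m)));       /* 0 */
        return 0;
}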
+ * + * sizeof(struct htc_frame_hdr) == 8 + * sizeof(struct htc_ready_msg) == 8 + * sizeof(struct htc_panic_bad_vaddr) == 16 + * sizeof(struct htc_panic_bad_epid) == 8 + */ + if (unlikely(len < sizeof(struct htc_frame_hdr))) + goto invalid; htc_hdr = (struct htc_frame_hdr *) skb->data; epid = htc_hdr->endpoint_id; if (epid == 0x99) { - ath9k_htc_fw_panic_report(htc_handle, skb); + ath9k_htc_fw_panic_report(htc_handle, skb, len); kfree_skb(skb); return; } if (epid < 0 || epid >= ENDPOINT_MAX) { +invalid: if (pipe_id != USB_REG_IN_PIPE) dev_kfree_skb_any(skb); else @@ -432,21 +436,30 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle, /* Handle trailer */ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) { - if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) + if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) { /* Move past the Watchdog pattern */ htc_hdr = (struct htc_frame_hdr *)(skb->data + 4); + len -= 4; + } } /* Get the message ID */ + if (unlikely(len < sizeof(struct htc_frame_hdr) + sizeof(__be16))) + goto invalid; msg_id = (__be16 *) ((void *) htc_hdr + sizeof(struct htc_frame_hdr)); /* Now process HTC messages */ switch (be16_to_cpu(*msg_id)) { case HTC_MSG_READY_ID: + if (unlikely(len < sizeof(struct htc_ready_msg))) + goto invalid; htc_process_target_rdy(htc_handle, htc_hdr); break; case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID: + if (unlikely(len < sizeof(struct htc_frame_hdr) + + sizeof(struct htc_conn_svc_rspmsg))) + goto invalid; htc_process_conn_rsp(htc_handle, htc_hdr); break; default: diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index af367696fd92..ac354dfc5055 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix) continue; txinfo = IEEE80211_SKB_CB(bf->bf_mpdu); - fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0]; + fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0]; if (fi->keyix == keyix) return true; } diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index fe29ad4b9023..f315c54bd3ac 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -169,6 +169,10 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t) &wmi->drv_priv->fatal_work); break; case WMI_TXSTATUS_EVENTID: + /* Check if ath9k_tx_init() completed. 
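The ath9k change here, together with the xmit.c hunk that follows, moves ath_frame_info out of rate_driver_data, which the status-reporting path overwrites, into status_driver_data, and keeps a BUILD_BUG_ON proving that the private struct fits in the reserved area. The same overlay-with-compile-time-check pattern in portable C, with _Static_assert standing in for BUILD_BUG_ON and the layouts invented for the example:

#include <stdio.h>
#include <string.h>

/* Stand-ins for the mac80211 layout: a reserved per-packet scratch area
 * that survives status processing. */
struct tx_status { void *status_driver_data[3]; };

struct frame_info { unsigned short keyix; unsigned char retries; };

/* Compile-time guarantee that the private struct fits in the scratch
 * area (BUILD_BUG_ON() in get_frame_info()). */
_Static_assert(sizeof(struct frame_info) <=
               sizeof(((struct tx_status *)0)->status_driver_data),
               "frame_info too large for status_driver_data");

static struct frame_info *get_frame_info(struct tx_status *st)
{
        return (struct frame_info *)&st->status_driver_data[0];
}

int main(void)
{
        struct tx_status st;

        memset(&st, 0, sizeof(st));
        get_frame_info(&st)->keyix = 4;
        printf("keyix=%u\n", get_frame_info(&st)->keyix);
        return 0;
}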
*/ + if (!data_race(priv->tx.initialized)) + break; + spin_lock_bh(&priv->tx.tx_lock); if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) { spin_unlock_bh(&priv->tx.tx_lock); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 5691bd6eb82c..6555abf02f18 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); BUILD_BUG_ON(sizeof(struct ath_frame_info) > - sizeof(tx_info->rate_driver_data)); - return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; + sizeof(tx_info->status.status_driver_data)); + return (struct ath_frame_info *) &tx_info->status.status_driver_data[0]; } static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) @@ -2501,6 +2501,16 @@ skip_tx_complete: spin_unlock_irqrestore(&sc->tx.txbuflock, flags); } +static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info) +{ + void *ptr = &tx_info->status; + + memset(ptr + sizeof(tx_info->status.rates), 0, + sizeof(tx_info->status) - + sizeof(tx_info->status.rates) - + sizeof(tx_info->status.status_driver_data)); +} + static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, int nframes, int nbad, int txok) @@ -2512,6 +2522,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_hw *ah = sc->sc_ah; u8 i, tx_rateindex; + ath_clear_tx_status(tx_info); + if (txok) tx_info->status.ack_signal = ts->ts_rssi; @@ -2526,6 +2538,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, tx_info->status.ampdu_len = nframes; tx_info->status.ampdu_ack_len = nframes - nbad; + tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; + + for (i = tx_rateindex + 1; i < hw->max_rates; i++) { + tx_info->status.rates[i].count = 0; + tx_info->status.rates[i].idx = -1; + } + if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) { /* @@ -2547,16 +2566,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, tx_info->status.rates[tx_rateindex].count = hw->max_rate_tries; } - - for (i = tx_rateindex + 1; i < hw->max_rates; i++) { - tx_info->status.rates[i].count = 0; - tx_info->status.rates[i].idx = -1; - } - - tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; - - /* we report airtime in ath_tx_count_airtime(), don't report twice */ - tx_info->status.tx_time = 0; } static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 235cf77cd60c..43050dc7b98d 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -1557,6 +1557,9 @@ static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar) goto out; } } while (ar->beacon_enabled && i--); + + /* no entry found in list */ + return NULL; } out: diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 2d618f90afa7..cb40162bae99 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1010,20 +1010,14 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, void *cmd; int cmdlen = len - sizeof(struct wmi_cmd_hdr); u16 cmdid; - int rc, rc1; + int rc1; - if (cmdlen < 0) + if (cmdlen < 0 || *ppos != 0) return -EINVAL; - wmi 
= kmalloc(len, GFP_KERNEL); - if (!wmi) - return -ENOMEM; - - rc = simple_write_to_buffer(wmi, len, ppos, buf, len); - if (rc < 0) { - kfree(wmi); - return rc; - } + wmi = memdup_user(buf, len); + if (IS_ERR(wmi)) + return PTR_ERR(wmi); cmd = (cmdlen > 0) ? &wmi[1] : NULL; cmdid = le16_to_cpu(wmi->command_id); @@ -1033,7 +1027,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf, wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1); - return rc; + return len; } static const struct file_operations fops_wmi = { diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c index 665b737fbb0d..39975b7d1a16 100644 --- a/drivers/net/wireless/broadcom/b43/phy_n.c +++ b/drivers/net/wireless/broadcom/b43/phy_n.c @@ -582,7 +582,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) u16 data[4]; s16 gain[2]; u16 minmax[2]; - static const u16 lna_gain[4] = { -2, 10, 19, 25 }; + static const s16 lna_gain[4] = { -2, 10, 19, 25 }; if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, 1); diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c index 05404fbd1e70..c1395e622759 100644 --- a/drivers/net/wireless/broadcom/b43legacy/phy.c +++ b/drivers/net/wireless/broadcom/b43legacy/phy.c @@ -1123,7 +1123,7 @@ void b43legacy_phy_lo_b_measure(struct b43legacy_wldev *dev) struct b43legacy_phy *phy = &dev->phy; u16 regstack[12] = { 0 }; u16 mls; - u16 fval; + s16 fval; int i; int j; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 61039538a15b..c8e1d505f7b5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -290,6 +290,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, struct brcmf_pub *drvr = ifp->drvr; struct ethhdr *eh; int head_delta; + unsigned int tx_bytes = skb->len; brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); @@ -364,7 +365,7 @@ done: ndev->stats.tx_dropped++; } else { ndev->stats.tx_packets++; - ndev->stats.tx_bytes += skb->len; + ndev->stats.tx_bytes += tx_bytes; } /* Return ok: we always eat the packet */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index 430d2cca98b3..1285d3685c4f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -228,6 +228,10 @@ static void brcmf_fweh_event_worker(struct work_struct *work) brcmf_fweh_event_name(event->code), event->code, event->emsg.ifidx, event->emsg.bsscfgidx, event->emsg.addr); + if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) { + bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx); + goto event_free; + } /* convert event message */ emsg_be = &event->emsg; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c index fabfbb0b40b0..d0a7465be586 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c @@ -158,12 +158,12 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi) struct brcmf_pno_macaddr_le pfn_mac; u8 *mac_addr = NULL; u8 *mac_mask = NULL; - int err, i; + int err, i, ri; - for (i = 0; i < pi->n_reqs; i++) - if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { - mac_addr = 
pi->reqs[i]->mac_addr; - mac_mask = pi->reqs[i]->mac_addr_mask; + for (ri = 0; ri < pi->n_reqs; ri++) + if (pi->reqs[ri]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + mac_addr = pi->reqs[ri]->mac_addr; + mac_mask = pi->reqs[ri]->mac_addr_mask; break; } @@ -185,7 +185,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi) pfn_mac.mac[0] |= 0x02; brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n", - pi->reqs[i]->reqid, pfn_mac.mac); + pi->reqs[ri]->reqid, pfn_mac.mac); err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac, sizeof(pfn_mac)); if (err) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 6d5d5c39c635..9929e90866f0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype { BRCMF_SDIO_FT_SUB, }; -#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) +#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu)) /* SDIO Pad drive strength to select value mappings */ struct sdiod_drive_str { diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 0569f37e9ed5..8c9c6bfbaeee 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -5236,7 +5236,7 @@ static int get_wep_tx_idx(struct airo_info *ai) return -1; } -static int set_wep_key(struct airo_info *ai, u16 index, const char *key, +static int set_wep_key(struct airo_info *ai, u16 index, const u8 *key, u16 keylen, int perm, int lock) { static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; @@ -5287,7 +5287,7 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file) struct net_device *dev = PDE_DATA(inode); struct airo_info *ai = dev->ml_priv; int i, rc; - char key[16]; + u8 key[16]; u16 index = 0; int j = 0; @@ -5315,12 +5315,22 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file) } for (i = 0; i < 16*3 && data->wbuffer[i+j]; i++) { + int val; + + if (i % 3 == 2) + continue; + + val = hex_to_bin(data->wbuffer[i+j]); + if (val < 0) { + airo_print_err(ai->dev->name, "WebKey passed invalid key hex"); + return; + } switch(i%3) { case 0: - key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4; + key[i/3] = (u8)val << 4; break; case 1: - key[i/3] |= hex_to_bin(data->wbuffer[i+j]); + key[i/3] |= (u8)val; break; } } diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c index d9baa2fa603b..e4c60caa6543 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c @@ -383,7 +383,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev) /* Each fragment may need to have room for encryption * pre/postfix */ - if (host_encrypt) + if (host_encrypt && crypt && crypt->ops) bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len + crypt->ops->extra_mpdu_postfix_len; diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c index 9a491e5db75b..150805aec407 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c @@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, /* Repeat initial/next rate. * For legacy IL_NUMBER_TRY == 1, this loop will not execute. * For HT IL_HT_NUMBER_TRY == 3, this executes twice. 
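The SDIOD_DRVSTR_KEY() change above adds an unsigned cast because left-shifting a signed int into its sign bit is undefined behavior, which any chip ID with bit 15 set would trigger at << 16. Reduced to a pair of macros:

#include <stdio.h>

/* Buggy: for chip values >= 0x8000, (chip) << 16 shifts into the sign
 * bit of int, which is undefined behavior. */
#define DRVSTR_KEY_BAD(chip, pmu)  (((chip) << 16) | (pmu))

/* Fixed: do the shift in unsigned arithmetic. */
#define DRVSTR_KEY(chip, pmu)      (((unsigned int)(chip) << 16) | (pmu))

int main(void)
{
        unsigned int key = DRVSTR_KEY(0xa934, 15);   /* well-defined */

        printf("key=0x%08x\n", key);
        return 0;
}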
*/ - while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) { + while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) { if (is_legacy(tbl_type.lq_type)) { if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) ant_toggle_cnt++; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index fcad5cdcabfa..3c931b1b2a0b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -367,7 +367,7 @@ void iwl_dbg_tlv_del_timers(struct iwl_trans *trans) struct iwl_dbg_tlv_timer_node *node, *tmp; list_for_each_entry_safe(node, tmp, timer_list, list) { - del_timer(&node->timer); + del_timer_sync(&node->timer); list_del(&node->list); kfree(node); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index c146303ec73b..66a968de3bc2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -621,6 +621,9 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, struct iwl_power_vifs *power_iterator = _data; bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX; + if (!mvmvif->uploaded) + return; + switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_DEVICE: break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 46255d2c555b..17b992526694 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1706,7 +1706,10 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm, IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; /* set fragmented ebs for fragmented scan on HB channels */ - if (iwl_mvm_is_scan_fragmented(params->hb_type)) + if ((!iwl_mvm_is_cdb_supported(mvm) && + iwl_mvm_is_scan_fragmented(params->type)) || + (iwl_mvm_is_cdb_supported(mvm) && + iwl_mvm_is_scan_fragmented(params->hb_type))) flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG; return flags; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index ef62839894c7..09f870c48a4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1840,6 +1840,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, iwl_mvm_txq_from_mac80211(sta->txq[i]); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + list_del_init(&mvmtxq->list); } } diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c index a3ca6620dc0c..8fa3ec71603e 100644 --- a/drivers/net/wireless/intersil/p54/main.c +++ b/drivers/net/wireless/intersil/p54/main.c @@ -682,7 +682,7 @@ static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif, * queues have already been stopped and no new frames can sneak * up from behind. 
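The p54_flush() fix, whose loop opens the next hunk, is a pure precedence bug: assignment binds looser than &&, so total received the boolean result of the whole condition rather than the flush count. A short demonstration:

#include <stdio.h>

static int flush_count(void) { return 5; }

int main(void)
{
        int total, i = 3;

        /* Buggy: assignment takes the result of &&, so total is 0 or 1. */
        total = flush_count() && i--;
        printf("buggy: total=%d\n", total);     /* 1, the count is lost */

        /* Fixed: parenthesize the assignment, then test it. */
        i = 3;
        while ((total = flush_count()) && i--)
                ;                               /* waste time / msleep(20) */
        printf("fixed: total=%d i=%d\n", total, i);
        return 0;
}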
*/ - while ((total = p54_flush_count(priv) && i--)) { + while ((total = p54_flush_count(priv)) && i--) { /* waste time */ msleep(20); } diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c index ab0fe8565851..cdb57819684a 100644 --- a/drivers/net/wireless/intersil/p54/p54spi.c +++ b/drivers/net/wireless/intersil/p54/p54spi.c @@ -164,7 +164,7 @@ static int p54spi_request_firmware(struct ieee80211_hw *dev) ret = p54_parse_firmware(dev, priv->firmware); if (ret) { - release_firmware(priv->firmware); + /* the firmware is released by the caller */ return ret; } @@ -659,6 +659,7 @@ static int p54spi_probe(struct spi_device *spi) return 0; err_free_common: + release_firmware(priv->firmware); free_irq(gpio_to_irq(p54spi_gpio_irq), spi); err_free_gpio_irq: gpio_free(p54spi_gpio_irq); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 6911e588e578..3da8ff71ffcc 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -597,7 +597,7 @@ struct mac80211_hwsim_data { bool ps_poll_pending; struct dentry *debugfs; - uintptr_t pending_cookie; + atomic_t pending_cookie; struct sk_buff_head pending; /* packets pending */ /* * Only radios in the same group can communicate together (the @@ -779,6 +779,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *cb; if (!vp->assoc) return; @@ -800,6 +801,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, memcpy(hdr->addr2, mac, ETH_ALEN); memcpy(hdr->addr3, vp->bssid, ETH_ALEN); + cb = IEEE80211_SKB_CB(skb); + cb->control.rates[0].count = 1; + cb->control.rates[1].idx = -1; + rcu_read_lock(); mac80211_hwsim_tx_frame(data->hw, skb, rcu_dereference(vif->chanctx_conf)->def.chan); @@ -1273,8 +1278,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, goto nla_put_failure; /* We create a cookie to identify this skb */ - data->pending_cookie++; - cookie = data->pending_cookie; + cookie = atomic_inc_return(&data->pending_cookie); info->rate_driver_data[0] = (void *)cookie; if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD)) goto nla_put_failure; @@ -2268,11 +2272,13 @@ static void hw_scan_work(struct work_struct *work) if (req->ie_len) skb_put_data(probe, req->ie, req->ie_len); + rcu_read_lock(); if (!ieee80211_tx_prepare_skb(hwsim->hw, hwsim->hw_scan_vif, probe, hwsim->tmp_chan->band, NULL)) { + rcu_read_unlock(); kfree_skb(probe); continue; } @@ -2280,6 +2286,7 @@ static void hw_scan_work(struct work_struct *work) local_bh_disable(); mac80211_hwsim_tx_frame(hwsim->hw, probe, hwsim->tmp_chan); + rcu_read_unlock(); local_bh_enable(); } } @@ -3511,6 +3518,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, const u8 *src; unsigned int hwsim_flags; int i; + unsigned long flags; bool found = false; if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || @@ -3538,18 +3546,20 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, } /* look for the skb matching the cookie passed back from user */ + spin_lock_irqsave(&data2->pending.lock, flags); skb_queue_walk_safe(&data2->pending, skb, tmp) { - u64 skb_cookie; + uintptr_t skb_cookie; txi = IEEE80211_SKB_CB(skb); - skb_cookie = (u64)(uintptr_t)txi->rate_driver_data[0]; + skb_cookie = (uintptr_t)txi->rate_driver_data[0]; if (skb_cookie == ret_skb_cookie) { - 
skb_unlink(skb, &data2->pending); + __skb_unlink(skb, &data2->pending); found = true; break; } } + spin_unlock_irqrestore(&data2->pending.lock, flags); /* not found */ if (!found) @@ -3676,6 +3686,8 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, rx_status.band = channel->band; rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); + if (rx_status.rate_idx >= data2->hw->wiphy->bands[rx_status.band]->n_bitrates) + goto out; rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); hdr = (void *)skb->data; @@ -4210,6 +4222,10 @@ static int hwsim_virtio_handle_cmd(struct sk_buff *skb) nlh = nlmsg_hdr(skb); gnlh = nlmsg_data(nlh); + + if (skb->len < nlh->nlmsg_len) + return -EINVAL; + err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX, hwsim_genl_policy, NULL); if (err) { @@ -4252,7 +4268,8 @@ static void hwsim_virtio_rx_work(struct work_struct *work) spin_unlock_irqrestore(&hwsim_virtio_lock, flags); skb->data = skb->head; - skb_set_tail_pointer(skb, len); + skb_reset_tail_pointer(skb); + skb_put(skb, len); hwsim_virtio_handle_cmd(skb); spin_lock_irqsave(&hwsim_virtio_lock, flags); diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index 5d6dc1dd050d..32fdc4150b60 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -287,6 +287,7 @@ static int if_usb_probe(struct usb_interface *intf, return 0; err_get_fw: + usb_put_dev(udev); lbs_remove_card(priv); err_add_card: if_usb_reset_device(cardp); diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c index d2ee6469e67b..3fa25cd64cda 100644 --- a/drivers/net/wireless/marvell/mwifiex/11h.c +++ b/drivers/net/wireless/marvell/mwifiex/11h.c @@ -303,5 +303,7 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work) mwifiex_dbg(priv->adapter, MSG, "indicating channel switch completion to kernel\n"); + mutex_lock(&priv->wdev.mtx); cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef); + mutex_unlock(&priv->wdev.mtx); } diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 5923c5c14c8d..f4e3dce10d65 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1054,6 +1054,8 @@ struct mwifiex_adapter { void *devdump_data; int devdump_len; struct timer_list devdump_timer; + + bool ignore_btcoex_events; }; void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 7c137eba8cda..b0024893a1cb 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -3142,6 +3142,9 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter) if (ret) goto err_alloc_buffers; + if (pdev->device == PCIE_DEVICE_ID_MARVELL_88W8897) + adapter->ignore_btcoex_events = true; + return 0; err_alloc_buffers: diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 753458628f86..05073a49ab5f 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -1061,6 +1061,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_BT_COEX_WLAN_PARA_CHANGE: dev_dbg(adapter->dev, "EVENT: BT coex wlan param update\n"); + if (adapter->ignore_btcoex_events) + break; 
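In the hwsim hunks around here, the cookie lookup now holds pending.lock across the whole queue walk, which is why the code switches from skb_unlink() to __skb_unlink(): the plain variant takes the queue lock itself and would deadlock. The cookie counter similarly becomes an atomic_t so concurrent transmitters get unique values. A pthread sketch of the locking convention; the list code is invented for the example:

#include <pthread.h>
#include <stdio.h>

struct item { int cookie; struct item *next; };

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static struct item *head;

/* Caller must hold qlock: the double-underscore convention for the
 * "lock already held" variant (__skb_unlink()). */
static void __unlink_item(struct item **pp, struct item *it)
{
        while (*pp && *pp != it)
                pp = &(*pp)->next;
        if (*pp)
                *pp = it->next;
}

static struct item *take_by_cookie(int cookie)
{
        struct item *it;

        pthread_mutex_lock(&qlock);       /* lock once around walk+unlink */
        for (it = head; it; it = it->next)
                if (it->cookie == cookie)
                        break;
        if (it)
                __unlink_item(&head, it); /* locked variant: no re-lock */
        pthread_mutex_unlock(&qlock);
        return it;
}

int main(void)
{
        struct item a = { 1, NULL }, b = { 2, &a };

        head = &b;
        printf("%p\n", (void *)take_by_cookie(1));
        printf("%p\n", (void *)take_by_cookie(7));   /* NULL: not found */
        return 0;
}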
+ mwifiex_bt_coex_wlan_param_update_event(priv, adapter->event_skb); break; diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 0fdfead45c77..f01b455783b2 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -455,6 +455,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) qbuf.addr = addr + offset; qbuf.len = len - offset; + qbuf.skip_unmap = false; mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL); frames++; } diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 466447a5184f..81ff3b4c6c1b 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -107,6 +107,7 @@ static int mt76_led_init(struct mt76_dev *dev) if (!of_property_read_u32(np, "led-sources", &led_pin)) dev->led_pin = led_pin; dev->led_al = of_property_read_bool(np, "led-active-low"); + of_node_put(np); } return led_classdev_register(dev->dev, &dev->led_cdev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 424be103093c..b26617026e83 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -950,7 +950,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid) offset %= 32; val = mt76_rr(dev, addr); - val >>= (tid % 32); + val >>= offset; if (offset > 20) { addr += 4; @@ -1626,7 +1626,7 @@ mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy, struct mt7615_dev *dev = phy->dev; int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck; bool ext_phy = phy != &dev->phy; - u16 def_th = ofdm ? -98 : -110; + s16 def_th = ofdm ? 
-98 : -110; bool update = false; s8 *sensitivity; int signal; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c index e43d13d7c988..2dad61fd451f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c @@ -108,7 +108,7 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500, MT_EP_OUT_INBAND_CMD); if (ret) - return ret; + goto out; if (wait_resp) ret = mt76x02u_mcu_wait_resp(dev, seq); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index ecaf85b483ac..e57e49a722dc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9); /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */ - mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf); + mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf); /* RG_SSUSB_CDR_BR_PE1D = 0x3 */ mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3); diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c index 6bcc4a13ae6c..cc772045d526 100644 --- a/drivers/net/wireless/mediatek/mt7601u/usb.c +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c @@ -26,6 +26,7 @@ static const struct usb_device_id mt7601u_device_table[] = { { USB_DEVICE(0x2717, 0x4106) }, { USB_DEVICE(0x2955, 0x0001) }, { USB_DEVICE(0x2955, 0x1001) }, + { USB_DEVICE(0x2955, 0x1003) }, { USB_DEVICE(0x2a5f, 0x1000) }, { USB_DEVICE(0x7392, 0x7710) }, { 0, } diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index 6be5ac8ba518..dd26f2086180 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -939,30 +939,52 @@ static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u32 len, u8 sta_ch) return; while (index + sizeof(*e) <= len) { + u16 attr_size; + e = (struct wilc_attr_entry *)&buf[index]; - if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST) + attr_size = le16_to_cpu(e->attr_len); + + if (index + sizeof(*e) + attr_size > len) + return; + + if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST && + attr_size >= (sizeof(struct wilc_attr_ch_list) - sizeof(*e))) ch_list_idx = index; - else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL) + else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL && + attr_size == (sizeof(struct wilc_attr_oper_ch) - sizeof(*e))) op_ch_idx = index; + if (ch_list_idx && op_ch_idx) break; - index += le16_to_cpu(e->attr_len) + sizeof(*e); + + index += sizeof(*e) + attr_size; } if (ch_list_idx) { - u16 attr_size; - struct wilc_ch_list_elem *e; - int i; + unsigned int i; + u16 elem_size; ch_list = (struct wilc_attr_ch_list *)&buf[ch_list_idx]; - attr_size = le16_to_cpu(ch_list->attr_len); - for (i = 0; i < attr_size;) { + /* the number of bytes following the final 'elem' member */ + elem_size = le16_to_cpu(ch_list->attr_len) - + (sizeof(*ch_list) - sizeof(struct wilc_attr_entry)); + for (i = 0; i < elem_size;) { + struct wilc_ch_list_elem *e; + e = (struct wilc_ch_list_elem *)(ch_list->elem + i); + + i += sizeof(*e); + if (i > elem_size) + break; + + i += e->no_of_channels; + if (i > elem_size) + break; + if (e->op_class == WILC_WLAN_OPERATING_CLASS_2_4GHZ) { memset(e->ch_list, sta_ch, 
e->no_of_channels); break; } - i += e->no_of_channels; } } diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index d025a3093015..b25847799138 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -467,14 +467,25 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len); if (rsn_ie) { + int rsn_ie_len = sizeof(struct element) + rsn_ie[1]; int offset = 8; - param->mode_802_11i = 2; - param->rsn_found = true; /* extract RSN capabilities */ - offset += (rsn_ie[offset] * 4) + 2; - offset += (rsn_ie[offset] * 4) + 2; - memcpy(param->rsn_cap, &rsn_ie[offset], 2); + if (offset < rsn_ie_len) { + /* skip over pairwise suites */ + offset += (rsn_ie[offset] * 4) + 2; + + if (offset < rsn_ie_len) { + /* skip over authentication suites */ + offset += (rsn_ie[offset] * 4) + 2; + + if (offset + 1 < rsn_ie_len) { + param->mode_802_11i = 2; + param->rsn_found = true; + memcpy(param->rsn_cap, &rsn_ie[offset], 2); + } + } + } } if (param->rsn_found) { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index fed6d21cd6ce..4bdd3a95f2d2 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -4151,7 +4151,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); - rt2800_bbp_write(rt2x00dev, 86, 0); + if (rt2x00_rt(rt2x00dev, RT6352)) + rt2800_bbp_write(rt2x00dev, 86, 0x38); + else + rt2800_bbp_write(rt2x00dev, 86, 0); } if (rf->channel <= 14) { @@ -4352,7 +4355,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, reg = (rf->channel <= 14 ? 
0x1c : 0x24) + 2*rt2x00dev->lna_gain; rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg); - rt2800_iq_calibrate(rt2x00dev, rf->channel); + if (rt2x00_rt(rt2x00dev, RT5592)) + rt2800_iq_calibrate(rt2x00dev, rf->channel); } bbp = rt2800_bbp_read(rt2x00dev, 4); @@ -5625,7 +5629,8 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev, if (qual->vgc_level != vgc_level) { if (rt2x00_rt(rt2x00dev, RT3572) || rt2x00_rt(rt2x00dev, RT3593) || - rt2x00_rt(rt2x00dev, RT3883)) { + rt2x00_rt(rt2x00dev, RT3883) || + rt2x00_rt(rt2x00dev, RT6352)) { rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level); } else if (rt2x00_rt(rt2x00dev, RT5592)) { @@ -5848,7 +5853,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); } else if (rt2x00_rt(rt2x00dev, RT6352)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401); - rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000); + rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0001); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000); rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0); @@ -6110,6 +6115,27 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) reg = rt2800_register_read(rt2x00dev, US_CYC_CNT); rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125); rt2800_register_write(rt2x00dev, US_CYC_CNT, reg); + } else if (rt2x00_is_soc(rt2x00dev)) { + struct clk *clk = clk_get_sys("bus", NULL); + int rate; + + if (IS_ERR(clk)) { + clk = clk_get_sys("cpu", NULL); + + if (IS_ERR(clk)) { + rate = 125; + } else { + rate = clk_get_rate(clk) / 3000000; + clk_put(clk); + } + } else { + rate = clk_get_rate(clk) / 1000000; + clk_put(clk); + } + + reg = rt2800_register_read(rt2x00dev, US_CYC_CNT); + rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, rate); + rt2800_register_write(rt2x00dev, US_CYC_CNT, reg); } reg = rt2800_register_read(rt2x00dev, HT_FBK_CFG0); diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c index 2477e18c7cae..025619cd14e8 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c @@ -460,8 +460,10 @@ static void rtl8180_tx(struct ieee80211_hw *dev, struct rtl8180_priv *priv = dev->priv; struct rtl8180_tx_ring *ring; struct rtl8180_tx_desc *entry; + unsigned int prio = 0; unsigned long flags; - unsigned int idx, prio, hw_prio; + unsigned int idx, hw_prio; + dma_addr_t mapping; u32 tx_flags; u8 rc_flags; @@ -470,7 +472,9 @@ static void rtl8180_tx(struct ieee80211_hw *dev, /* do arithmetic and then convert to le16 */ u16 frame_duration = 0; - prio = skb_get_queue_mapping(skb); + /* rtl8180/rtl8185 only has one useable tx queue */ + if (dev->queues > IEEE80211_AC_BK) + prio = skb_get_queue_mapping(skb); ring = &priv->tx_ring[prio]; mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 0d374a294840..e34cd6fed7e8 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -1874,13 +1874,6 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv) /* We have 8 bits to indicate validity */ map_addr = offset * 8; - if (map_addr >= EFUSE_MAP_LEN) { - dev_warn(dev, "%s: Illegal map_addr (%04x), " "efuse corrupt!\n", - __func__, map_addr); - ret = -EINVAL; -
goto exit; - } for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { /* Check word enable condition in the section */ if (word_mask & BIT(i)) { @@ -1891,6 +1884,13 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv) ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8); if (ret) goto exit; + if (map_addr >= EFUSE_MAP_LEN - 1) { + dev_warn(dev, "%s: Illegal map_addr (%04x), " + "efuse corrupt!\n", + __func__, map_addr); + ret = -EINVAL; + goto exit; + } priv->efuse_wifi.raw[map_addr++] = val8; ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8); @@ -2925,12 +2925,12 @@ bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv, } if (!(simubitmap & 0x30) && priv->tx_paths > 1) { - /* path B RX OK */ + /* path B TX OK */ for (i = 4; i < 6; i++) result[3][i] = result[c1][i]; } - if (!(simubitmap & 0x30) && priv->tx_paths > 1) { + if (!(simubitmap & 0xc0) && priv->tx_paths > 1) { /* path B RX OK */ for (i = 6; i < 8; i++) result[3][i] = result[c1][i]; @@ -4338,15 +4338,14 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv, h2c.b_macid_cfg.ramask2 = (ramask >> 16) & 0xff; h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff; - h2c.ramask.arg = 0x80; h2c.b_macid_cfg.data1 = rateid; if (sgi) h2c.b_macid_cfg.data1 |= BIT(7); h2c.b_macid_cfg.data2 = bw; - dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n", - __func__, ramask, h2c.ramask.arg, sizeof(h2c.b_macid_cfg)); + dev_dbg(&priv->udev->dev, "%s: rate mask %08x, rateid %02x, sgi %d, size %zi\n", + __func__, ramask, rateid, sgi, sizeof(h2c.b_macid_cfg)); rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg)); } @@ -4508,6 +4507,53 @@ rtl8xxxu_wireless_mode(struct ieee80211_hw *hw, struct ieee80211_sta *sta) return network_type; } +static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time) +{ + u32 reg_edca_param[IEEE80211_NUM_ACS] = { + [IEEE80211_AC_VO] = REG_EDCA_VO_PARAM, + [IEEE80211_AC_VI] = REG_EDCA_VI_PARAM, + [IEEE80211_AC_BE] = REG_EDCA_BE_PARAM, + [IEEE80211_AC_BK] = REG_EDCA_BK_PARAM, + }; + u32 val32; + u16 wireless_mode = 0; + u8 aifs, aifsn, sifs; + int i; + + if (priv->vif) { + struct ieee80211_sta *sta; + + rcu_read_lock(); + sta = ieee80211_find_sta(priv->vif, priv->vif->bss_conf.bssid); + if (sta) + wireless_mode = rtl8xxxu_wireless_mode(priv->hw, sta); + rcu_read_unlock(); + } + + if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ || + (wireless_mode & WIRELESS_MODE_N_24G)) + sifs = 16; + else + sifs = 10; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + val32 = rtl8xxxu_read32(priv, reg_edca_param[i]); + + /* It was set in conf_tx. 
*/ + aifsn = val32 & 0xff; + + /* aifsn not set yet or already fixed */ + if (aifsn < 2 || aifsn > 15) + continue; + + aifs = aifsn * slot_time + sifs; + + val32 &= ~0xff; + val32 |= aifs; + rtl8xxxu_write32(priv, reg_edca_param[i], val32); + } +} + static void rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changed) @@ -4593,6 +4639,8 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, else val8 = 20; rtl8xxxu_write8(priv, REG_SLOT, val8); + + rtl8xxxu_set_aifs(priv, val8); } if (changed & BSS_CHANGED_BSSID) { @@ -4984,6 +5032,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, if (control && control->sta) sta = control->sta; + queue = rtl8xxxu_queue_select(hw, skb); + tx_desc = skb_push(skb, tx_desc_size); memset(tx_desc, 0, tx_desc_size); @@ -4996,7 +5046,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, is_broadcast_ether_addr(ieee80211_get_DA(hdr))) tx_desc->txdw0 |= TXDESC_BROADMULTICAST; - queue = rtl8xxxu_queue_select(hw, skb); tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT); if (tx_info->control.hw_key) { diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c index 901cdfe3723c..0b1bc04cb6ad 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.c +++ b/drivers/net/wireless/realtek/rtlwifi/debug.c @@ -329,8 +329,8 @@ static ssize_t rtl_debugfs_set_write_h2c(struct file *filp, tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count); - if (!buffer || copy_from_user(tmp, buffer, tmp_len)) - return count; + if (copy_from_user(tmp, buffer, tmp_len)) + return -EFAULT; tmp[tmp_len] = '\0'; @@ -340,8 +340,8 @@ static ssize_t rtl_debugfs_set_write_h2c(struct file *filp, &h2c_data[4], &h2c_data[5], &h2c_data[6], &h2c_data[7]); - if (h2c_len <= 0) - return count; + if (h2c_len == 0) + return -EINVAL; for (i = 0; i < h2c_len; i++) h2c_data_packed[i] = (u8)h2c_data[i]; diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 06e073defad6..c6e4fda7e431 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1015,7 +1015,7 @@ int rtl_usb_probe(struct usb_interface *intf, hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) + sizeof(struct rtl_usb_priv), &rtl_ops); if (!hw) { - WARN_ONCE(true, "rtl_usb: ieee80211 alloc failed\n"); + pr_warn("rtl_usb: ieee80211 alloc failed\n"); return -ENOMEM; } rtlpriv = hw->priv; diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 6a9178896c90..1ba974969216 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -48,7 +48,6 @@ #include typedef unsigned int pending_ring_idx_t; -#define INVALID_PENDING_RING_IDX (~0U) struct pending_tx_info { struct xen_netif_tx_request req; /* tx request */ @@ -82,8 +81,6 @@ struct xenvif_rx_meta { /* Discriminate from any valid pending_idx value. */ #define INVALID_PENDING_IDX 0xFFFF -#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE - #define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE /* The maximum number of frags is derived from the size of a grant (same @@ -367,11 +364,6 @@ void xenvif_free(struct xenvif *vif); int xenvif_xenbus_init(void); void xenvif_xenbus_fini(void); -int xenvif_schedulable(struct xenvif *vif); - -int xenvif_queue_stopped(struct xenvif_queue *queue); -void xenvif_wake_queue(struct xenvif_queue *queue); - /* (Un)Map communication rings. 
*/ void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue); int xenvif_map_frontend_data_rings(struct xenvif_queue *queue, @@ -394,17 +386,13 @@ int xenvif_dealloc_kthread(void *data); irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data); bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread); -void xenvif_rx_action(struct xenvif_queue *queue); -void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); +bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); void xenvif_carrier_on(struct xenvif *vif); /* Callback from stack when TX packet can be released */ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success); -/* Unmap a pending page and release it back to the guest */ -void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx); - static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue) { return MAX_PENDING_REQS - diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 7ce9807fc24c..97cf5bc48902 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -70,7 +70,7 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) wake_up(&queue->dealloc_wq); } -int xenvif_schedulable(struct xenvif *vif) +static int xenvif_schedulable(struct xenvif *vif) { return netif_running(vif->dev) && test_bit(VIF_STATUS_CONNECTED, &vif->status) && @@ -178,20 +178,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -int xenvif_queue_stopped(struct xenvif_queue *queue) -{ - struct net_device *dev = queue->vif->dev; - unsigned int id = queue->id; - return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id)); -} - -void xenvif_wake_queue(struct xenvif_queue *queue) -{ - struct net_device *dev = queue->vif->dev; - unsigned int id = queue->id; - netif_tx_wake_queue(netdev_get_tx_queue(dev, id)); -} - static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { @@ -269,14 +255,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) skb_clear_hash(skb); - xenvif_rx_queue_tail(queue, skb); + if (!xenvif_rx_queue_tail(queue, skb)) + goto drop; + xenvif_kick_thread(queue); return NETDEV_TX_OK; drop: vif->dev->stats.tx_dropped++; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index b0cbc7fead74..f9373a88cf37 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -112,6 +112,8 @@ static void make_tx_response(struct xenvif_queue *queue, s8 st); static void push_tx_responses(struct xenvif_queue *queue); +static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx); + static inline int tx_work_todo(struct xenvif_queue *queue); static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, @@ -330,10 +332,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue, struct xenvif_tx_cb { - u16 pending_idx; + u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1]; + u8 copy_count; }; #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) +#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i]) +#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count) static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, u16 pending_idx, @@ -368,31 +373,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned 
int size) return skb; } -static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, - struct sk_buff *skb, - struct xen_netif_tx_request *txp, - struct gnttab_map_grant_ref *gop, - unsigned int frag_overflow, - struct sk_buff *nskb) +static void xenvif_get_requests(struct xenvif_queue *queue, + struct sk_buff *skb, + struct xen_netif_tx_request *first, + struct xen_netif_tx_request *txfrags, + unsigned *copy_ops, + unsigned *map_ops, + unsigned int frag_overflow, + struct sk_buff *nskb, + unsigned int extra_count, + unsigned int data_len) { struct skb_shared_info *shinfo = skb_shinfo(skb); skb_frag_t *frags = shinfo->frags; - u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; - int start; + u16 pending_idx; pending_ring_idx_t index; unsigned int nr_slots; + struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops; + struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; + struct xen_netif_tx_request *txp = first; - nr_slots = shinfo->nr_frags; + nr_slots = shinfo->nr_frags + 1; - /* Skip first skb fragment if it is on same page as header fragment. */ - start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); + copy_count(skb) = 0; - for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; - shinfo->nr_frags++, txp++, gop++) { + /* Create copy ops for exactly data_len bytes into the skb head. */ + __skb_put(skb, data_len); + while (data_len > 0) { + int amount = data_len > txp->size ? txp->size : data_len; + + cop->source.u.ref = txp->gref; + cop->source.domid = queue->vif->domid; + cop->source.offset = txp->offset; + + cop->dest.domid = DOMID_SELF; + cop->dest.offset = (offset_in_page(skb->data + + skb_headlen(skb) - + data_len)) & ~XEN_PAGE_MASK; + cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb) + - data_len); + + cop->len = amount; + cop->flags = GNTCOPY_source_gref; + + index = pending_index(queue->pending_cons); + pending_idx = queue->pending_ring[index]; + callback_param(queue, pending_idx).ctx = NULL; + copy_pending_idx(skb, copy_count(skb)) = pending_idx; + copy_count(skb)++; + + cop++; + data_len -= amount; + + if (amount == txp->size) { + /* The copy op covered the full tx_request */ + + memcpy(&queue->pending_tx_info[pending_idx].req, + txp, sizeof(*txp)); + queue->pending_tx_info[pending_idx].extra_count = + (txp == first) ? extra_count : 0; + + if (txp == first) + txp = txfrags; + else + txp++; + queue->pending_cons++; + nr_slots--; + } else { + /* The copy op partially covered the tx_request. + * The remainder will be mapped. + */ + txp->offset += amount; + txp->size -= amount; + } + } + + for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; + shinfo->nr_frags++, gop++) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; - xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop); + xenvif_tx_create_map_op(queue, pending_idx, txp, + txp == first ? 
extra_count : 0, gop); frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); + + if (txp == first) + txp = txfrags; + else + txp++; } if (frag_overflow) { @@ -413,7 +480,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que skb_shinfo(skb)->frag_list = nskb; } - return gop; + (*copy_ops) = cop - queue->tx_copy_ops; + (*map_ops) = gop - queue->tx_map_ops; } static inline void xenvif_grant_handle_set(struct xenvif_queue *queue, @@ -449,7 +517,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, struct gnttab_copy **gopp_copy) { struct gnttab_map_grant_ref *gop_map = *gopp_map; - u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; + u16 pending_idx; /* This always points to the shinfo of the skb being checked, which * could be either the first or the one on the frag_list */ @@ -460,24 +528,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, struct skb_shared_info *first_shinfo = NULL; int nr_frags = shinfo->nr_frags; const bool sharedslot = nr_frags && - frag_get_pending_idx(&shinfo->frags[0]) == pending_idx; - int i, err; + frag_get_pending_idx(&shinfo->frags[0]) == + copy_pending_idx(skb, copy_count(skb) - 1); + int i, err = 0; - /* Check status of header. */ - err = (*gopp_copy)->status; - if (unlikely(err)) { - if (net_ratelimit()) - netdev_dbg(queue->vif->dev, - "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", - (*gopp_copy)->status, - pending_idx, - (*gopp_copy)->source.u.ref); - /* The first frag might still have this slot mapped */ - if (!sharedslot) - xenvif_idx_release(queue, pending_idx, - XEN_NETIF_RSP_ERROR); + for (i = 0; i < copy_count(skb); i++) { + int newerr; + + /* Check status of header. */ + pending_idx = copy_pending_idx(skb, i); + + newerr = (*gopp_copy)->status; + if (likely(!newerr)) { + /* The first frag might still have this slot mapped */ + if (i < copy_count(skb) - 1 || !sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } else { + err = newerr; + if (net_ratelimit()) + netdev_dbg(queue->vif->dev, + "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", + (*gopp_copy)->status, + pending_idx, + (*gopp_copy)->source.u.ref); + /* The first frag might still have this slot mapped */ + if (i < copy_count(skb) - 1 || !sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_ERROR); + } + (*gopp_copy)++; } - (*gopp_copy)++; check_frags: for (i = 0; i < nr_frags; i++, gop_map++) { @@ -524,14 +605,6 @@ check_frags: if (err) continue; - /* First error: if the header haven't shared a slot with the - * first frag, release it as well. - */ - if (!sharedslot) - xenvif_idx_release(queue, - XENVIF_TX_CB(skb)->pending_idx, - XEN_NETIF_RSP_OKAY); - /* Invalidate preceding fragments of this skb. */ for (j = 0; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); @@ -801,7 +874,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, unsigned *copy_ops, unsigned *map_ops) { - struct gnttab_map_grant_ref *gop = queue->tx_map_ops; struct sk_buff *skb, *nskb; int ret; unsigned int frag_overflow; @@ -883,8 +955,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, continue; } + data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ? 
+ XEN_NETBACK_TX_COPY_LEN : txreq.size; + ret = xenvif_count_requests(queue, &txreq, extra_count, txfrags, work_to_do); + if (unlikely(ret < 0)) break; @@ -910,9 +986,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, index = pending_index(queue->pending_cons); pending_idx = queue->pending_ring[index]; - data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN && - ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? - XEN_NETBACK_TX_COPY_LEN : txreq.size; + if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size) + data_len = txreq.size; skb = xenvif_alloc_skb(data_len); if (unlikely(skb == NULL)) { @@ -923,8 +998,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, } skb_shinfo(skb)->nr_frags = ret; - if (data_len < txreq.size) - skb_shinfo(skb)->nr_frags++; /* At this point shinfo->nr_frags is in fact the number of * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. */ @@ -986,54 +1059,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, type); } - XENVIF_TX_CB(skb)->pending_idx = pending_idx; - - __skb_put(skb, data_len); - queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; - queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid; - queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; - - queue->tx_copy_ops[*copy_ops].dest.u.gmfn = - virt_to_gfn(skb->data); - queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; - queue->tx_copy_ops[*copy_ops].dest.offset = - offset_in_page(skb->data) & ~XEN_PAGE_MASK; - - queue->tx_copy_ops[*copy_ops].len = data_len; - queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; - - (*copy_ops)++; - - if (data_len < txreq.size) { - frag_set_pending_idx(&skb_shinfo(skb)->frags[0], - pending_idx); - xenvif_tx_create_map_op(queue, pending_idx, &txreq, - extra_count, gop); - gop++; - } else { - frag_set_pending_idx(&skb_shinfo(skb)->frags[0], - INVALID_PENDING_IDX); - memcpy(&queue->pending_tx_info[pending_idx].req, - &txreq, sizeof(txreq)); - queue->pending_tx_info[pending_idx].extra_count = - extra_count; - } - - queue->pending_cons++; - - gop = xenvif_get_requests(queue, skb, txfrags, gop, - frag_overflow, nskb); + xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops, + map_ops, frag_overflow, nskb, extra_count, + data_len); __skb_queue_tail(&queue->tx_queue, skb); queue->tx.req_cons = idx; - if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) || + if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) || (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) break; } - (*map_ops) = gop - queue->tx_map_ops; return; } @@ -1112,9 +1150,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { struct xen_netif_tx_request *txp; u16 pending_idx; - unsigned data_len; - pending_idx = XENVIF_TX_CB(skb)->pending_idx; + pending_idx = copy_pending_idx(skb, 0); txp = &queue->pending_tx_info[pending_idx].req; /* Check the remap error code. */ @@ -1133,18 +1170,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) continue; } - data_len = skb->len; - callback_param(queue, pending_idx).ctx = NULL; - if (data_len < txp->size) { - /* Append the packet payload as a fragment. */ - txp->offset += data_len; - txp->size -= data_len; - } else { - /* Schedule a response immediately. 
*/ - xenvif_idx_release(queue, pending_idx, - XEN_NETIF_RSP_OKAY); - } - if (txp->flags & XEN_NETTXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (txp->flags & XEN_NETTXF_data_validated) @@ -1330,7 +1355,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) /* Called after netfront has transmitted */ int xenvif_tx_action(struct xenvif_queue *queue, int budget) { - unsigned nr_mops, nr_cops = 0; + unsigned nr_mops = 0, nr_cops = 0; int work_done, ret; if (unlikely(!tx_work_todo(queue))) @@ -1417,7 +1442,7 @@ static void push_tx_responses(struct xenvif_queue *queue) notify_remote_via_irq(queue->tx_irq); } -void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) +static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) { int ret; struct gnttab_unmap_grant_ref tx_unmap_op; diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c index dbac4c03d21a..0ba754ebc5ba 100644 --- a/drivers/net/xen-netback/rx.c +++ b/drivers/net/xen-netback/rx.c @@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) return false; } -void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) +bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) { unsigned long flags; + bool ret = true; spin_lock_irqsave(&queue->rx_queue.lock, flags); @@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) struct net_device *dev = queue->vif->dev; netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); - kfree_skb(skb); - queue->vif->dev->stats.rx_dropped++; + ret = false; } else { if (skb_queue_empty(&queue->rx_queue)) xenvif_update_needed_slots(queue, skb); @@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) } spin_unlock_irqrestore(&queue->rx_queue.lock, flags); + + return ret; } static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) @@ -486,7 +488,7 @@ static void xenvif_rx_skb(struct xenvif_queue *queue) #define RX_BATCH_SIZE 64 -void xenvif_rx_action(struct xenvif_queue *queue) +static void xenvif_rx_action(struct xenvif_queue *queue) { struct sk_buff_head completed_skbs; unsigned int work_done = 0; @@ -495,6 +497,7 @@ void xenvif_rx_action(struct xenvif_queue *queue) queue->rx_copy.completed = &completed_skbs; while (xenvif_rx_ring_slots_available(queue) && + !skb_queue_empty(&queue->rx_queue) && work_done < RX_BATCH_SIZE) { xenvif_rx_skb(queue); work_done++; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index ca261e0fc9c9..9ee9ce0493fe 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be) unsigned int queue_index; xen_unregister_watchers(vif); - xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); #ifdef CONFIG_DEBUG_FS xenvif_debugfs_delif(vif); #endif /* CONFIG_DEBUG_FS */ @@ -984,6 +983,7 @@ static int netback_remove(struct xenbus_device *dev) struct backend_info *be = dev_get_drvdata(&dev->dev); unregister_hotplug_status_watch(be); + xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); if (be->vif) { kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); backend_disconnect(be); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 1a69b5246133..3d149890fa36 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -66,6 +66,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); 
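The xen-netback hunks above change xenvif_rx_queue_tail() to report failure instead of freeing the skb itself, so drop accounting moves into the transmit path, which must use dev_kfree_skb_any() because ndo_start_xmit can run in atomic context. A minimal sketch of that calling convention; the queue limit and all my_* names are hypothetical, not taken from the patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {
	struct sk_buff_head rxq;
};

/* Returns false when the queue is full; ownership of @skb then stays
 * with the caller, which decides how to drop and account for it.
 */
static bool my_queue_tail(struct my_priv *priv, struct sk_buff *skb)
{
	if (skb_queue_len(&priv->rxq) >= 256)	/* hypothetical ring limit */
		return false;

	skb_queue_tail(&priv->rxq, skb);
	return true;
}

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	if (!my_queue_tail(priv, skb)) {
		dev->stats.tx_dropped++;
		/* the _any() variant is safe in hard and soft interrupt context */
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}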
MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); +static bool __read_mostly xennet_trusted = true; +module_param_named(trusted, xennet_trusted, bool, 0644); +MODULE_PARM_DESC(trusted, "Is the backend trusted"); + #define XENNET_TIMEOUT (5 * HZ) static const struct ethtool_ops xennet_ethtool_ops; @@ -175,6 +179,9 @@ struct netfront_info { /* Is device behaving sane? */ bool broken; + /* Should skbs be bounced into a zeroed buffer? */ + bool bounce; + atomic_t rx_gso_checksum_fixup; }; @@ -273,7 +280,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) if (unlikely(!skb)) return NULL; - page = page_pool_dev_alloc_pages(queue->page_pool); + page = page_pool_alloc_pages(queue->page_pool, + GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO); if (unlikely(!page)) { kfree_skb(skb); return NULL; @@ -669,6 +677,33 @@ static int xennet_xdp_xmit(struct net_device *dev, int n, return n - drops; } +struct sk_buff *bounce_skb(const struct sk_buff *skb) +{ + unsigned int headerlen = skb_headroom(skb); + /* Align size to allocate full pages and avoid contiguous data leaks */ + unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len, + XEN_PAGE_SIZE); + struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO); + + if (!n) + return NULL; + + if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) { + WARN_ONCE(1, "misaligned skb allocated\n"); + kfree_skb(n); + return NULL; + } + + /* Set the data pointer */ + skb_reserve(n, headerlen); + /* Set the tail pointer and length */ + skb_put(n, skb->len); + + BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); + + skb_copy_header(n, skb); + return n; +} #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) @@ -722,9 +757,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev /* The first req should be at least ETH_HLEN size or the packet will be * dropped by netback. + * + * If the backend is not trusted bounce all data to zeroed pages to + * avoid exposing contiguous data on the granted page not belonging to + * the skb. */ - if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) { - nskb = skb_copy(skb, GFP_ATOMIC); + if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) { + nskb = bounce_skb(skb); if (!nskb) goto drop; dev_consume_skb_any(skb); @@ -1057,8 +1096,10 @@ static int xennet_get_responses(struct netfront_queue *queue, } } rcu_read_unlock(); -next: + __skb_queue_tail(list, skb); + +next: if (!(rx->flags & XEN_NETRXF_more_data)) break; @@ -1827,6 +1868,12 @@ static int netfront_resume(struct xenbus_device *dev) netif_tx_unlock_bh(info->netdev); xennet_disconnect_backend(info); + + rtnl_lock(); + if (info->queues) + xennet_destroy_queues(info); + rtnl_unlock(); + return 0; } @@ -2248,6 +2295,10 @@ static int talk_to_netback(struct xenbus_device *dev, info->netdev->irq = 0; + /* Check if backend is trusted. 
*/ + info->bounce = !xennet_trusted || + !xenbus_read_unsigned(dev->nodename, "trusted", 1); + /* Check if backend supports multiple queues */ max_queues = xenbus_read_unsigned(info->xbdev->otherend, "multi-queue-max-queues", 1); @@ -2414,6 +2465,9 @@ static int xennet_connect(struct net_device *dev) return err; if (np->netback_has_xdp_headroom) pr_info("backend supports XDP headroom\n"); + if (np->bounce) + dev_info(&np->xbdev->dev, + "bouncing transmitted data to zeroed pages\n"); /* talk_to_netback() sets the correct number of queues */ num_queues = dev->real_num_tx_queues; diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c index 4dc7bd7e02b6..90bea6a1db69 100644 --- a/drivers/nfc/fdp/fdp.c +++ b/drivers/nfc/fdp/fdp.c @@ -238,9 +238,6 @@ static int fdp_nci_open(struct nci_dev *ndev) { int r; struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - - dev_dbg(dev, "%s\n", __func__); r = info->phy_ops->enable(info->phy); @@ -249,35 +246,26 @@ static int fdp_nci_open(struct nci_dev *ndev) static int fdp_nci_close(struct nci_dev *ndev) { - struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - - dev_dbg(dev, "%s\n", __func__); return 0; } static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - - dev_dbg(dev, "%s\n", __func__); + int ret; if (atomic_dec_and_test(&info->data_pkt_counter)) info->data_pkt_counter_cb(ndev); - return info->phy_ops->write(info->phy, skb); -} + ret = info->phy_ops->write(info->phy, skb); + if (ret < 0) { + kfree_skb(skb); + return ret; + } -int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb) -{ - struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - - dev_dbg(dev, "%s\n", __func__); - return nci_recv_frame(ndev, skb); + consume_skb(skb); + return 0; } -EXPORT_SYMBOL(fdp_nci_recv_frame); static int fdp_nci_request_firmware(struct nci_dev *ndev) { @@ -489,8 +477,6 @@ static int fdp_nci_setup(struct nci_dev *ndev) int r; u8 patched = 0; - dev_dbg(dev, "%s\n", __func__); - r = nci_core_init(ndev); if (r) goto error; @@ -598,9 +584,7 @@ static int fdp_nci_core_reset_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - dev_dbg(dev, "%s\n", __func__); info->setup_reset_ntf = 1; wake_up(&info->setup_wq); @@ -611,9 +595,7 @@ static int fdp_nci_prop_patch_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - dev_dbg(dev, "%s\n", __func__); info->setup_patch_ntf = 1; info->setup_patch_status = skb->data[0]; wake_up(&info->setup_wq); @@ -786,11 +768,6 @@ EXPORT_SYMBOL(fdp_nci_probe); void fdp_nci_remove(struct nci_dev *ndev) { - struct fdp_nci_info *info = nci_get_drvdata(ndev); - struct device *dev = &info->phy->i2c_dev->dev; - - dev_dbg(dev, "%s\n", __func__); - nci_unregister_device(ndev); nci_free_device(ndev); } diff --git a/drivers/nfc/fdp/fdp.h b/drivers/nfc/fdp/fdp.h index 9bd1f3f23e2d..ead3b21ccae6 100644 --- a/drivers/nfc/fdp/fdp.h +++ b/drivers/nfc/fdp/fdp.h @@ -25,6 +25,5 @@ int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops, struct nci_dev **ndev, int tx_headroom, int tx_tailroom, u8 clock_type, u32 clock_freq, u8 *fw_vsc_cfg); void fdp_nci_remove(struct nci_dev 
*ndev); -int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb); #endif /* __LOCAL_FDP_H_ */ diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index ad0abb1f0bae..5e300788be52 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -49,7 +49,6 @@ static int fdp_nci_i2c_enable(void *phy_id) { struct fdp_i2c_phy *phy = phy_id; - dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__); fdp_nci_i2c_reset(phy); return 0; @@ -59,7 +58,6 @@ static void fdp_nci_i2c_disable(void *phy_id) { struct fdp_i2c_phy *phy = phy_id; - dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__); fdp_nci_i2c_reset(phy); } @@ -197,7 +195,6 @@ flush: static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id) { struct fdp_i2c_phy *phy = phy_id; - struct i2c_client *client; struct sk_buff *skb; int r; @@ -206,9 +203,6 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id) return IRQ_NONE; } - client = phy->i2c_dev; - dev_dbg(&client->dev, "%s\n", __func__); - r = fdp_nci_i2c_read(phy, &skb); if (r == -EREMOTEIO) @@ -217,7 +211,7 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id) return IRQ_HANDLED; if (skb != NULL) - fdp_nci_recv_frame(phy->ndev, skb); + nci_recv_frame(phy->ndev, skb); return IRQ_HANDLED; } @@ -288,8 +282,6 @@ static int fdp_nci_i2c_probe(struct i2c_client *client) u32 clock_freq; int r = 0; - dev_dbg(dev, "%s\n", __func__); - if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { nfc_err(dev, "No I2C_FUNC_I2C support\n"); return -ENODEV; @@ -351,8 +343,6 @@ static int fdp_nci_i2c_remove(struct i2c_client *client) { struct fdp_i2c_phy *phy = i2c_get_clientdata(client); - dev_dbg(&client->dev, "%s\n", __func__); - fdp_nci_remove(phy->ndev); fdp_nci_i2c_disable(phy); diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c index 18cd96284b77..41f27e1cac20 100644 --- a/drivers/nfc/nfcmrvl/i2c.c +++ b/drivers/nfc/nfcmrvl/i2c.c @@ -151,10 +151,15 @@ static int nfcmrvl_i2c_nci_send(struct nfcmrvl_private *priv, ret = -EREMOTEIO; } else ret = 0; - kfree_skb(skb); } - return ret; + if (ret) { + kfree_skb(skb); + return ret; + } + + consume_skb(skb); + return 0; } static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv, @@ -186,9 +191,9 @@ static int nfcmrvl_i2c_parse_dt(struct device_node *node, pdata->irq_polarity = IRQF_TRIGGER_RISING; ret = irq_of_parse_and_map(node, 0); - if (ret < 0) { - pr_err("Unable to get irq, error: %d\n", ret); - return ret; + if (!ret) { + pr_err("Unable to get irq\n"); + return -EINVAL; } pdata->irq = ret; diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c index 8e0ddb434770..1f4120e3314b 100644 --- a/drivers/nfc/nfcmrvl/spi.c +++ b/drivers/nfc/nfcmrvl/spi.c @@ -129,9 +129,9 @@ static int nfcmrvl_spi_parse_dt(struct device_node *node, } ret = irq_of_parse_and_map(node, 0); - if (ret < 0) { - pr_err("Unable to get irq, error: %d\n", ret); - return ret; + if (!ret) { + pr_err("Unable to get irq\n"); + return -EINVAL; } pdata->irq = ret; diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c index 888e298f610b..f26986eb53f1 100644 --- a/drivers/nfc/nfcmrvl/usb.c +++ b/drivers/nfc/nfcmrvl/usb.c @@ -401,13 +401,25 @@ static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data) int err; while ((urb = usb_get_from_anchor(&drv_data->deferred))) { + usb_anchor_urb(urb, &drv_data->tx_anchor); + err = usb_submit_urb(urb, GFP_ATOMIC); - if (err) + if (err) { + kfree(urb->setup_packet); + usb_unanchor_urb(urb); + usb_free_urb(urb); break; + } 
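Several NFC send paths in this section (fdp, nfcmrvl, and the nxp-nci and s3fwrn5 hunks below) converge on the same ownership idiom: the skb is freed with kfree_skb() only on error and with consume_skb() on success, so the kernel's drop-monitor tracepoints fire only for genuine drops. A minimal sketch of the idiom; the my_phy type and its write callback are hypothetical:

#include <linux/skbuff.h>

struct my_phy {
	int (*write)(struct my_phy *phy, struct sk_buff *skb);
};

static int my_nci_send(struct my_phy *phy, struct sk_buff *skb)
{
	int ret = phy->write(phy, skb);

	if (ret < 0) {
		kfree_skb(skb);		/* real drop: visible to drop monitor */
		return ret;
	}

	consume_skb(skb);		/* delivered: freed without a drop event */
	return 0;
}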
drv_data->tx_in_flight++; + usb_free_urb(urb); + } + + /* Cleanup the rest deferred urbs. */ + while ((urb = usb_get_from_anchor(&drv_data->deferred))) { + kfree(urb->setup_packet); + usb_free_urb(urb); } - usb_scuttle_anchored_urbs(&drv_data->deferred); } static int nfcmrvl_resume(struct usb_interface *intf) diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c index a0ce95a287c5..b68b315689c3 100644 --- a/drivers/nfc/nxp-nci/core.c +++ b/drivers/nfc/nxp-nci/core.c @@ -70,22 +70,20 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb) struct nxp_nci_info *info = nci_get_drvdata(ndev); int r; - if (!info->phy_ops->write) { - r = -ENOTSUPP; - goto send_exit; - } + if (!info->phy_ops->write) + return -EOPNOTSUPP; - if (info->mode != NXP_NCI_MODE_NCI) { - r = -EINVAL; - goto send_exit; - } + if (info->mode != NXP_NCI_MODE_NCI) + return -EINVAL; r = info->phy_ops->write(info->phy_id, skb); - if (r < 0) + if (r < 0) { kfree_skb(skb); + return r; + } -send_exit: - return r; + consume_skb(skb); + return 0; } static struct nci_ops nxp_nci_ops = { @@ -104,10 +102,8 @@ int nxp_nci_probe(void *phy_id, struct device *pdev, int r; info = devm_kzalloc(pdev, sizeof(struct nxp_nci_info), GFP_KERNEL); - if (!info) { - r = -ENOMEM; - goto probe_exit; - } + if (!info) + return -ENOMEM; info->phy_id = phy_id; info->pdev = pdev; @@ -120,31 +116,25 @@ int nxp_nci_probe(void *phy_id, struct device *pdev, if (info->phy_ops->set_mode) { r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD); if (r < 0) - goto probe_exit; + return r; } info->mode = NXP_NCI_MODE_COLD; info->ndev = nci_allocate_device(&nxp_nci_ops, NXP_NCI_NFC_PROTOCOLS, NXP_NCI_HDR_LEN, 0); - if (!info->ndev) { - r = -ENOMEM; - goto probe_exit; - } + if (!info->ndev) + return -ENOMEM; nci_set_parent_dev(info->ndev, pdev); nci_set_drvdata(info->ndev, info); r = nci_register_device(info->ndev); - if (r < 0) - goto probe_exit_free_nci; + if (r < 0) { + nci_free_device(info->ndev); + return r; + } *ndev = info->ndev; - - goto probe_exit; - -probe_exit_free_nci: - nci_free_device(info->ndev); -probe_exit: return r; } EXPORT_SYMBOL(nxp_nci_probe); diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index 9f60e4dc5a90..f426dcdfcdd6 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -122,7 +122,9 @@ static int nxp_nci_i2c_fw_read(struct nxp_nci_i2c_phy *phy, skb_put_data(*skb, &header, NXP_NCI_FW_HDR_LEN); r = i2c_master_recv(client, skb_put(*skb, frame_len), frame_len); - if (r != frame_len) { + if (r < 0) { + goto fw_read_exit_free_skb; + } else if (r != frame_len) { nfc_err(&client->dev, "Invalid frame length: %u (expected %zu)\n", r, frame_len); @@ -162,8 +164,13 @@ static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy, skb_put_data(*skb, (void *)&header, NCI_CTRL_HDR_SIZE); + if (!header.plen) + return 0; + r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen); - if (r != header.plen) { + if (r < 0) { + goto nci_read_exit_free_skb; + } else if (r != header.plen) { nfc_err(&client->dev, "Invalid frame payload length: %u (expected %u)\n", r, header.plen); diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index d2c011615775..8d7e29d953b7 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -2844,13 +2844,14 @@ void pn53x_common_clean(struct pn533 *priv) { struct pn533_cmd *cmd, *n; + /* delete the timer before cleanup the worker */ + del_timer_sync(&priv->listen_timer); + flush_delayed_work(&priv->poll_work); 
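The pn533 hunk here fixes a teardown race: a timer whose handler can queue work must be stopped synchronously before that work is flushed and its workqueue destroyed, otherwise the timer can re-arm work against a dying queue. A minimal sketch of the ordering, assuming hypothetical fields of the same shape as the driver's:

#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_priv {
	struct timer_list timer;	/* its handler queues @work */
	struct delayed_work work;
	struct workqueue_struct *wq;
};

static void my_teardown(struct my_priv *priv)
{
	del_timer_sync(&priv->timer);	 /* 1: nothing can queue new work now */
	flush_delayed_work(&priv->work); /* 2: wait out any in-flight work */
	destroy_workqueue(priv->wq);	 /* 3: the queue is idle and safe to free */
}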
destroy_workqueue(priv->wq); skb_queue_purge(&priv->resp_q); - del_timer(&priv->listen_timer); - list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) { list_del(&cmd->queue); kfree(cmd); diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c index a0665d8ea85b..e92535ebb528 100644 --- a/drivers/nfc/pn533/uart.c +++ b/drivers/nfc/pn533/uart.c @@ -310,6 +310,7 @@ static void pn532_uart_remove(struct serdev_device *serdev) pn53x_unregister_nfc(pn532->priv); serdev_device_close(serdev); pn53x_common_clean(pn532->priv); + del_timer_sync(&pn532->cmd_timeout); kfree_skb(pn532->recv_skb); kfree(pn532); } diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c index ba6c486d6465..9b43cd3a45af 100644 --- a/drivers/nfc/s3fwrn5/core.c +++ b/drivers/nfc/s3fwrn5/core.c @@ -97,11 +97,15 @@ static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb) } ret = s3fwrn5_write(info, skb); - if (ret < 0) + if (ret < 0) { kfree_skb(skb); + mutex_unlock(&info->mutex); + return ret; + } + consume_skb(skb); mutex_unlock(&info->mutex); - return ret; + return 0; } static int s3fwrn5_nci_post_setup(struct nci_dev *ndev) diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index 807eae04c1e3..37d397aae9b9 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -327,7 +327,7 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev, * AID 81 5 to 16 * PARAMETERS 82 0 to 255 */ - if (skb->len < NFC_MIN_AID_LENGTH + 2 && + if (skb->len < NFC_MIN_AID_LENGTH + 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; @@ -340,8 +340,10 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev, /* Check next byte is PARAMETERS tag (82) */ if (skb->data[transaction->aid_len + 2] != - NFC_EVT_TRANSACTION_PARAMS_TAG) + NFC_EVT_TRANSACTION_PARAMS_TAG) { + devm_kfree(dev, transaction); return -EPROTO; + } transaction->params_len = skb->data[transaction->aid_len + 3]; memcpy(transaction->params, skb->data + diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index 0841e0e370a0..d41636504246 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -241,7 +241,7 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx, } EXPORT_SYMBOL(st21nfca_hci_se_io); -static void st21nfca_se_wt_timeout(struct timer_list *t) +static void st21nfca_se_wt_work(struct work_struct *work) { /* * No answer from the secure element @@ -254,8 +254,9 @@ static void st21nfca_se_wt_timeout(struct timer_list *t) */ /* hardware reset managed through VCC_UICC_OUT power supply */ u8 param = 0x01; - struct st21nfca_hci_info *info = from_timer(info, t, - se_info.bwi_timer); + struct st21nfca_hci_info *info = container_of(work, + struct st21nfca_hci_info, + se_info.timeout_work); pr_debug("\n"); @@ -273,6 +274,13 @@ static void st21nfca_se_wt_timeout(struct timer_list *t) info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME); } +static void st21nfca_se_wt_timeout(struct timer_list *t) +{ + struct st21nfca_hci_info *info = from_timer(info, t, se_info.bwi_timer); + + schedule_work(&info->se_info.timeout_work); +} + static void st21nfca_se_activation_timeout(struct timer_list *t) { struct st21nfca_hci_info *info = from_timer(info, t, @@ -296,6 +304,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, int r = 0; struct device *dev = &hdev->ndev->dev; struct nfc_evt_transaction *transaction; + u32 aid_len; + u8 params_len; pr_debug("connectivity gate event: %x\n", event); @@ -304,43 +314,48 @@ int 
st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, r = nfc_se_connectivity(hdev->ndev, host); break; case ST21NFCA_EVT_TRANSACTION: - /* - * According to specification etsi 102 622 + /* According to specification etsi 102 622 * 11.2.2.4 EVT_TRANSACTION Table 52 * Description Tag Length * AID 81 5 to 16 * PARAMETERS 82 0 to 255 + * + * The key differences are aid storage length is variably sized + * in the packet, but fixed in nfc_evt_transaction, and that the aid_len + * is u8 in the packet, but u32 in the structure, and the tags in + * the packet are not included in nfc_evt_transaction. + * + * size in bytes: 1 1 5-16 1 1 0-255 + * offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4 + * member name: aid_tag(M) aid_len aid params_tag(M) params_len params + * example: 0x81 5-16 X 0x82 0-255 X */ - if (skb->len < NFC_MIN_AID_LENGTH + 2 && - skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) + if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; - transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL); + aid_len = skb->data[1]; + + if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid)) + return -EPROTO; + + params_len = skb->data[aid_len + 3]; + + /* Verify PARAMETERS tag is (82), and final check that there is enough + * space in the packet to read everything. + */ + if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) || + (skb->len < aid_len + 4 + params_len)) + return -EPROTO; + + transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL); if (!transaction) return -ENOMEM; - transaction->aid_len = skb->data[1]; + transaction->aid_len = aid_len; + transaction->params_len = params_len; - /* Checking if the length of the AID is valid */ - if (transaction->aid_len > sizeof(transaction->aid)) - return -EINVAL; - - memcpy(transaction->aid, &skb->data[2], - transaction->aid_len); - - /* Check next byte is PARAMETERS tag (82) */ - if (skb->data[transaction->aid_len + 2] != - NFC_EVT_TRANSACTION_PARAMS_TAG) - return -EPROTO; - - transaction->params_len = skb->data[transaction->aid_len + 3]; - - /* Total size is allocated (skb->len - 2) minus fixed array members */ - if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction))) - return -EINVAL; - - memcpy(transaction->params, skb->data + - transaction->aid_len + 4, transaction->params_len); + memcpy(transaction->aid, &skb->data[2], aid_len); + memcpy(transaction->params, &skb->data[aid_len + 4], params_len); r = nfc_se_transaction(hdev->ndev, host, transaction); break; @@ -364,6 +379,7 @@ int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev, switch (event) { case ST21NFCA_EVT_TRANSMIT_DATA: del_timer_sync(&info->se_info.bwi_timer); + cancel_work_sync(&info->se_info.timeout_work); info->se_info.bwi_active = false; r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0); @@ -393,6 +409,7 @@ void st21nfca_se_init(struct nfc_hci_dev *hdev) struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); init_completion(&info->se_info.req_completion); + INIT_WORK(&info->se_info.timeout_work, st21nfca_se_wt_work); /* initialize timers */ timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0); info->se_info.bwi_active = false; @@ -420,6 +437,7 @@ void st21nfca_se_deinit(struct nfc_hci_dev *hdev) if (info->se_info.se_active) del_timer_sync(&info->se_info.se_active_timer); + cancel_work_sync(&info->se_info.timeout_work); info->se_info.bwi_active = false; info->se_info.se_active = 
false; } diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h index 5e0de0fef1d4..0e4a93d11efb 100644 --- a/drivers/nfc/st21nfca/st21nfca.h +++ b/drivers/nfc/st21nfca/st21nfca.h @@ -141,6 +141,7 @@ struct st21nfca_se_info { se_io_cb_t cb; void *cb_context; + struct work_struct timeout_work; }; struct st21nfca_hci_info { diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c index b7bf3f863d79..5ee0afa621a9 100644 --- a/drivers/ntb/test/ntb_tool.c +++ b/drivers/ntb/test/ntb_tool.c @@ -367,14 +367,16 @@ static ssize_t tool_fn_write(struct tool_ctx *tc, u64 bits; int n; + if (*offp) + return 0; + buf = kmalloc(size + 1, GFP_KERNEL); if (!buf) return -ENOMEM; - ret = simple_write_to_buffer(buf, size, offp, ubuf, size); - if (ret < 0) { + if (copy_from_user(buf, ubuf, size)) { kfree(buf); - return ret; + return -EFAULT; } buf[size] = 0; diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 2304c6183822..9ec59960f216 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -187,8 +187,8 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data) ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1; /* make sure we are in the region */ - if (ctx->phys < nd_region->ndr_start - || (ctx->phys + ctx->cleared) > ndr_end) + if (ctx->phys < nd_region->ndr_start || + (ctx->phys + ctx->cleared - 1) > ndr_end) return 0; sector = (ctx->phys - nd_region->ndr_start) / 512; diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index c21ba0602029..1c92c883afdd 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -400,9 +400,7 @@ static ssize_t capability_show(struct device *dev, if (!nd_desc->fw_ops) return -EOPNOTSUPP; - nvdimm_bus_lock(dev); cap = nd_desc->fw_ops->capability(nd_desc); - nvdimm_bus_unlock(dev); switch (cap) { case NVDIMM_FWA_CAP_QUIESCE: @@ -427,10 +425,8 @@ static ssize_t activate_show(struct device *dev, if (!nd_desc->fw_ops) return -EOPNOTSUPP; - nvdimm_bus_lock(dev); cap = nd_desc->fw_ops->capability(nd_desc); state = nd_desc->fw_ops->activate_state(nd_desc); - nvdimm_bus_unlock(dev); if (cap < NVDIMM_FWA_CAP_QUIESCE) return -EOPNOTSUPP; @@ -475,7 +471,6 @@ static ssize_t activate_store(struct device *dev, else return -EINVAL; - nvdimm_bus_lock(dev); state = nd_desc->fw_ops->activate_state(nd_desc); switch (state) { @@ -493,7 +488,6 @@ static ssize_t activate_store(struct device *dev, default: rc = -ENXIO; } - nvdimm_bus_unlock(dev); if (rc == 0) rc = len; @@ -516,10 +510,7 @@ static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribut if (!nd_desc->fw_ops) return 0; - nvdimm_bus_lock(dev); cap = nd_desc->fw_ops->capability(nd_desc); - nvdimm_bus_unlock(dev); - if (cap < NVDIMM_FWA_CAP_QUIESCE) return 0; diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index 4b80150e4afa..b5aa55c61461 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -379,11 +379,6 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid) || !nvdimm->sec.flags) return -EOPNOTSUPP; - if (dev->driver == NULL) { - dev_dbg(dev, "Unable to overwrite while DIMM active.\n"); - return -EINVAL; - } - rc = check_security_state(nvdimm); if (rc) return rc; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 853b9a24f744..e162f1dfbafe 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -531,36 +531,54 @@ EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU); static inline void nvme_clear_nvme_request(struct request *req) 
{ - if (!(req->rq_flags & RQF_DONTPREP)) { - nvme_req(req)->retries = 0; - nvme_req(req)->flags = 0; - req->rq_flags |= RQF_DONTPREP; - } + nvme_req(req)->retries = 0; + nvme_req(req)->flags = 0; + req->rq_flags |= RQF_DONTPREP; } -struct request *nvme_alloc_request(struct request_queue *q, - struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid) +static inline unsigned int nvme_req_op(struct nvme_command *cmd) { - unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN; - struct request *req; + return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN; +} - if (qid == NVME_QID_ANY) { - req = blk_mq_alloc_request(q, op, flags); - } else { - req = blk_mq_alloc_request_hctx(q, op, flags, - qid ? qid - 1 : 0); - } - if (IS_ERR(req)) - return req; +static inline void nvme_init_request(struct request *req, + struct nvme_command *cmd) +{ + if (req->q->queuedata) + req->timeout = NVME_IO_TIMEOUT; + else /* no queuedata implies admin queue */ + req->timeout = ADMIN_TIMEOUT; req->cmd_flags |= REQ_FAILFAST_DRIVER; nvme_clear_nvme_request(req); nvme_req(req)->cmd = cmd; +} +struct request *nvme_alloc_request(struct request_queue *q, + struct nvme_command *cmd, blk_mq_req_flags_t flags) +{ + struct request *req; + + req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags); + if (!IS_ERR(req)) + nvme_init_request(req, cmd); return req; } EXPORT_SYMBOL_GPL(nvme_alloc_request); +struct request *nvme_alloc_request_qid(struct request_queue *q, + struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid) +{ + struct request *req; + + req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags, + qid ? qid - 1 : 0); + if (!IS_ERR(req)) + nvme_init_request(req, cmd); + return req; +} +EXPORT_SYMBOL_GPL(nvme_alloc_request_qid); + static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) { struct nvme_command c; @@ -663,7 +681,7 @@ static void nvme_assign_write_stream(struct nvme_ctrl *ctrl, req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9; } -static void nvme_setup_passthrough(struct request *req, +static inline void nvme_setup_passthrough(struct request *req, struct nvme_command *cmd) { memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd)); @@ -834,7 +852,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; blk_status_t ret = BLK_STS_OK; - nvme_clear_nvme_request(req); + if (!(req->rq_flags & RQF_DONTPREP)) + nvme_clear_nvme_request(req); memset(cmd, 0, sizeof(*cmd)); switch (req_op(req)) { @@ -923,11 +942,15 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, struct request *req; int ret; - req = nvme_alloc_request(q, cmd, flags, qid); + if (qid == NVME_QID_ANY) + req = nvme_alloc_request(q, cmd, flags); + else + req = nvme_alloc_request_qid(q, cmd, flags, qid); if (IS_ERR(req)) return PTR_ERR(req); - req->timeout = timeout ? timeout : ADMIN_TIMEOUT; + if (timeout) + req->timeout = timeout; if (buffer && bufflen) { ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); @@ -1093,11 +1116,12 @@ static int nvme_submit_user_cmd(struct request_queue *q, void *meta = NULL; int ret; - req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY); + req = nvme_alloc_request(q, cmd, 0); if (IS_ERR(req)) return PTR_ERR(req); - req->timeout = timeout ? 
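
The nvme_alloc_request() split above centralizes timeout defaults in nvme_init_request(): per the hunk, a queue with no queuedata is an admin queue and gets ADMIN_TIMEOUT, otherwise NVME_IO_TIMEOUT. Call sites therefore shrink from a ternary to an override-only-if-set, as the surrounding hunks show:

	/* before: every caller picked its own default */
	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	/* after: nvme_init_request() already set a sane default */
	if (timeout)
		req->timeout = timeout;
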
timeout : ADMIN_TIMEOUT; + if (timeout) + req->timeout = timeout; nvme_req(req)->flags |= NVME_REQ_USERCMD; if (ubuffer && bufflen) { @@ -1167,8 +1191,8 @@ static int nvme_keep_alive(struct nvme_ctrl *ctrl) { struct request *rq; - rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED, - NVME_QID_ANY); + rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, + BLK_MQ_REQ_RESERVED); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -1270,6 +1294,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, warn_str, cur->nidl); return -1; } + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) + return NVME_NIDT_EUI64_LEN; memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN); return NVME_NIDT_EUI64_LEN; case NVME_NIDT_NGUID: @@ -1278,6 +1304,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, warn_str, cur->nidl); return -1; } + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) + return NVME_NIDT_NGUID_LEN; memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN); return NVME_NIDT_NGUID_LEN; case NVME_NIDT_UUID: @@ -1286,6 +1314,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, warn_str, cur->nidl); return -1; } + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) + return NVME_NIDT_UUID_LEN; uuid_copy(&ids->uuid, data + sizeof(*cur)); return NVME_NIDT_UUID_LEN; case NVME_NIDT_CSI: @@ -1381,12 +1411,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, if ((*id)->ncap == 0) /* namespace not allocated or attached */ goto out_free_id; - if (ctrl->vs >= NVME_VS(1, 1, 0) && - !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) - memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64)); - if (ctrl->vs >= NVME_VS(1, 2, 0) && - !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) - memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid)); + + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { + dev_info(ctrl->device, + "Ignoring bogus Namespace Identifiers\n"); + } else { + if (ctrl->vs >= NVME_VS(1, 1, 0) && + !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) + memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64)); + if (ctrl->vs >= NVME_VS(1, 2, 0) && + !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) + memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid)); + } return 0; @@ -2012,7 +2048,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); } blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1); - blk_queue_dma_alignment(q, 7); + blk_queue_dma_alignment(q, 3); blk_queue_write_cache(q, vwc, vwc); } @@ -2249,18 +2285,21 @@ static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, enum pr_type type, bool abort) { u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); + return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); } static int nvme_pr_clear(struct block_device *bdev, u64 key) { - u32 cdw10 = 1 | (key ? 1 << 3 : 0); - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register); + u32 cdw10 = 1 | (key ? 0 : 1 << 3); + + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); } static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) { - u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0); + u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 
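
The persistent-reservation hunks here fix two inversions: nvme_pr_clear() was issuing a Reservation Register instead of a Reservation Release with the Clear action, and both helpers set bit 3 when a key was supplied rather than when it was absent. As I read the NVMe Reservation Release cdw10 layout (worth checking against the spec): bits 2:0 are the action (0 = Release, 1 = Clear), bit 3 is IEKEY (ignore existing key), bits 15:8 the reservation type. The fixed Clear path:

	/* Clear: action 1; IEKEY only when the caller supplies no key */
	u32 cdw10 = 1 | (key ? 0 : 1 << 3);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
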
0 : 1 << 3); + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); } @@ -2663,6 +2702,34 @@ static const struct nvme_core_quirk_entry core_quirks[] = { .vid = 0x14a4, .fr = "22301111", .quirks = NVME_QUIRK_SIMPLE_SUSPEND, + }, + { + /* + * This Kioxia CD6-V Series / HPE PE8030 device times out and + * aborts I/O during any load, but more easily reproducible + * with discards (fstrim). + * + * The device is left in a state where it is also not possible + * to use "nvme set-feature" to disable APST, but booting with + * nvme_core.default_ps_max_latency=0 works. + */ + .vid = 0x1e0f, + .mn = "KCD6XVUL6T40", + .quirks = NVME_QUIRK_NO_APST, + }, + { + /* + * The external Samsung X5 SSD fails initialization without a + * delay before checking if it is ready and has a whole set of + * other problems. To make this even more interesting, it + * shares the PCI ID with internal Samsung 970 Evo Plus that + * does not need or want these quirks. + */ + .vid = 0x144d, + .mn = "Samsung Portable SSD X5", + .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | + NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_IGNORE_DEV_SUBNQN, } }; @@ -2801,8 +2868,8 @@ static ssize_t subsys_##field##_show(struct device *dev, \ { \ struct nvme_subsystem *subsys = \ container_of(dev, struct nvme_subsystem, dev); \ - return sprintf(buf, "%.*s\n", \ - (int)sizeof(subsys->field), subsys->field); \ + return sysfs_emit(buf, "%.*s\n", \ + (int)sizeof(subsys->field), subsys->field); \ } \ static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); @@ -2882,7 +2949,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) nvme_init_subnqn(subsys, ctrl, id); memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); memcpy(subsys->model, id->mn, sizeof(subsys->model)); - memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev)); subsys->vendor_id = le16_to_cpu(id->vid); subsys->cmic = id->cmic; subsys->awupf = le16_to_cpu(id->awupf); @@ -3026,10 +3092,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) if (!ctrl->identified) { int i; - ret = nvme_init_subsystem(ctrl, id); - if (ret) - goto out_free; - /* * Check for quirks. Quirk can depend on firmware version, * so, in principle, the set of quirks present can change @@ -3042,7 +3104,13 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) if (quirk_matches(id, &core_quirks[i])) ctrl->quirks |= core_quirks[i].quirks; } + + ret = nvme_init_subsystem(ctrl, id); + if (ret) + goto out_free; } + memcpy(ctrl->subsys->firmware_rev, id->fr, + sizeof(ctrl->subsys->firmware_rev)); if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); @@ -3164,8 +3232,12 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) return ret; if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { + /* + * Do not return errors unless we are in a controller reset, + * the controller works perfectly fine without hwmon. 
+ */ ret = nvme_hwmon_init(ctrl); - if (ret < 0) + if (ret == -EINTR) return ret; } @@ -3258,11 +3330,17 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, case NVME_IOCTL_IO_CMD: return nvme_dev_user_cmd(ctrl, argp); case NVME_IOCTL_RESET: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; dev_warn(ctrl->device, "resetting controller\n"); return nvme_reset_ctrl_sync(ctrl); case NVME_IOCTL_SUBSYS_RESET: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; return nvme_reset_subsystem(ctrl); case NVME_IOCTL_RESCAN: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; nvme_queue_scan(ctrl); return 0; default: @@ -3323,13 +3401,13 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, int model_len = sizeof(subsys->model); if (!uuid_is_null(&ids->uuid)) - return sprintf(buf, "uuid.%pU\n", &ids->uuid); + return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid); if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) - return sprintf(buf, "eui.%16phN\n", ids->nguid); + return sysfs_emit(buf, "eui.%16phN\n", ids->nguid); if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) - return sprintf(buf, "eui.%8phN\n", ids->eui64); + return sysfs_emit(buf, "eui.%8phN\n", ids->eui64); while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || subsys->serial[serial_len - 1] == '\0')) @@ -3338,7 +3416,7 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, subsys->model[model_len - 1] == '\0')) model_len--; - return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, + return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, serial_len, subsys->serial, model_len, subsys->model, head->ns_id); } @@ -3347,7 +3425,7 @@ static DEVICE_ATTR_RO(wwid); static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); + return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); } static DEVICE_ATTR_RO(nguid); @@ -3360,25 +3438,25 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, * we have no UUID set */ if (uuid_is_null(&ids->uuid)) { - printk_ratelimited(KERN_WARNING - "No UUID available providing old NGUID\n"); - return sprintf(buf, "%pU\n", ids->nguid); + dev_warn_ratelimited(dev, + "No UUID available providing old NGUID\n"); + return sysfs_emit(buf, "%pU\n", ids->nguid); } - return sprintf(buf, "%pU\n", &ids->uuid); + return sysfs_emit(buf, "%pU\n", &ids->uuid); } static DEVICE_ATTR_RO(uuid); static ssize_t eui_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); + return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); } static DEVICE_ATTR_RO(eui); static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id); + return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id); } static DEVICE_ATTR_RO(nsid); @@ -3443,7 +3521,7 @@ static ssize_t field##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ - return sprintf(buf, "%.*s\n", \ + return sysfs_emit(buf, "%.*s\n", \ (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ } \ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); @@ -3457,7 +3535,7 @@ static ssize_t field##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ - return sprintf(buf, 
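
The bulk sprintf()-to-sysfs_emit() conversion running through these show handlers is mechanical: sysfs_emit() behaves like scnprintf() into the one-page sysfs buffer, so it cannot overrun PAGE_SIZE and warns if handed a buffer that is not page aligned. The typical shape, with demo_ctrl and its model field as illustrative names:

	static ssize_t model_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		struct demo_ctrl *ctrl = dev_get_drvdata(dev);

		return sysfs_emit(buf, "%s\n", ctrl->model);
	}
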
"%d\n", ctrl->field); \ + return sysfs_emit(buf, "%d\n", ctrl->field); \ } \ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); @@ -3505,9 +3583,9 @@ static ssize_t nvme_sysfs_show_state(struct device *dev, if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && state_name[ctrl->state]) - return sprintf(buf, "%s\n", state_name[ctrl->state]); + return sysfs_emit(buf, "%s\n", state_name[ctrl->state]); - return sprintf(buf, "unknown state\n"); + return sysfs_emit(buf, "unknown state\n"); } static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); @@ -3559,9 +3637,9 @@ static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev, struct nvmf_ctrl_options *opts = ctrl->opts; if (ctrl->opts->max_reconnects == -1) - return sprintf(buf, "off\n"); - return sprintf(buf, "%d\n", - opts->max_reconnects * opts->reconnect_delay); + return sysfs_emit(buf, "off\n"); + return sysfs_emit(buf, "%d\n", + opts->max_reconnects * opts->reconnect_delay); } static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev, @@ -3591,8 +3669,8 @@ static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev, struct nvme_ctrl *ctrl = dev_get_drvdata(dev); if (ctrl->opts->reconnect_delay == -1) - return sprintf(buf, "off\n"); - return sprintf(buf, "%d\n", ctrl->opts->reconnect_delay); + return sysfs_emit(buf, "off\n"); + return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay); } static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev, @@ -4396,6 +4474,8 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl) nvme_stop_keep_alive(ctrl); flush_work(&ctrl->async_event_work); cancel_work_sync(&ctrl->fw_act_work); + if (ctrl->ops->stop_ctrl) + ctrl->ops->stop_ctrl(ctrl); } EXPORT_SYMBOL_GPL(nvme_stop_ctrl); @@ -4408,12 +4488,14 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) if (ctrl->queue_count > 1) { nvme_queue_scan(ctrl); nvme_start_queues(ctrl); + nvme_mpath_update(ctrl); } } EXPORT_SYMBOL_GPL(nvme_start_ctrl); void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) { + nvme_hwmon_exit(ctrl); nvme_fault_inject_fini(&ctrl->fault_inject); dev_pm_qos_hide_latency_tolerance(ctrl->device); cdev_device_del(&ctrl->cdev, ctrl->device); diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 552dbc04567b..9e6e56c20ec9 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -12,7 +12,7 @@ struct nvme_hwmon_data { struct nvme_ctrl *ctrl; - struct nvme_smart_log log; + struct nvme_smart_log *log; struct mutex read_lock; }; @@ -60,14 +60,14 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under, static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data) { return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0, - NVME_CSI_NVM, &data->log, sizeof(data->log), 0); + NVME_CSI_NVM, data->log, sizeof(*data->log), 0); } static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct nvme_hwmon_data *data = dev_get_drvdata(dev); - struct nvme_smart_log *log = &data->log; + struct nvme_smart_log *log = data->log; int temp; int err; @@ -163,7 +163,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data, case hwmon_temp_max: case hwmon_temp_min: if ((!channel && data->ctrl->wctemp) || - (channel && data->log.temp_sensor[channel - 1])) { + (channel && data->log->temp_sensor[channel - 1])) { if (data->ctrl->quirks & NVME_QUIRK_NO_TEMP_THRESH_CHANGE) return 0444; @@ -176,7 +176,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data, break; case hwmon_temp_input: case hwmon_temp_label: - if (!channel || 
data->log.temp_sensor[channel - 1]) + if (!channel || data->log->temp_sensor[channel - 1]) return 0444; break; default: @@ -223,33 +223,57 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = { int nvme_hwmon_init(struct nvme_ctrl *ctrl) { - struct device *dev = ctrl->dev; + struct device *dev = ctrl->device; struct nvme_hwmon_data *data; struct device *hwmon; int err; - data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) - return 0; + return -ENOMEM; + + data->log = kzalloc(sizeof(*data->log), GFP_KERNEL); + if (!data->log) { + err = -ENOMEM; + goto err_free_data; + } data->ctrl = ctrl; mutex_init(&data->read_lock); err = nvme_hwmon_get_smart_log(data); if (err) { - dev_warn(ctrl->device, - "Failed to read smart log (error %d)\n", err); - devm_kfree(dev, data); - return err; + dev_warn(dev, "Failed to read smart log (error %d)\n", err); + goto err_free_log; } - hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data, - &nvme_hwmon_chip_info, - NULL); + hwmon = hwmon_device_register_with_info(dev, "nvme", + data, &nvme_hwmon_chip_info, + NULL); if (IS_ERR(hwmon)) { dev_warn(dev, "Failed to instantiate hwmon device\n"); - devm_kfree(dev, data); + err = PTR_ERR(hwmon); + goto err_free_log; } - + ctrl->hwmon_device = hwmon; return 0; + +err_free_log: + kfree(data->log); +err_free_data: + kfree(data); + return err; +} + +void nvme_hwmon_exit(struct nvme_ctrl *ctrl) +{ + if (ctrl->hwmon_device) { + struct nvme_hwmon_data *data = + dev_get_drvdata(ctrl->hwmon_device); + + hwmon_device_unregister(ctrl->hwmon_device); + ctrl->hwmon_device = NULL; + kfree(data->log); + kfree(data); + } } diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 8e562d0f2c30..470cef3abec3 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -653,7 +653,7 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q, nvme_nvm_rqtocmd(rqd, ns, cmd); - rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY); + rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0); if (IS_ERR(rq)) return rq; @@ -767,14 +767,14 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q, DECLARE_COMPLETION_ONSTACK(wait); int ret = 0; - rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0, - NVME_QID_ANY); + rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0); if (IS_ERR(rq)) { ret = -ENOMEM; goto err_cmd; } - rq->timeout = timeout ? timeout : ADMIN_TIMEOUT; + if (timeout) + rq->timeout = timeout; if (ppa_buf && ppa_len) { ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 18a756444d5a..379d6818a063 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -484,8 +484,17 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, ns->ana_grpid = le32_to_cpu(desc->grpid); ns->ana_state = desc->state; clear_bit(NVME_NS_ANA_PENDING, &ns->flags); - - if (nvme_state_is_live(ns->ana_state)) + /* + * nvme_mpath_set_live() will trigger I/O to the multipath path device + * and in turn to this path device. However we cannot accept this I/O + * if the controller is not live. This may deadlock if called from + * nvme_mpath_init_identify() and the ctrl will never complete + * initialization, preventing I/O from completing. For this case we + * will reprocess the ANA log page in nvme_mpath_update() once the + * controller is ready. 
+ */ + if (nvme_state_is_live(ns->ana_state) && + ns->ctrl->state == NVME_CTRL_LIVE) nvme_mpath_set_live(ns); } @@ -572,6 +581,18 @@ static void nvme_ana_work(struct work_struct *work) nvme_read_ana_log(ctrl); } +void nvme_mpath_update(struct nvme_ctrl *ctrl) +{ + u32 nr_change_groups = 0; + + if (!ctrl->ana_log_buf) + return; + + mutex_lock(&ctrl->ana_lock); + nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state); + mutex_unlock(&ctrl->ana_lock); +} + static void nvme_anatt_timeout(struct timer_list *t) { struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer); @@ -603,8 +624,8 @@ static ssize_t nvme_subsys_iopolicy_show(struct device *dev, struct nvme_subsystem *subsys = container_of(dev, struct nvme_subsystem, dev); - return sprintf(buf, "%s\n", - nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]); + return sysfs_emit(buf, "%s\n", + nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]); } static ssize_t nvme_subsys_iopolicy_store(struct device *dev, @@ -629,7 +650,7 @@ SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR, static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid); + return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid); } DEVICE_ATTR_RO(ana_grpid); @@ -638,7 +659,7 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr, { struct nvme_ns *ns = nvme_get_ns_from_dev(dev); - return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]); + return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]); } DEVICE_ATTR_RO(ana_state); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 5dd1dd8021ba..86336496c65c 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -150,6 +150,11 @@ enum nvme_quirks { * encoding the generation sequence number. */ NVME_QUIRK_SKIP_CID_GEN = (1 << 17), + + /* + * Reports garbage in the namespace identifiers (eui64, nguid, uuid). 
+ */ + NVME_QUIRK_BOGUS_NID = (1 << 18), }; /* @@ -252,6 +257,9 @@ struct nvme_ctrl { struct rw_semaphore namespaces_rwsem; struct device ctrl_device; struct device *device; /* char device */ +#ifdef CONFIG_NVME_HWMON + struct device *hwmon_device; +#endif struct cdev cdev; struct work_struct reset_work; struct work_struct delete_work; @@ -473,6 +481,7 @@ struct nvme_ctrl_ops { void (*free_ctrl)(struct nvme_ctrl *ctrl); void (*submit_async_event)(struct nvme_ctrl *ctrl); void (*delete_ctrl)(struct nvme_ctrl *ctrl); + void (*stop_ctrl)(struct nvme_ctrl *ctrl); int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); }; @@ -535,11 +544,23 @@ static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj) static inline void nvme_should_fail(struct request *req) {} #endif +bool nvme_wait_reset(struct nvme_ctrl *ctrl); +int nvme_try_sched_reset(struct nvme_ctrl *ctrl); + static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl) { + int ret; + if (!ctrl->subsystem) return -ENOTTY; - return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65); + if (!nvme_wait_reset(ctrl)) + return -EBUSY; + + ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65); + if (ret) + return ret; + + return nvme_try_sched_reset(ctrl); } /* @@ -626,7 +647,6 @@ void nvme_cancel_tagset(struct nvme_ctrl *ctrl); void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl); bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, enum nvme_ctrl_state new_state); -bool nvme_wait_reset(struct nvme_ctrl *ctrl); int nvme_disable_ctrl(struct nvme_ctrl *ctrl); int nvme_enable_ctrl(struct nvme_ctrl *ctrl); int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl); @@ -657,6 +677,8 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl); #define NVME_QID_ANY -1 struct request *nvme_alloc_request(struct request_queue *q, + struct nvme_command *cmd, blk_mq_req_flags_t flags); +struct request *nvme_alloc_request_qid(struct request_queue *q, struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid); void nvme_cleanup_cmd(struct request *req); blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, @@ -677,7 +699,6 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count); void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); int nvme_reset_ctrl(struct nvme_ctrl *ctrl); int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl); -int nvme_try_sched_reset(struct nvme_ctrl *ctrl); int nvme_delete_ctrl(struct nvme_ctrl *ctrl); int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, @@ -707,6 +728,7 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id); void nvme_mpath_remove_disk(struct nvme_ns_head *head); int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl); +void nvme_mpath_update(struct nvme_ctrl *ctrl); void nvme_mpath_uninit(struct nvme_ctrl *ctrl); void nvme_mpath_stop(struct nvme_ctrl *ctrl); bool nvme_mpath_clear_current_path(struct nvme_ns *ns); @@ -793,6 +815,9 @@ static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, "Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n"); return 0; } +static inline void nvme_mpath_update(struct nvme_ctrl *ctrl) +{ +} static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) { } @@ -864,11 +889,16 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) #ifdef CONFIG_NVME_HWMON int nvme_hwmon_init(struct nvme_ctrl *ctrl); +void nvme_hwmon_exit(struct nvme_ctrl *ctrl); #else static inline int 
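
For reference, the magic constant in the reworked nvme_reset_subsystem() below is not arbitrary: 0x4E564D65 is ASCII "NVMe", the value the spec defines for the NSSR register to trigger an NVM subsystem reset. The rework brackets that write so the reset is only issued once any in-flight controller reset has drained, and is followed by a scheduled controller reset:

	if (!nvme_wait_reset(ctrl))	/* serialize against other resets */
		return -EBUSY;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
	if (ret)
		return ret;

	return nvme_try_sched_reset(ctrl);
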
nvme_hwmon_init(struct nvme_ctrl *ctrl) { return 0; } + +static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl) +{ +} #endif u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 97afeb898b25..c222d7bf6ce1 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -224,6 +224,7 @@ struct nvme_queue { */ struct nvme_iod { struct nvme_request req; + struct nvme_command cmd; struct nvme_queue *nvmeq; bool use_sgl; int aborted; @@ -816,6 +817,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); if (bv->bv_len > first_prp_len) cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); + else + cmnd->dptr.prp2 = 0; return BLK_STS_OK; } @@ -917,7 +920,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, struct nvme_dev *dev = nvmeq->dev; struct request *req = bd->rq; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - struct nvme_command cmnd; + struct nvme_command *cmnd = &iod->cmd; blk_status_t ret; iod->aborted = 0; @@ -931,24 +934,24 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) return BLK_STS_IOERR; - ret = nvme_setup_cmd(ns, req, &cmnd); + ret = nvme_setup_cmd(ns, req, cmnd); if (ret) return ret; if (blk_rq_nr_phys_segments(req)) { - ret = nvme_map_data(dev, req, &cmnd); + ret = nvme_map_data(dev, req, cmnd); if (ret) goto out_free_cmd; } if (blk_integrity_rq(req)) { - ret = nvme_map_metadata(dev, req, &cmnd); + ret = nvme_map_metadata(dev, req, cmnd); if (ret) goto out_unmap_data; } blk_mq_start_request(req); - nvme_submit_cmd(nvmeq, &cmnd, bd->last); + nvme_submit_cmd(nvmeq, cmnd, bd->last); return BLK_STS_OK; out_unmap_data: nvme_unmap_data(dev, req); @@ -1350,13 +1353,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) req->tag, nvmeq->qid); abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd, - BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); + BLK_MQ_REQ_NOWAIT); if (IS_ERR(abort_req)) { atomic_inc(&dev->ctrl.abort_limit); return BLK_EH_RESET_TIMER; } - abort_req->timeout = ADMIN_TIMEOUT; abort_req->end_io_data = NULL; blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio); @@ -1666,6 +1668,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); if (IS_ERR(dev->ctrl.admin_q)) { blk_mq_free_tag_set(&dev->admin_tagset); + dev->ctrl.admin_q = NULL; return -ENOMEM; } if (!blk_get_queue(dev->ctrl.admin_q)) { @@ -2278,11 +2281,10 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) cmd.delete_queue.opcode = opcode; cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); - req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); + req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT); if (IS_ERR(req)) return PTR_ERR(req); - req->timeout = ADMIN_TIMEOUT; req->end_io_data = nvmeq; init_completion(&nvmeq->delete_done); @@ -2624,6 +2626,8 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out_unlock; + dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1); + /* * Limit the max command size to prevent iod->sg allocations going * over a single page. @@ -2636,7 +2640,6 @@ static void nvme_reset_work(struct work_struct *work) * Don't limit the IOMMU merged segment size. 
*/ dma_set_max_seg_size(dev->dev, 0xffffffff); - dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1); mutex_unlock(&dev->shutdown_lock); @@ -3212,7 +3215,10 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS | - NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + NVME_QUIRK_DISABLE_WRITE_ZEROES | + NVME_QUIRK_BOGUS_NID, }, + { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ @@ -3231,7 +3237,8 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_DISABLE_WRITE_ZEROES| NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ - .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | + NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, @@ -3242,10 +3249,15 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ - .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | + NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN }, + { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ @@ -3262,7 +3274,6 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_128_BYTES_SQES | NVME_QUIRK_SHARED_TAGS | NVME_QUIRK_SKIP_CID_GEN }, - { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { 0, } }; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 8eacc9bd58f5..b61924394032 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1057,6 +1057,14 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, } } +static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl) +{ + struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); + + cancel_work_sync(&ctrl->err_work); + cancel_delayed_work_sync(&ctrl->reconnect_work); +} + static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); @@ -2236,9 +2244,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = { static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) { - cancel_work_sync(&ctrl->err_work); - cancel_delayed_work_sync(&ctrl->reconnect_work); - nvme_rdma_teardown_io_queues(ctrl, shutdown); blk_mq_quiesce_queue(ctrl->ctrl.admin_q); if (shutdown) @@ -2288,6 +2293,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { .submit_async_event = nvme_rdma_submit_async_event, .delete_ctrl = nvme_rdma_delete_ctrl, .get_address = nvmf_get_address, + .stop_ctrl = nvme_rdma_stop_ctrl, }; /* diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 7e3932033707..57df87def8c3 100644 --- 
a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -118,7 +118,6 @@ struct nvme_tcp_queue { struct mutex send_mutex; struct llist_head req_list; struct list_head send_list; - bool more_requests; /* recv state */ void *pdu; @@ -314,7 +313,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) { return !list_empty(&queue->send_list) || - !llist_empty(&queue->req_list) || queue->more_requests; + !llist_empty(&queue->req_list); } static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, @@ -333,9 +332,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, */ if (queue->io_cpu == raw_smp_processor_id() && sync && empty && mutex_trylock(&queue->send_mutex)) { - queue->more_requests = !last; nvme_tcp_send_all(queue); - queue->more_requests = false; mutex_unlock(&queue->send_mutex); } @@ -1149,8 +1146,7 @@ done: } else if (ret < 0) { dev_err(queue->ctrl->ctrl.device, "failed to send request %d\n", ret); - if (ret != -EPIPE && ret != -ECONNRESET) - nvme_tcp_fail_request(queue->request); + nvme_tcp_fail_request(queue->request); nvme_tcp_done_send_req(queue); } return ret; @@ -1197,7 +1193,7 @@ static void nvme_tcp_io_work(struct work_struct *w) else if (unlikely(result < 0)) return; - if (!pending) + if (!pending || !queue->rd_enabled) return; } while (!time_after(jiffies, deadline)); /* quota is exhausted */ @@ -2136,9 +2132,6 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work) static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) { - cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); - cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); - nvme_tcp_teardown_io_queues(ctrl, shutdown); blk_mq_quiesce_queue(ctrl->admin_q); if (shutdown) @@ -2178,6 +2171,12 @@ out_fail: nvme_tcp_reconnect_or_remove(ctrl); } +static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl) +{ + cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); + cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); +} + static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); @@ -2500,6 +2499,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = { .submit_async_event = nvme_tcp_submit_async_event, .delete_ctrl = nvme_tcp_delete_ctrl, .get_address = nvmf_get_address, + .stop_ctrl = nvme_tcp_stop_ctrl, }; static bool diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h index 35bac7a25422..aa8b0f86b2be 100644 --- a/drivers/nvme/host/trace.h +++ b/drivers/nvme/host/trace.h @@ -98,7 +98,7 @@ TRACE_EVENT(nvme_complete_rq, TP_fast_assign( __entry->ctrl_id = nvme_req(req)->ctrl->instance; __entry->qid = nvme_req_qid(req); - __entry->cid = req->tag; + __entry->cid = nvme_req(req)->cmd->common.command_id; __entry->result = le64_to_cpu(nvme_req(req)->result.u64); __entry->retries = nvme_req(req)->retries; __entry->flags = nvme_req(req)->flags; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index f0f9d9007d49..fa04d976bbe7 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -730,6 +730,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status) static void __nvmet_req_complete(struct nvmet_req *req, u16 status) { + struct nvmet_ns *ns = req->ns; + if (!req->sq->sqhd_disabled) nvmet_update_sq_head(req); req->cqe->sq_id = cpu_to_le16(req->sq->qid); @@ -740,9 +742,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) trace_nvmet_req_complete(req); 
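
The __nvmet_req_complete() hunk below is a use-after-free fix: req->ops->queue_response() may free the request, and with it req->ns, so the namespace pointer is cached up front and the reference dropped only after the response has been queued. A condensed sketch of the fixed ordering:

	static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
	{
		struct nvmet_ns *ns = req->ns;	/* req may vanish below */

		/* ... fill in req->cqe fields ... */

		req->ops->queue_response(req);	/* may free req */
		if (ns)
			nvmet_put_namespace(ns);
	}
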
- if (req->ns) - nvmet_put_namespace(req->ns); req->ops->queue_response(req); + if (ns) + nvmet_put_namespace(ns); } void nvmet_req_complete(struct nvmet_req *req, u16 status) diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 8ee94f056898..d24251ece502 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -244,7 +244,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req) q = ns->queue; } - rq = nvme_alloc_request(q, req->cmd, 0, NVME_QID_ANY); + rq = nvme_alloc_request(q, req->cmd, 0); if (IS_ERR(rq)) { status = NVME_SC_INTERNAL; goto out_put_ns; diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 96b67a70cbbb..2ddbd4f4f628 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -922,10 +922,17 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) struct nvme_tcp_data_pdu *data = &queue->pdu.data; struct nvmet_tcp_cmd *cmd; - if (likely(queue->nr_cmds)) + if (likely(queue->nr_cmds)) { + if (unlikely(data->ttag >= queue->nr_cmds)) { + pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n", + queue->idx, data->ttag, queue->nr_cmds); + nvmet_tcp_fatal_error(queue); + return -EPROTO; + } cmd = &queue->cmds[data->ttag]; - else + } else { cmd = &queue->connect; + } if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { pr_err("ttag %u unexpected data offset %u (expected %u)\n", @@ -1471,6 +1478,9 @@ static void nvmet_tcp_state_change(struct sock *sk) goto done; switch (sk->sk_state) { + case TCP_FIN_WAIT2: + case TCP_LAST_ACK: + break; case TCP_FIN_WAIT1: case TCP_CLOSE_WAIT: case TCP_CLOSE: @@ -1802,7 +1812,8 @@ static int __init nvmet_tcp_init(void) { int ret; - nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0); + nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); if (!nvmet_tcp_wq) return -ENOMEM; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 74d2bd7a6550..93ec9367ad28 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -315,7 +315,7 @@ static int unflatten_dt_nodes(const void *blob, for (offset = 0; offset >= 0 && depth >= initial_depth; offset = fdt_next_node(blob, offset, &depth)) { - if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) + if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1)) continue; if (!IS_ENABLED(CONFIG_OF_KOBJ) && @@ -1181,6 +1181,9 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, if (memblock_is_region_reserved(base, size)) return -EBUSY; + if (memblock_is_nomap_remove()) + return memblock_remove(base, size); + return memblock_mark_nomap(base, size); } return memblock_reserve(base, size); diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 43a77d720008..c8a0c0e9dec1 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -170,9 +170,7 @@ static int overlay_notify(struct overlay_changeset *ovcs, ret = blocking_notifier_call_chain(&overlay_notify_chain, action, &nd); - if (ret == NOTIFY_OK || ret == NOTIFY_STOP) - return 0; - if (ret) { + if (notifier_to_errno(ret)) { ret = notifier_to_errno(ret); pr_err("overlay changeset %s notifier error %d, target: %pOF\n", of_overlay_action_name[action], ret, nd.target); diff --git a/drivers/of/property.c b/drivers/of/property.c index 78427c85bef3..12fec0e1b73d 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -957,8 +957,10 @@ of_fwnode_get_reference_args(const struct fwnode_handle *fwnode, nargs, index, &of_args); if (ret < 0) return ret; - if (!args) + if (!args) { + 
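
The of/property hunk here is a reference-count fix: of_parse_phandle_with_args() returns of_args.np with an elevated refcount, and the early return for callers that pass args == NULL (probing only for existence) leaked that reference. The fixed path now reads:

	ret = of_parse_phandle_with_args(np, prop, cells, index, &of_args);
	if (ret < 0)
		return ret;
	if (!args) {
		of_node_put(of_args.np);	/* drop the ref we won't hand out */
		return 0;
	}
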
of_node_put(of_args.np); return 0; + } args->nargs = of_args.args_count; args->fwnode = of_fwnode_handle(of_args.np); diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 903b465c8568..7ed605ffb717 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -2052,8 +2052,8 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, } virt_dev = dev_pm_domain_attach_by_name(dev, *name); - if (IS_ERR(virt_dev)) { - ret = PTR_ERR(virt_dev); + if (IS_ERR_OR_NULL(virt_dev)) { + ret = PTR_ERR(virt_dev) ? : -ENODEV; dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret); goto err; } diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 446862368949..b163f5d13910 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -346,11 +346,11 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) /* Checking only first OPP is sufficient */ np = of_get_next_available_child(opp_np, NULL); + of_node_put(opp_np); if (!np) { dev_err(dev, "OPP table empty\n"); return -EINVAL; } - of_node_put(opp_np); prop = of_find_property(np, "opp-peak-kBps", NULL); of_node_put(np); diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index b916fab9b161..be81b765858b 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1380,15 +1380,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr) } } -static void __init ccio_init_resources(struct ioc *ioc) +static int __init ccio_init_resources(struct ioc *ioc) { struct resource *res = ioc->mmio_region; char *name = kmalloc(14, GFP_KERNEL); - + if (unlikely(!name)) + return -ENOMEM; snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path); ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low); ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv); + return 0; } static int new_ioc_area(struct resource *res, unsigned long size, @@ -1543,7 +1545,11 @@ static int __init ccio_probe(struct parisc_device *dev) return -ENOMEM; } ccio_ioc_init(ioc); - ccio_init_resources(ioc); + if (ccio_init_resources(ioc)) { + iounmap(ioc->ioc_regs); + kfree(ioc); + return -ENOMEM; + } hppa_dma_ops = &ccio_ops; hba = kzalloc(sizeof(*hba), GFP_KERNEL); diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 952a92504df6..e33036281327 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -142,9 +142,8 @@ struct dino_device { struct pci_hba_data hba; /* 'C' inheritance - must be first */ spinlock_t dinosaur_pen; - unsigned long txn_addr; /* EIR addr to generate interrupt */ - u32 txn_data; /* EIR data assign to each dino */ u32 imr; /* IRQ's which are enabled */ + struct gsc_irq gsc_irq; int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */ #ifdef DINO_DEBUG unsigned int dino_irr0; /* save most recent IRQ line stat */ @@ -339,14 +338,43 @@ static void dino_unmask_irq(struct irq_data *d) if (tmp & DINO_MASK_IRQ(local_irq)) { DBG(KERN_WARNING "%s(): IRQ asserted! 
(ILR 0x%x)\n", __func__, tmp); - gsc_writel(dino_dev->txn_data, dino_dev->txn_addr); + gsc_writel(dino_dev->gsc_irq.txn_data, dino_dev->gsc_irq.txn_addr); } } +#ifdef CONFIG_SMP +static int dino_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, + bool force) +{ + struct dino_device *dino_dev = irq_data_get_irq_chip_data(d); + struct cpumask tmask; + int cpu_irq; + u32 eim; + + if (!cpumask_and(&tmask, dest, cpu_online_mask)) + return -EINVAL; + + cpu_irq = cpu_check_affinity(d, &tmask); + if (cpu_irq < 0) + return cpu_irq; + + dino_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq); + eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data; + __raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0); + + irq_data_update_effective_affinity(d, &tmask); + + return IRQ_SET_MASK_OK; +} +#endif + static struct irq_chip dino_interrupt_type = { .name = "GSC-PCI", .irq_unmask = dino_unmask_irq, .irq_mask = dino_mask_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = dino_set_affinity_irq, +#endif }; @@ -806,7 +834,6 @@ static int __init dino_common_init(struct parisc_device *dev, { int status; u32 eim; - struct gsc_irq gsc_irq; struct resource *res; pcibios_register_hba(&dino_dev->hba); @@ -821,10 +848,8 @@ static int __init dino_common_init(struct parisc_device *dev, ** still only has 11 IRQ input lines - just map some of them ** to a different processor. */ - dev->irq = gsc_alloc_irq(&gsc_irq); - dino_dev->txn_addr = gsc_irq.txn_addr; - dino_dev->txn_data = gsc_irq.txn_data; - eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; + dev->irq = gsc_alloc_irq(&dino_dev->gsc_irq); + eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data; /* ** Dino needs a PA "IRQ" to get a processor's attention. diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c index ed9371acf37e..ec175ae99873 100644 --- a/drivers/parisc/gsc.c +++ b/drivers/parisc/gsc.c @@ -135,10 +135,41 @@ static void gsc_asic_unmask_irq(struct irq_data *d) */ } +#ifdef CONFIG_SMP +static int gsc_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, + bool force) +{ + struct gsc_asic *gsc_dev = irq_data_get_irq_chip_data(d); + struct cpumask tmask; + int cpu_irq; + + if (!cpumask_and(&tmask, dest, cpu_online_mask)) + return -EINVAL; + + cpu_irq = cpu_check_affinity(d, &tmask); + if (cpu_irq < 0) + return cpu_irq; + + gsc_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq); + gsc_dev->eim = ((u32) gsc_dev->gsc_irq.txn_addr) | gsc_dev->gsc_irq.txn_data; + + /* switch IRQ's for devices below LASI/WAX to other CPU */ + gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR); + + irq_data_update_effective_affinity(d, &tmask); + + return IRQ_SET_MASK_OK; +} +#endif + + static struct irq_chip gsc_asic_interrupt_type = { .name = "GSC-ASIC", .irq_unmask = gsc_asic_unmask_irq, .irq_mask = gsc_asic_mask_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = gsc_set_affinity_irq, +#endif }; int gsc_assign_irq(struct irq_chip *type, void *data) diff --git a/drivers/parisc/gsc.h b/drivers/parisc/gsc.h index 86abad3fa215..73cbd0bb1975 100644 --- a/drivers/parisc/gsc.h +++ b/drivers/parisc/gsc.h @@ -31,6 +31,7 @@ struct gsc_asic { int version; int type; int eim; + struct gsc_irq gsc_irq; int global_irq[32]; }; diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 8a3b0c3a1e92..fd99735dca3e 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -875,6 +875,7 @@ int iosapic_serial_irq(struct parisc_device *dev) return vi->txn_irq; } +EXPORT_SYMBOL(iosapic_serial_irq); #endif diff 
--git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c index 4e4fd12c2112..6ef621adb63a 100644 --- a/drivers/parisc/lasi.c +++ b/drivers/parisc/lasi.c @@ -163,7 +163,6 @@ static int __init lasi_init_chip(struct parisc_device *dev) { extern void (*chassis_power_off)(void); struct gsc_asic *lasi; - struct gsc_irq gsc_irq; int ret; lasi = kzalloc(sizeof(*lasi), GFP_KERNEL); @@ -185,7 +184,7 @@ static int __init lasi_init_chip(struct parisc_device *dev) lasi_init_irq(lasi); /* the IRQ lasi should use */ - dev->irq = gsc_alloc_irq(&gsc_irq); + dev->irq = gsc_alloc_irq(&lasi->gsc_irq); if (dev->irq < 0) { printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__); @@ -193,9 +192,9 @@ static int __init lasi_init_chip(struct parisc_device *dev) return -EBUSY; } - lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; + lasi->eim = ((u32) lasi->gsc_irq.txn_addr) | lasi->gsc_irq.txn_data; - ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi); + ret = request_irq(lasi->gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi); if (ret < 0) { kfree(lasi); return ret; diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 732b516c7bf8..afc6e66ddc31 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -1476,9 +1476,13 @@ lba_driver_probe(struct parisc_device *dev) u32 func_class; void *tmp_obj; char *version; - void __iomem *addr = ioremap(dev->hpa.start, 4096); + void __iomem *addr; int max; + addr = ioremap(dev->hpa.start, 4096); + if (addr == NULL) + return -ENOMEM; + /* Read HW Rev First */ func_class = READ_REG32(addr + LBA_FCLASS); diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c index 5b6df1516235..73a2b01f8d9c 100644 --- a/drivers/parisc/wax.c +++ b/drivers/parisc/wax.c @@ -68,7 +68,6 @@ static int __init wax_init_chip(struct parisc_device *dev) { struct gsc_asic *wax; struct parisc_device *parent; - struct gsc_irq gsc_irq; int ret; wax = kzalloc(sizeof(*wax), GFP_KERNEL); @@ -85,7 +84,7 @@ static int __init wax_init_chip(struct parisc_device *dev) wax_init_irq(wax); /* the IRQ wax should use */ - dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ); + dev->irq = gsc_claim_irq(&wax->gsc_irq, WAX_GSC_IRQ); if (dev->irq < 0) { printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__); @@ -93,9 +92,9 @@ static int __init wax_init_chip(struct parisc_device *dev) return -EBUSY; } - wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; + wax->eim = ((u32) wax->gsc_irq.txn_addr) | wax->gsc_irq.txn_data; - ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "wax", wax); + ret = request_irq(wax->gsc_irq.irq, gsc_asic_intr, 0, "wax", wax); if (ret < 0) { kfree(wax); return ret; diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index eda4ded4d5e5..925be41eeebe 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -468,7 +468,7 @@ static size_t parport_pc_fifo_write_block_pio(struct parport *port, const unsigned char *bufp = buf; size_t left = length; unsigned long expire = jiffies + port->physport->cad->timeout; - const int fifo = FIFO(port); + const unsigned long fifo = FIFO(port); int poll_for = 8; /* 80 usecs */ const struct parport_pc_private *priv = port->physport->private_data; const int fifo_depth = priv->fifo_depth; diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index 1af14474abcf..4c5e6349d78c 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -154,8 +154,7 @@ static int 
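
The dino/gsc/lasi/wax changes share one theme: struct gsc_irq used to be a probe-time stack variable, but the newly added .irq_set_affinity handlers need the txn_addr/txn_data pair long after probe returns, so it moves into the long-lived device structure. Using the lasi hunk as the example:

	/* before: txn info dangled once probe returned
	 *	struct gsc_irq gsc_irq;
	 *	dev->irq = gsc_alloc_irq(&gsc_irq);
	 */
	dev->irq = gsc_alloc_irq(&lasi->gsc_irq);
	lasi->eim = ((u32)lasi->gsc_irq.txn_addr) | lasi->gsc_irq.txn_data;
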
cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr, struct cdns_pcie *pcie = &ep->pcie; u32 r; - r = find_first_zero_bit(&ep->ob_region_map, - sizeof(ep->ob_region_map) * BITS_PER_LONG); + r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG); if (r >= ep->max_regions - 1) { dev_err(&epc->dev, "no free outbound region\n"); return -EINVAL; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 5cf1ef12fb9b..ceb4815379cd 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -401,6 +401,11 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) dev_err(dev, "failed to disable vpcie regulator: %d\n", ret); } + + /* Some boards don't have PCIe reset GPIO. */ + if (gpio_is_valid(imx6_pcie->reset_gpio)) + gpio_set_value_cansleep(imx6_pcie->reset_gpio, + imx6_pcie->gpio_active_high); } static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie) @@ -523,15 +528,6 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) /* allow the clocks to stabilize */ usleep_range(200, 500); - /* Some boards don't have PCIe reset GPIO. */ - if (gpio_is_valid(imx6_pcie->reset_gpio)) { - gpio_set_value_cansleep(imx6_pcie->reset_gpio, - imx6_pcie->gpio_active_high); - msleep(100); - gpio_set_value_cansleep(imx6_pcie->reset_gpio, - !imx6_pcie->gpio_active_high); - } - switch (imx6_pcie->drvdata->variant) { case IMX8MQ: reset_control_deassert(imx6_pcie->pciephy_reset); @@ -574,6 +570,15 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) break; } + /* Some boards don't have PCIe reset GPIO. */ + if (gpio_is_valid(imx6_pcie->reset_gpio)) { + msleep(100); + gpio_set_value_cansleep(imx6_pcie->reset_gpio, + !imx6_pcie->gpio_active_high); + /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */ + msleep(100); + } + return; err_ref_clk: diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index dde9d6fde54f..a483b2bae0d3 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -777,8 +777,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, epc->mem->window.page_size); if (!ep->msi_mem) { + ret = -ENOMEM; dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); - return -ENOMEM; + goto err_exit_epc_mem; } if (ep->ops->get_features) { @@ -787,6 +788,19 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) return 0; } - return dw_pcie_ep_init_complete(ep); + ret = dw_pcie_ep_init_complete(ep); + if (ret) + goto err_free_epc_mem; + + return 0; + +err_free_epc_mem: + pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, + epc->mem->window.page_size); + +err_exit_epc_mem: + pci_epc_mem_exit(epc); + + return ret; } EXPORT_SYMBOL_GPL(dw_pcie_ep_init); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 34816e514d79..1cb04f4e809f 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -393,7 +393,8 @@ int dw_pcie_host_init(struct pcie_port *pp) sizeof(pp->msi_msg), DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); - if (dma_mapping_error(pci->dev, pp->msi_data)) { + ret = dma_mapping_error(pci->dev, pp->msi_data); + if (ret) { dev_err(pci->dev, "Failed to map MSI data\n"); pp->msi_data = 0; goto err_free_msi; @@ -479,7 +480,7 @@ static int 
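
The cadence-ep fix just below is a units bug: find_first_zero_bit() takes its size argument in bits, and ob_region_map is a single unsigned long, so the correct bound is BITS_PER_LONG. The old sizeof(ep->ob_region_map) * BITS_PER_LONG multiplied bytes by bits per long (8 * 64 = 512 on 64-bit), letting the search run far past the bitmap:

	/* ep->ob_region_map is one unsigned long; the size is in bits */
	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1)
		return -EINVAL;		/* no free outbound region */
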
dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn, ret = pci_generic_config_read(bus, devfn, where, size, val); - if (!ret && pci->io_cfg_atu_shared) + if (!ret && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED)) dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base, pp->io_bus_addr, pp->io_size); @@ -495,7 +496,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn, ret = pci_generic_config_write(bus, devfn, where, size, val); - if (!ret && pci->io_cfg_atu_shared) + if (!ret && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED)) dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base, pp->io_bus_addr, pp->io_size); @@ -606,7 +607,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp) PCIE_ATU_TYPE_IO, pp->io_base, pp->io_bus_addr, pp->io_size); else - pci->io_cfg_atu_shared = true; + pci->iatu_unroll_enabled |= DWC_IATU_IOCFG_SHARED; } if (pci->num_viewport <= atu_idx) diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index c85e128bc0ea..daa88851366a 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -274,7 +274,7 @@ static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no, if (pci->ops->cpu_addr_fixup) cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); - if (pci->iatu_unroll_enabled) { + if (pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN) { dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type, cpu_addr, pci_addr, size); return; @@ -394,7 +394,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, int type; u32 retries, val; - if (pci->iatu_unroll_enabled) + if (pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN) return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar, cpu_addr, as_type); @@ -439,7 +439,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, void dw_pcie_disable_atu(struct dw_pcie *pci, int index, enum dw_pcie_region_type type) { - int region; + u32 region; switch (type) { case DW_PCIE_REGION_INBOUND: @@ -452,8 +452,18 @@ void dw_pcie_disable_atu(struct dw_pcie *pci, int index, return; } - dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); - dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE); + if (pci->iatu_unroll_enabled) { + if (region == PCIE_ATU_REGION_INBOUND) { + dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, + ~(u32)PCIE_ATU_ENABLE); + } else { + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, + ~(u32)PCIE_ATU_ENABLE); + } + } else { + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); + dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE); + } } int dw_pcie_wait_for_link(struct dw_pcie *pci) @@ -554,14 +564,15 @@ void dw_pcie_setup(struct dw_pcie *pci) if (pci->version >= 0x480A || (!pci->version && dw_pcie_iatu_unroll_enabled(pci))) { - pci->iatu_unroll_enabled = true; + pci->iatu_unroll_enabled |= DWC_IATU_UNROLL_EN; if (!pci->atu_base) pci->atu_base = devm_platform_ioremap_resource_byname(pdev, "atu"); if (IS_ERR(pci->atu_base)) pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; } - dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ? + dev_dbg(pci->dev, "iATU unroll: %s\n", + pci->iatu_unroll_enabled & DWC_IATU_UNROLL_EN ? 
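
The designware rework below folds the two former bitfields (iatu_unroll_enabled, io_cfg_atu_shared) into one u8 of flags, and teaches dw_pcie_disable_atu() about unroll mode: with an unrolled iATU there is no viewport register, so a region must be disabled through its own unrolled CTRL2 register. The outbound case, condensed from the hunk:

	if (pci->iatu_unroll_enabled)
		dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
					 ~(u32)PCIE_ATU_ENABLE);
	else {
		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
		dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
	}
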
"enabled" : "disabled"); if (pci->link_gen > 0) @@ -589,6 +600,13 @@ void dw_pcie_setup(struct dw_pcie *pci) val |= PORT_LINK_DLL_LINK_EN; dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); + if (of_property_read_bool(np, "snps,enable-cdm-check")) { + val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); + val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | + PCIE_PL_CHK_REG_CHK_REG_START; + dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); + } + of_property_read_u32(np, "num-lanes", &pci->num_lanes); if (!pci->num_lanes) { dev_dbg(pci->dev, "Using h/w default number of lanes\n"); @@ -635,11 +653,4 @@ void dw_pcie_setup(struct dw_pcie *pci) break; } dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); - - if (of_property_read_bool(np, "snps,enable-cdm-check")) { - val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); - val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | - PCIE_PL_CHK_REG_CHK_REG_START; - dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); - } } diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 5a2f57b7299c..8363f77b2f26 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -257,6 +257,8 @@ struct dw_pcie_ops { void (*stop_link)(struct dw_pcie *pcie); }; +#define DWC_IATU_UNROLL_EN BIT(0) +#define DWC_IATU_IOCFG_SHARED BIT(1) struct dw_pcie { struct device *dev; void __iomem *dbi_base; @@ -264,6 +266,7 @@ struct dw_pcie { /* Used when iatu_unroll_enabled is true */ void __iomem *atu_base; u32 num_viewport; + u8 iatu_unroll_enabled; struct pcie_port pp; struct dw_pcie_ep ep; const struct dw_pcie_ops *ops; @@ -271,8 +274,6 @@ struct dw_pcie { int num_lanes; int link_gen; u8 n_fts[2]; - bool iatu_unroll_enabled: 1; - bool io_cfg_atu_shared: 1; }; #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 557554f53ce9..5fbd80908a99 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -320,8 +320,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) reset_control_assert(res->ext_reset); reset_control_assert(res->phy_reset); - writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL); - ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); if (ret < 0) { dev_err(dev, "cannot enable regulators\n"); @@ -364,15 +362,15 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) goto err_deassert_axi; } - ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); - if (ret) - goto err_clks; - /* enable PCIe clocks and resets */ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); val &= ~BIT(0); writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); + if (ret) + goto err_clks; + if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | @@ -1192,12 +1190,6 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) goto err_disable_clocks; } - ret = clk_prepare_enable(res->pipe_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable pipe clock\n"); - goto err_disable_clocks; - } - /* configure PCIe to RC mode */ writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE); @@ -1443,22 +1435,21 @@ static int qcom_pcie_probe(struct platform_device *pdev) } ret = phy_init(pcie->phy); - if (ret) { - 
pm_runtime_disable(&pdev->dev); + if (ret) goto err_pm_runtime_put; - } platform_set_drvdata(pdev, pcie); ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "cannot initialize host\n"); - pm_runtime_disable(&pdev->dev); - goto err_pm_runtime_put; + goto err_phy_exit; } return 0; +err_phy_exit: + phy_exit(pcie->phy); err_pm_runtime_put: pm_runtime_put(dev); pm_runtime_disable(dev); diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index a5b677ec0769..1222f5749bc6 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -370,15 +370,14 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg) struct tegra_pcie_dw *pcie = arg; struct dw_pcie *pci = &pcie->pci; struct pcie_port *pp = &pci->pp; - u32 val, tmp; + u32 val, status_l0, status_l1; u16 val_w; - val = appl_readl(pcie, APPL_INTR_STATUS_L0); - if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) { - val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); - if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) { - appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0); - + status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0); + if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) { + status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); + appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0); + if (status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) { /* SBR & Surprise Link Down WAR */ val = appl_readl(pcie, APPL_CAR_RESET_OVRD); val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N; @@ -394,15 +393,15 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg) } } - if (val & APPL_INTR_STATUS_L0_INT_INT) { - val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0); - if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) { + if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) { + status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0); + if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) { appl_writel(pcie, APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS, APPL_INTR_STATUS_L1_8_0); apply_bad_link_workaround(pp); } - if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) { + if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) { appl_writel(pcie, APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS, APPL_INTR_STATUS_L1_8_0); @@ -414,25 +413,24 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg) } } - val = appl_readl(pcie, APPL_INTR_STATUS_L0); - if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) { - val = appl_readl(pcie, APPL_INTR_STATUS_L1_18); - tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); - if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) { + if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) { + status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18); + val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); + if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) { dev_info(pci->dev, "CDM check complete\n"); - tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE; + val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE; } - if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) { + if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) { dev_err(pci->dev, "CDM comparison mismatch\n"); - tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR; + val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR; } - if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) { + if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) { dev_err(pci->dev, "CDM Logic error\n"); - tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR; + val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR; } - 
dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp); - tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR); - dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp); + dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); + val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR); + dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val); } return IRQ_HANDLED; @@ -965,7 +963,7 @@ static int tegra_pcie_dw_host_init(struct pcie_port *pp) offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF); val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP); val &= ~PCI_DLF_EXCHANGE_ENABLE; - dw_pcie_writel_dbi(pci, offset, val); + dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val); tegra_pcie_prepare_host(pp); @@ -1970,6 +1968,7 @@ static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie, if (ret) { dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n", ret); + pm_runtime_disable(dev); return ret; } diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 49ff8bf10c74..0c603e069f21 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -114,6 +114,7 @@ #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C) +#define PCIE_MSI_ALL_MASK GENMASK(31, 0) #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C) #define PCIE_MSI_DATA_MASK GENMASK(15, 0) @@ -577,6 +578,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); /* Clear all interrupts */ + advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG); advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); @@ -589,7 +591,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); /* Unmask all MSIs */ - advk_writel(pcie, 0, PCIE_MSI_MASK_REG); + advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); /* Enable summary interrupt for GIC SPI source */ reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK); @@ -1186,7 +1188,7 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data, msg->address_lo = lower_32_bits(msi_msg); msg->address_hi = upper_32_bits(msi_msg); - msg->data = data->irq; + msg->data = data->hwirq; } static int advk_msi_set_affinity(struct irq_data *irq_data, @@ -1203,15 +1205,11 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain, int hwirq, i; mutex_lock(&pcie->msi_used_lock); - hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM, - 0, nr_irqs, 0); - if (hwirq >= MSI_IRQ_NUM) { - mutex_unlock(&pcie->msi_used_lock); - return -ENOSPC; - } - - bitmap_set(pcie->msi_used, hwirq, nr_irqs); + hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM, + order_base_2(nr_irqs)); mutex_unlock(&pcie->msi_used_lock); + if (hwirq < 0) + return -ENOSPC; for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, hwirq + i, @@ -1229,7 +1227,7 @@ static void advk_msi_irq_domain_free(struct irq_domain *domain, struct advk_pcie *pcie = domain->host_data; mutex_lock(&pcie->msi_used_lock); - bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs); + bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs)); mutex_unlock(&pcie->msi_used_lock); } @@ -1390,23 +1388,19 @@ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) static void 
advk_pcie_handle_msi(struct advk_pcie *pcie) { u32 msi_val, msi_mask, msi_status, msi_idx; - u16 msi_data; + int virq; msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); - msi_status = msi_val & ~msi_mask; + msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK); for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) { if (!(BIT(msi_idx) & msi_status)) continue; - /* - * msi_idx contains bits [4:0] of the msi_data and msi_data - * contains 16bit MSI interrupt number - */ advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG); - msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK; - generic_handle_irq(msi_data); + virq = irq_find_mapping(pcie->msi_inner_domain, msi_idx); + generic_handle_irq(virq); } advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING, diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index a070e69bb49c..4353443b89d8 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1118,6 +1118,10 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev, u8 buffer[sizeof(struct pci_delete_interrupt)]; } ctxt; + if (!int_desc->vector_count) { + kfree(int_desc); + return; + } memset(&ctxt, 0, sizeof(ctxt)); int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; int_pkt->message_type.type = @@ -1180,6 +1184,28 @@ static void hv_irq_mask(struct irq_data *data) pci_msi_mask_irq(data); } +static unsigned int hv_msi_get_int_vector(struct irq_data *data) +{ + struct irq_cfg *cfg = irqd_cfg(data); + + return cfg->vector; +} + +static int hv_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + int ret = pci_msi_prepare(domain, dev, nvec, info); + + /* + * By using the interrupt remapper in the hypervisor IOMMU, contiguous + * CPU vectors are not needed for multi-MSI + */ + if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) + info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; + + return ret; +} + /** * hv_irq_unmask() - "Unmask" the IRQ by setting its current * affinity.
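The advk_pcie_handle_msi() hunk above stops deriving the interrupt number from PCIE_MSI_PAYLOAD_REG, which only latches the most recent payload and so drops MSIs when several arrive at once; the bit index in the status register already identifies the hwirq, and irq_find_mapping() turns it into the virq. A minimal userspace model of the new dispatch loop (register accessors and the irq-domain lookup are stubbed; names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MSI_IRQ_NUM 32

/* Stand-ins for advk_readl()/advk_writel() on the real registers. */
static uint32_t msi_status_reg = 0x00000012;	/* pretend MSIs 1 and 4 fired */
static uint32_t msi_mask_reg;			/* 0: all unmasked */

static void ack_status(uint32_t bit)
{
	msi_status_reg &= ~bit;
}

/* Hypothetical stand-in for irq_find_mapping(domain, hwirq). */
static int find_virq(unsigned int hwirq)
{
	return 100 + hwirq;
}

static void handle_msi(void)
{
	uint32_t status = msi_status_reg & ~msi_mask_reg;
	unsigned int idx;

	for (idx = 0; idx < MSI_IRQ_NUM; idx++) {
		if (!(status & (1u << idx)))
			continue;
		/* Ack the bit, then dispatch by hwirq, not by payload. */
		ack_status(1u << idx);
		printf("MSI hwirq %u -> virq %d\n", idx, find_virq(idx));
	}
}

int main(void)
{
	handle_msi();
	return 0;
}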
@@ -1195,6 +1221,7 @@ static void hv_irq_unmask(struct irq_data *data) struct msi_desc *msi_desc = irq_data_get_msi_desc(data); struct irq_cfg *cfg = irqd_cfg(data); struct hv_retarget_device_interrupt *params; + struct tran_int_desc *int_desc; struct hv_pcibus_device *hbus; struct cpumask *dest; cpumask_var_t tmp; @@ -1209,6 +1236,7 @@ static void hv_irq_unmask(struct irq_data *data) pdev = msi_desc_to_pci_dev(msi_desc); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); + int_desc = data->chip_data; spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags); @@ -1216,7 +1244,8 @@ static void hv_irq_unmask(struct irq_data *data) memset(params, 0, sizeof(*params)); params->partition_id = HV_PARTITION_ID_SELF; params->int_entry.source = 1; /* MSI(-X) */ - hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc); + params->int_entry.msi_entry.address = int_desc->address & 0xffffffff; + params->int_entry.msi_entry.data = int_desc->data; params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | (hbus->hdev->dev_instance.b[4] << 16) | (hbus->hdev->dev_instance.b[7] << 8) | @@ -1317,12 +1346,12 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp, static u32 hv_compose_msi_req_v1( struct pci_create_interrupt *int_pkt, struct cpumask *affinity, - u32 slot, u8 vector) + u32 slot, u8 vector, u8 vector_count) { int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; - int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = dest_Fixed; /* @@ -1336,14 +1365,14 @@ static u32 hv_compose_msi_req_v2( struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, - u32 slot, u8 vector) + u32 slot, u8 vector, u8 vector_count) { int cpu; int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; - int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = dest_Fixed; /* @@ -1371,7 +1400,6 @@ static u32 hv_compose_msi_req_v2( */ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { - struct irq_cfg *cfg = irqd_cfg(data); struct hv_pcibus_device *hbus; struct vmbus_channel *channel; struct hv_pci_dev *hpdev; @@ -1380,6 +1408,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct cpumask *dest; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; + struct msi_desc *msi_desc; + u8 vector, vector_count; struct { struct pci_packet pci_pkt; union { @@ -1391,7 +1421,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) u32 size; int ret; - pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + /* Reuse the previous allocation */ + if (data->chip_data) { + int_desc = data->chip_data; + msg->address_hi = int_desc->address >> 32; + msg->address_lo = int_desc->address & 0xffffffff; + msg->data = int_desc->data; + return; + } + + msi_desc = irq_data_get_msi_desc(data); + pdev = msi_desc_to_pci_dev(msi_desc); dest = irq_data_get_effective_affinity_mask(data); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); @@ -1400,17 +1440,40 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) if (!hpdev) goto return_null_message; - /* Free any previous message that might have already been composed.
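Both request composers now pass the caller's vector count through instead of hard-coding 1, which is the core of the multi-MSI fix. A compilable sketch of the v1 composer's shape; the struct layout and constants here are simplified stand-ins, not the real Hyper-V VMBus packet ABI:

#include <stdint.h>

/* Simplified message layout; the real pci_create_interrupt packet differs. */
struct int_desc_req {
	uint8_t  vector;
	uint8_t  vector_count;	/* 1 for single MSI, a power of two for multi-MSI */
	uint8_t  delivery_mode;
	uint32_t cpu_mask;
};

struct create_interrupt_req {
	uint32_t message_type;
	uint32_t slot;
	struct int_desc_req int_desc;
};

static uint32_t compose_msi_req_v1(struct create_interrupt_req *pkt,
				   uint32_t slot, uint8_t vector,
				   uint8_t vector_count)
{
	pkt->message_type = 1;			/* PCI_CREATE_INTERRUPT_MESSAGE */
	pkt->slot = slot;
	pkt->int_desc.vector = vector;
	pkt->int_desc.vector_count = vector_count;	/* was hard-coded to 1 */
	pkt->int_desc.delivery_mode = 0;	/* fixed delivery */
	pkt->int_desc.cpu_mask = ~0u;		/* v1 cannot target a single vCPU */
	return sizeof(*pkt);
}

int main(void)
{
	struct create_interrupt_req pkt = { 0 };

	/* Request a block of 4 vectors; base vector 32 is a dummy value. */
	return compose_msi_req_v1(&pkt, 3, 32, 4) != sizeof(pkt);
}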
*/ - if (data->chip_data) { - int_desc = data->chip_data; - data->chip_data = NULL; - hv_int_desc_free(hpdev, int_desc); - } - int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); if (!int_desc) goto drop_reference; + if (!msi_desc->msi_attrib.is_msix && msi_desc->nvec_used > 1) { + /* + * If this is not the first MSI of Multi MSI, we already have + * a mapping. Can exit early. + */ + if (msi_desc->irq != data->irq) { + data->chip_data = int_desc; + int_desc->address = msi_desc->msg.address_lo | + (u64)msi_desc->msg.address_hi << 32; + int_desc->data = msi_desc->msg.data + + (data->irq - msi_desc->irq); + msg->address_hi = msi_desc->msg.address_hi; + msg->address_lo = msi_desc->msg.address_lo; + msg->data = int_desc->data; + put_pcichild(hpdev); + return; + } + /* + * The vector we select here is a dummy value. The correct + * value gets sent to the hypervisor in unmask(). This needs + * to be aligned with the count, and also not zero. Multi-msi + * is powers of 2 up to 32, so 32 will always work here. + */ + vector = 32; + vector_count = msi_desc->nvec_used; + } else { + vector = hv_msi_get_int_vector(data); + vector_count = 1; + } + memset(&ctxt, 0, sizeof(ctxt)); init_completion(&comp.comp_pkt.host_event); ctxt.pci_pkt.completion_func = hv_pci_compose_compl; @@ -1421,7 +1484,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, dest, hpdev->desc.win_slot.slot, - cfg->vector); + vector, + vector_count); break; case PCI_PROTOCOL_VERSION_1_2: @@ -1429,7 +1493,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, dest, hpdev->desc.win_slot.slot, - cfg->vector); + vector, + vector_count); break; default: @@ -1545,7 +1610,7 @@ static struct irq_chip hv_msi_irq_chip = { }; static struct msi_domain_ops hv_msi_ops = { - .msi_prepare = pci_msi_prepare, + .msi_prepare = hv_msi_prepare, .msi_free = hv_msi_free, }; diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 7631dc3961c1..379cde59988c 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -264,8 +264,7 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, struct rockchip_pcie *pcie = &ep->rockchip; u32 r; - r = find_first_zero_bit(&ep->ob_region_map, - sizeof(ep->ob_region_map) * BITS_PER_LONG); + r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG); /* * Region 0 is reserved for configuration space and shouldn't * be used elsewhere per TRM, so leave it out. 
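The pcie-rockchip-ep hunk at the end fixes a bitmap-length bug: ob_region_map is one unsigned long, so the search limit is BITS_PER_LONG bits; the old sizeof(ep->ob_region_map) * BITS_PER_LONG multiplied bytes by bits and let the search run eight times past the end of the map. A self-contained demonstration with a simplified find_first_zero_bit():

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Simplified find_first_zero_bit() over a single-word bitmap. */
static unsigned long find_first_zero_bit(const unsigned long *map,
					 unsigned long size)
{
	unsigned long i;

	for (i = 0; i < size; i++)
		if (!(*map & (1UL << i)))
			return i;
	return size;	/* "no free bit" is signalled as size */
}

int main(void)
{
	unsigned long ob_region_map = ~0UL;	/* every region in use */

	/* Correct bound: search exactly the BITS_PER_LONG bits that exist. */
	unsigned long r = find_first_zero_bit(&ob_region_map, BITS_PER_LONG);

	printf("r = %lu (r == %zu means the map is full)\n", r, BITS_PER_LONG);

	/*
	 * The old bound, sizeof(ob_region_map) * BITS_PER_LONG, is eight
	 * times too large: the search would shift 1UL past the word width,
	 * which is undefined behaviour, and could return a bogus region.
	 */
	return 0;
}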
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index d41570715dc7..ddfeca9016a0 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -285,7 +285,17 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test) if (ret) dev_err(dev, "Data transfer failed\n"); } else { - memcpy(dst_addr, src_addr, reg->size); + void *buf; + + buf = kzalloc(reg->size, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_map_addr; + } + + memcpy_fromio(buf, src_addr, reg->size); + memcpy_toio(dst_addr, buf, reg->size); + kfree(buf); } ktime_get_ts64(&end); pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma); @@ -441,7 +451,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test) if (!epf_test->dma_supported) { dev_err(dev, "Cannot transfer data using DMA\n"); ret = -EINVAL; - goto err_map_addr; + goto err_dma_map; } src_phys_addr = dma_map_single(dma_dev, buf, reg->size, @@ -613,7 +623,6 @@ static void pci_epf_test_unbind(struct pci_epf *epf) cancel_delayed_work(&epf_test->cmd_handler); pci_epf_test_clean_dma_chan(epf_test); - pci_epc_stop(epc); for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { epf_bar = &epf->bar[bar]; diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index af4c4cc837fc..dda952357747 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -1060,6 +1060,8 @@ static void quirk_cmd_compl(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110, + PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401, diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 9028493dd5c9..c909cc72f801 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -2837,6 +2837,18 @@ static const struct dmi_system_id bridge_d3_blacklist[] = { DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"), }, }, + { + /* + * Downstream device is not accessible after putting a root port + * into D3cold and back into D0 on Elo i2. + */ + .ident = "Elo i2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"), + DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"), + DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"), + }, + }, #endif { } }; @@ -4972,18 +4984,18 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) static void pci_dev_lock(struct pci_dev *dev) { - pci_cfg_access_lock(dev); /* block PM suspend, driver probe, etc. 
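The pci-epf-test copy path above replaces a direct memcpy() between two ioremapped windows with a bounce buffer, since plain memcpy() may use access widths or unaligned operations that MMIO cannot tolerate; memcpy_fromio()/memcpy_toio() are the safe primitives. A userspace model of the pattern (ordinary pointers stand in for __iomem ones):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int copy_via_bounce(void *dst_mmio, const void *src_mmio, size_t size)
{
	void *buf = calloc(1, size);	/* kzalloc(size, GFP_KERNEL) */

	if (!buf)
		return -1;		/* -ENOMEM in the kernel */

	memcpy(buf, src_mmio, size);	/* memcpy_fromio(buf, src_addr, size) */
	memcpy(dst_mmio, buf, size);	/* memcpy_toio(dst_addr, buf, size) */
	free(buf);
	return 0;
}

int main(void)
{
	char src[8] = "abcdefg";
	char dst[8] = { 0 };

	if (copy_via_bounce(dst, src, sizeof(src)))
		return 1;
	puts(dst);	/* prints "abcdefg" */
	return 0;
}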
*/ device_lock(&dev->dev); + pci_cfg_access_lock(dev); } /* Return 1 on successful lock, 0 on contention */ static int pci_dev_trylock(struct pci_dev *dev) { - if (pci_cfg_access_trylock(dev)) { - if (device_trylock(&dev->dev)) + if (device_trylock(&dev->dev)) { + if (pci_cfg_access_trylock(dev)) return 1; - pci_cfg_access_unlock(dev); + device_unlock(&dev->dev); } return 0; @@ -4991,8 +5003,8 @@ static int pci_dev_trylock(struct pci_dev *dev) static void pci_dev_unlock(struct pci_dev *dev) { - device_unlock(&dev->dev); pci_cfg_access_unlock(dev); + device_unlock(&dev->dev); } static void pci_dev_save_and_disable(struct pci_dev *dev) diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 197075f71fd6..4e38fdb61144 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -565,8 +565,8 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) /* PCI error reporting and recovery */ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, - pci_channel_state_t state, - pci_ers_result_t (*reset_link)(struct pci_dev *pdev)); + pci_channel_state_t state, + pci_ers_result_t (*reset_subordinates)(struct pci_dev *pdev)); bool pcie_wait_for_link(struct pci_dev *pdev, bool active); #ifdef CONFIG_PCIEASPM @@ -591,11 +591,8 @@ static inline void pcie_ecrc_get_policy(char *str) { } #ifdef CONFIG_PCIE_PTM void pci_ptm_init(struct pci_dev *dev); -int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); #else static inline void pci_ptm_init(struct pci_dev *dev) { } -static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) -{ return -EINVAL; } #endif struct pci_dev_reset_methods { diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 65dff5f3457a..9564b74003f0 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -101,6 +101,11 @@ struct aer_stats { #define ERR_COR_ID(d) (d & 0xffff) #define ERR_UNCOR_ID(d) (d >> 16) +#define AER_ERR_STATUS_MASK (PCI_ERR_ROOT_UNCOR_RCV | \ + PCI_ERR_ROOT_COR_RCV | \ + PCI_ERR_ROOT_MULTI_COR_RCV | \ + PCI_ERR_ROOT_MULTI_UNCOR_RCV) + static int pcie_aer_disable; static pci_ers_result_t aer_root_reset(struct pci_dev *dev); @@ -300,7 +305,8 @@ int pci_aer_raw_clear_status(struct pci_dev *dev) return -EIO; port_type = pci_pcie_type(dev); - if (port_type == PCI_EXP_TYPE_ROOT_PORT) { + if (port_type == PCI_EXP_TYPE_ROOT_PORT || + port_type == PCI_EXP_TYPE_RC_EC) { pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &status); pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, status); } @@ -532,7 +538,7 @@ static const char *aer_agent_string[] = { struct pci_dev *pdev = to_pci_dev(dev); \ u64 *stats = pdev->aer_stats->stats_array; \ \ - for (i = 0; i < ARRAY_SIZE(strings_array); i++) { \ + for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\ if (strings_array[i]) \ str += sprintf(str, "%s %llu\n", \ strings_array[i], stats[i]); \ @@ -595,7 +601,8 @@ static umode_t aer_stats_attrs_are_visible(struct kobject *kobj, if ((a == &dev_attr_aer_rootport_total_err_cor.attr || a == &dev_attr_aer_rootport_total_err_fatal.attr || a == &dev_attr_aer_rootport_total_err_nonfatal.attr) && - pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) + ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) && + (pci_pcie_type(pdev) != PCI_EXP_TYPE_RC_EC))) return 0; return a->mode; @@ -1034,6 +1041,7 @@ EXPORT_SYMBOL_GPL(aer_recover_queue); */ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) { + int type = pci_pcie_type(dev); int aer = dev->aer_cap; int temp; @@ -1052,8 +1060,8 @@ int 
aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) &info->mask); if (!(info->status & ~info->mask)) return 0; - } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || - pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM || + } else if (type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_DOWNSTREAM || info->severity == AER_NONFATAL) { /* Link is still healthy for IO reads */ @@ -1187,7 +1195,7 @@ static irqreturn_t aer_irq(int irq, void *context) struct aer_err_source e_src = {}; pci_read_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, &e_src.status); - if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) + if (!(e_src.status & AER_ERR_STATUS_MASK)) return IRQ_NONE; pci_read_config_dword(rp, aer + PCI_ERR_ROOT_ERR_SRC, &e_src.id); @@ -1205,6 +1213,7 @@ static int set_device_error_reporting(struct pci_dev *dev, void *data) int type = pci_pcie_type(dev); if ((type == PCI_EXP_TYPE_ROOT_PORT) || + (type == PCI_EXP_TYPE_RC_EC) || (type == PCI_EXP_TYPE_UPSTREAM) || (type == PCI_EXP_TYPE_DOWNSTREAM)) { if (enable) @@ -1329,6 +1338,16 @@ static int aer_probe(struct pcie_device *dev) struct device *device = &dev->device; struct pci_dev *port = dev->port; + BUILD_BUG_ON(ARRAY_SIZE(aer_correctable_error_string) < + AER_MAX_TYPEOF_COR_ERRS); + BUILD_BUG_ON(ARRAY_SIZE(aer_uncorrectable_error_string) < + AER_MAX_TYPEOF_UNCOR_ERRS); + + /* Limit to Root Ports or Root Complex Event Collectors */ + if ((pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC) && + (pci_pcie_type(port) != PCI_EXP_TYPE_ROOT_PORT)) + return -ENODEV; + rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL); if (!rpc) return -ENOMEM; @@ -1350,41 +1369,60 @@ static int aer_probe(struct pcie_device *dev) } /** - * aer_root_reset - reset link on Root Port - * @dev: pointer to Root Port's pci_dev data structure + * aer_root_reset - reset Root Port hierarchy or RCEC + * @dev: pointer to Root Port or RCEC * - * Invoked by Port Bus driver when performing link reset at Root Port. + * Invoked by Port Bus driver when performing reset. 
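AER_ERR_STATUS_MASK, introduced earlier in this file, widens aer_irq()'s claim test from just the ERR_COR/ERR_UNCOR "received" bits to the "multiple errors received" bits as well, so an interrupt raised with only a multiple-received bit set is handled and cleared instead of returning IRQ_NONE. A small model (bit values follow the PCIe Root Error Status register layout):

#include <stdint.h>
#include <stdio.h>

/* Root Error Status bits, per the PCIe spec layout of that register. */
#define ERR_COR_RCV		(1u << 0)
#define ERR_MULTI_COR_RCV	(1u << 1)
#define ERR_UNCOR_RCV		(1u << 2)
#define ERR_MULTI_UNCOR_RCV	(1u << 3)

#define AER_ERR_STATUS_MASK	(ERR_UNCOR_RCV | ERR_COR_RCV | \
				 ERR_MULTI_COR_RCV | ERR_MULTI_UNCOR_RCV)

int main(void)
{
	/* Only the "multiple ERR_COR received" bit is set. */
	uint32_t root_status = ERR_MULTI_COR_RCV;

	/* The old test saw nothing to do and returned IRQ_NONE. */
	printf("old check: %s\n",
	       root_status & (ERR_UNCOR_RCV | ERR_COR_RCV) ?
	       "handled" : "ignored");

	/* The widened mask claims the interrupt so the bit gets cleared. */
	printf("new check: %s\n",
	       root_status & AER_ERR_STATUS_MASK ? "handled" : "ignored");
	return 0;
}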
*/ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) { - int aer = dev->aer_cap; + int type = pci_pcie_type(dev); + struct pci_dev *root; + int aer; + struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); u32 reg32; int rc; + root = dev; /* device with Root Error registers */ + aer = root->aer_cap; - /* Disable Root's interrupt in response to error messages */ - pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, &reg32); - reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; - pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, reg32); + if ((host->native_aer || pcie_ports_native) && aer) { + /* Disable Root's interrupt in response to error messages */ + pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, &reg32); + reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; + pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32); + } - rc = pci_bus_error_reset(dev); - pci_info(dev, "Root Port link has been reset\n"); + if (type == PCI_EXP_TYPE_RC_EC) { + if (pcie_has_flr(dev)) { + rc = pcie_flr(dev); + pci_info(dev, "has been reset (%d)\n", rc); + } else { + pci_info(dev, "not reset (no FLR support)\n"); + rc = -ENOTTY; + } + } else { + rc = pci_bus_error_reset(dev); + pci_info(dev, "Root Port link has been reset (%d)\n", rc); + } - /* Clear Root Error Status */ - pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &reg32); - pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, reg32); + if ((host->native_aer || pcie_ports_native) && aer) { + /* Clear Root Error Status */ + pci_read_config_dword(root, aer + PCI_ERR_ROOT_STATUS, &reg32); + pci_write_config_dword(root, aer + PCI_ERR_ROOT_STATUS, reg32); - /* Enable Root Port's interrupt in response to error messages */ - pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, &reg32); - reg32 |= ROOT_PORT_INTR_ON_MESG_MASK; - pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, reg32); + /* Enable Root Port's interrupt in response to error messages */ + pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, &reg32); + reg32 |= ROOT_PORT_INTR_ON_MESG_MASK; + pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32); + } return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } static struct pcie_port_service_driver aerdriver = { .name = "aer", - .port_type = PCI_EXP_TYPE_ROOT_PORT, + .port_type = PCIE_ANY_PORT, .service = PCIE_PORT_SERVICE_AER, .probe = aer_probe, diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index c543f419d8f9..984aa023c753 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -146,38 +146,68 @@ out: return 0; } -pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, - pci_channel_state_t state, - pci_ers_result_t (*reset_link)(struct pci_dev *pdev)) +/** + * pci_walk_bridge - walk bridges potentially AER affected + * @bridge: bridge which may be a Port or an RCEC + * @cb: callback to be called for each device found + * @userdata: arbitrary pointer to be passed to callback + * + * If the device provided is a bridge, walk the subordinate bus, including + * any bridged devices on buses under this bus. Call the provided callback + * on each device found. + * + * If the device provided has no subordinate bus, e.g., an RCEC, call the + * callback on the device itself.
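Stripped of the register traffic, the reworked aer_root_reset() is a small decision tree: touch the Root Error registers only when the OS owns AER, FLR an RCEC (which has no subordinate link to reset), and fall back to a secondary bus reset for a Root Port. A compact model of that flow, with the hardware operations stubbed out:

#include <stdbool.h>
#include <stdio.h>

enum port_type { ROOT_PORT, RC_EC };

/* Stubs for the operations the real function performs. */
static bool native_aer = true;	/* OS owns the AER registers */
static bool has_flr = true;	/* RCEC advertises Function Level Reset */

static void mask_root_err_irq(void)	{ puts("mask Root Error interrupts"); }
static void unmask_root_err_irq(void)	{ puts("clear status, re-enable irqs"); }
static int do_flr(void)			{ puts("FLR the RCEC itself"); return 0; }
static int do_bus_reset(void)		{ puts("bus reset below the Root Port"); return 0; }

static int root_reset(enum port_type type)
{
	int rc;

	if (native_aer)		/* only touch AER registers the OS owns */
		mask_root_err_irq();

	if (type == RC_EC)	/* no link below an RCEC, reset the function */
		rc = has_flr ? do_flr() : -1;	/* -ENOTTY without FLR */
	else
		rc = do_bus_reset();

	if (native_aer)
		unmask_root_err_irq();

	return rc;		/* 0 -> RECOVERED, nonzero -> DISCONNECT */
}

int main(void)
{
	return root_reset(RC_EC);
}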
+ */ +static void pci_walk_bridge(struct pci_dev *bridge, + int (*cb)(struct pci_dev *, void *), + void *userdata) { + if (bridge->subordinate) + pci_walk_bus(bridge->subordinate, cb, userdata); + else + cb(bridge, userdata); +} + +pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, + pci_channel_state_t state, + pci_ers_result_t (*reset_subordinates)(struct pci_dev *pdev)) +{ + int type = pci_pcie_type(dev); + struct pci_dev *bridge; pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER; - struct pci_bus *bus; /* - * Error recovery runs on all subordinates of the first downstream port. - * If the downstream port detected the error, it is cleared at the end. + * If the error was detected by a Root Port, Downstream Port, or + * RCEC, recovery runs on the device itself. For Ports, that also + * includes any subordinate devices. + * + * If it was detected by another device (Endpoint, etc), recovery + * runs on the device and anything else under the same Port, i.e., + * everything under "bridge". */ - if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || - pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)) - dev = dev->bus->self; - bus = dev->subordinate; + if (type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_DOWNSTREAM || + type == PCI_EXP_TYPE_RC_EC) + bridge = dev; + else + bridge = pci_upstream_bridge(dev); - pci_dbg(dev, "broadcast error_detected message\n"); + pci_dbg(bridge, "broadcast error_detected message\n"); if (state == pci_channel_io_frozen) { - pci_walk_bus(bus, report_frozen_detected, &status); - status = reset_link(dev); - if (status != PCI_ERS_RESULT_RECOVERED) { - pci_warn(dev, "link reset failed\n"); + pci_walk_bridge(bridge, report_frozen_detected, &status); + if (reset_subordinates(bridge) != PCI_ERS_RESULT_RECOVERED) { + pci_warn(bridge, "subordinate device reset failed\n"); goto failed; } } else { - pci_walk_bus(bus, report_normal_detected, &status); + pci_walk_bridge(bridge, report_normal_detected, &status); } if (status == PCI_ERS_RESULT_CAN_RECOVER) { status = PCI_ERS_RESULT_RECOVERED; - pci_dbg(dev, "broadcast mmio_enabled message\n"); - pci_walk_bus(bus, report_mmio_enabled, &status); + pci_dbg(bridge, "broadcast mmio_enabled message\n"); + pci_walk_bridge(bridge, report_mmio_enabled, &status); } if (status == PCI_ERS_RESULT_NEED_RESET) { @@ -187,27 +217,27 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, * drivers' slot_reset callbacks? */ status = PCI_ERS_RESULT_RECOVERED; - pci_dbg(dev, "broadcast slot_reset message\n"); - pci_walk_bus(bus, report_slot_reset, &status); + pci_dbg(bridge, "broadcast slot_reset message\n"); + pci_walk_bridge(bridge, report_slot_reset, &status); } if (status != PCI_ERS_RESULT_RECOVERED) goto failed; - pci_dbg(dev, "broadcast resume message\n"); - pci_walk_bus(bus, report_resume, &status); + pci_dbg(bridge, "broadcast resume message\n"); + pci_walk_bridge(bridge, report_resume, &status); - if (pcie_aer_is_native(dev)) - pcie_clear_device_status(dev); - pci_aer_clear_nonfatal_status(dev); - pci_info(dev, "device recovery successful\n"); + if (pcie_aer_is_native(bridge)) + pcie_clear_device_status(bridge); + pci_aer_clear_nonfatal_status(bridge); + pci_info(bridge, "device recovery successful\n"); return status; failed: - pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT); + pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT); /* TODO: Should kernel panic here? 
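pcie_do_recovery() now picks its "bridge" once and broadcasts through pci_walk_bridge(), which degenerates to a single callback invocation for an RCEC with no subordinate bus. A minimal model of the selection and walk (the real code walks the whole subtree with pci_walk_bus()):

#include <stdio.h>

enum pcie_type { ROOT_PORT, DOWNSTREAM, RC_EC, ENDPOINT };

struct dev {
	const char *name;
	enum pcie_type type;
	struct dev *upstream;		/* pci_upstream_bridge() */
	struct dev *subordinate;	/* NULL for an RCEC or endpoint */
};

/* Recovery runs on the detecting Port/RCEC, else on its upstream Port. */
static struct dev *pick_bridge(struct dev *d)
{
	if (d->type == ROOT_PORT || d->type == DOWNSTREAM || d->type == RC_EC)
		return d;
	return d->upstream;
}

static void walk_bridge(struct dev *bridge, void (*cb)(struct dev *))
{
	if (bridge->subordinate)
		cb(bridge->subordinate);	/* pci_walk_bus() on the real tree */
	else
		cb(bridge);			/* RCEC: callback on itself */
}

static void report(struct dev *d)
{
	printf("error_detected -> %s\n", d->name);
}

int main(void)
{
	struct dev ep = { "endpoint", ENDPOINT, NULL, NULL };
	struct dev rp = { "root port", ROOT_PORT, NULL, &ep };
	struct dev rcec = { "rcec", RC_EC, NULL, NULL };

	ep.upstream = &rp;
	walk_bridge(pick_bridge(&ep), report);	 /* recovers under the Port */
	walk_bridge(pick_bridge(&rcec), report); /* callback on the RCEC */
	return 0;
}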
*/ - pci_info(dev, "device recovery failed\n"); + pci_info(bridge, "device recovery failed\n"); return status; } diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index d4559cf88f79..aac1a6828b4f 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -101,12 +101,14 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { static int pcie_portdrv_probe(struct pci_dev *dev, const struct pci_device_id *id) { + int type = pci_pcie_type(dev); int status; if (!pci_is_pcie(dev) || - ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) && - (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) && - (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM))) + ((type != PCI_EXP_TYPE_ROOT_PORT) && + (type != PCI_EXP_TYPE_UPSTREAM) && + (type != PCI_EXP_TYPE_DOWNSTREAM) && + (type != PCI_EXP_TYPE_RC_EC))) return -ENODEV; status = pcie_port_device_register(dev); @@ -195,6 +197,8 @@ static const struct pci_device_id port_pci_ids[] = { { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0) }, /* subtractive decode PCI-to-PCI bridge, class type is 060401h */ { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x01), ~0) }, + /* handle any Root Complex Event Collector */ + { PCI_DEVICE_CLASS(((PCI_CLASS_SYSTEM_RCEC << 8) | 0x00), ~0) }, { }, }; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 0ad6bf1807b2..439991270ed1 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4904,6 +4904,9 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, /* Broadcom multi-function device */ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs }, /* Amazon Annapurna Labs */ { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs }, diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 7f1acb3918d0..875d50c16f19 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -210,6 +210,17 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, root = pci_find_parent_resource(dev, res); if (!root) { + /* + * If dev is behind a bridge, accesses will only reach it + * if res is inside the relevant bridge window. + */ + if (pci_upstream_bridge(dev)) + return -ENXIO; + + /* + * On the root bus, assume the host bridge will forward + * everything. 
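The setup-res.c hunk distinguishes two failure modes when no parent resource is found for a BAR: behind a bridge, an address outside every bridge window can never reach the device, so reverting to the firmware address must fail; on the root bus the host bridge is assumed to forward everything, so the global I/O or memory resource is an acceptable parent. A sketch of that decision (types and lookups simplified):

#include <stdbool.h>
#include <stdio.h>

struct res {
	unsigned long start, end;
	bool io;
};

/* Stand-ins for the kernel's global root resources. */
static struct res ioport_resource = { 0, 0xffff, true };
static struct res iomem_resource  = { 0, ~0UL, false };

static struct res *revert_fw_address(struct res *bar, bool behind_bridge,
				     struct res *parent_window)
{
	if (parent_window)
		return parent_window;	/* BAR fits inside a bridge window */

	/*
	 * Behind a bridge with no enclosing window: accesses can never
	 * reach the device, so give up (-ENXIO in the kernel).
	 */
	if (behind_bridge)
		return NULL;

	/* Root bus: assume the host bridge forwards everything. */
	return bar->io ? &ioport_resource : &iomem_resource;
}

int main(void)
{
	struct res bar = { 0x1000, 0x1fff, true };

	printf("root bus fallback works: %d\n",
	       revert_fw_address(&bar, false, NULL) != NULL);
	printf("behind a bridge fails:   %d\n",
	       revert_fw_address(&bar, true, NULL) == NULL);
	return 0;
}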
+ */ if (res->flags & IORESOURCE_IO) root = &ioport_resource; else diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index 82d10b6661c7..73508fca520c 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig @@ -151,7 +151,7 @@ config TCIC config PCMCIA_ALCHEMY_DEVBOARD tristate "Alchemy Db/Pb1xxx PCMCIA socket services" - depends on MIPS_ALCHEMY && PCMCIA + depends on MIPS_DB1XXX && PCMCIA help Enable this driver if you want PCMCIA support on your Alchemy Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300 diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index cb2f55f450e4..7fd11ef5cb8a 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -398,6 +398,9 @@ validate_group(struct perf_event *event) if (!validate_event(event->pmu, &fake_pmu, leader)) return -EINVAL; + if (event == leader) + return 0; + for_each_sibling_event(sibling, leader) { if (!validate_event(event->pmu, &fake_pmu, sibling)) return -EINVAL; @@ -487,12 +490,7 @@ __hw_perf_event_init(struct perf_event *event) local64_set(&hwc->period_left, hwc->sample_period); } - if (event->group_leader != event) { - if (validate_group(event) != 0) - return -EINVAL; - } - - return 0; + return validate_group(event); } static int armpmu_event_init(struct perf_event *event) diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c index ef9676418c9f..2e1f3680d846 100644 --- a/drivers/perf/arm_pmu_platform.c +++ b/drivers/perf/arm_pmu_platform.c @@ -117,7 +117,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) if (num_irqs == 1) { int irq = platform_get_irq(pdev, 0); - if (irq && irq_is_percpu_devid(irq)) + if ((irq > 0) && irq_is_percpu_devid(irq)) return pmu_parse_percpu_irq(pmu, irq); } diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index cc00915ad6d1..6fbfcab4918c 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -39,6 +39,24 @@ #include #include +/* + * Cache if the event is allowed to trace Context information. + * This allows us to perform the check, i.e., perfmon_capable(), + * in the context of the event owner, once, during the event_init().
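The arm_spe_pmu change caches the perfmon_capable() verdict in the event's hw flags during event_init(), where the check runs in the opening task's context; arm_spe_event_to_pmscr() may later run where a fresh capability check would test the wrong task. A small model of the cache-once/consult-later split:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPE_PMU_HW_FLAGS_CX (1u << 0)

struct perf_event {
	uint64_t hw_flags;
};

/* Stand-in for perfmon_capable(); only valid in the opener's context. */
static bool perfmon_capable(void)
{
	return true;
}

/* Runs once at event_init() time, in the owning task's context. */
static void set_event_has_cx(struct perf_event *event)
{
	if (perfmon_capable())
		event->hw_flags |= SPE_PMU_HW_FLAGS_CX;
}

/* May run later in any context: consults only the cached verdict. */
static bool event_has_cx(const struct perf_event *event)
{
	return event->hw_flags & SPE_PMU_HW_FLAGS_CX;
}

int main(void)
{
	struct perf_event ev = { 0 };

	set_event_has_cx(&ev);
	printf("PMSCR_EL1.CX %s\n", event_has_cx(&ev) ? "set" : "clear");
	return 0;
}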
+ */ +#define SPE_PMU_HW_FLAGS_CX BIT(0) + +static void set_spe_event_has_cx(struct perf_event *event) +{ + if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable()) + event->hw.flags |= SPE_PMU_HW_FLAGS_CX; +} + +static bool get_spe_event_has_cx(struct perf_event *event) +{ + return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX); +} + #define ARM_SPE_BUF_PAD_BYTE 0 struct arm_spe_pmu_buf { @@ -274,7 +292,7 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event) if (!attr->exclude_kernel) reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT); - if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable()) + if (get_spe_event_has_cx(event)) reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT); return reg; @@ -699,10 +717,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event) !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT)) return -EOPNOTSUPP; + set_spe_event_has_cx(event); reg = arm_spe_event_to_pmscr(event); if (!perfmon_capable() && (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) | - BIT(SYS_PMSCR_EL1_CX_SHIFT) | BIT(SYS_PMSCR_EL1_PCT_SHIFT)))) return -EACCES; diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 7f7bc0993670..e09bbf3890c4 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -29,7 +29,7 @@ #define CNTL_OVER_MASK 0xFFFFFFFE #define CNTL_CSV_SHIFT 24 -#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT) +#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT) #define EVENT_CYCLES_ID 0 #define EVENT_CYCLES_COUNTER 0 diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 23a0e008dafa..d58810f53727 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -739,7 +739,7 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster( { u64 mpidr; int cpu_cluster_id; - struct cluster_pmu *cluster = NULL; + struct cluster_pmu *cluster; /* * This assumes that the cluster_id is in MPIDR[aff1] for @@ -761,10 +761,10 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster( cluster->cluster_id); cpumask_set_cpu(cpu, &cluster->cluster_cpus); *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; - break; + return cluster; } - return cluster; + return NULL; } static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) diff --git a/drivers/phy/amlogic/phy-meson8b-usb2.c b/drivers/phy/amlogic/phy-meson8b-usb2.c index 03c061dd5f0d..8f40b9342a97 100644 --- a/drivers/phy/amlogic/phy-meson8b-usb2.c +++ b/drivers/phy/amlogic/phy-meson8b-usb2.c @@ -261,8 +261,9 @@ static int phy_meson8b_usb2_probe(struct platform_device *pdev) return PTR_ERR(priv->clk_usb); priv->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL); - if (PTR_ERR(priv->reset) == -EPROBE_DEFER) - return PTR_ERR(priv->reset); + if (IS_ERR(priv->reset)) + return dev_err_probe(&pdev->dev, PTR_ERR(priv->reset), + "Failed to get the reset line"); priv->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1); if (priv->dr_mode == USB_DR_MODE_UNKNOWN) { diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c index 5172971f4c36..3cd4d51c247c 100644 --- a/drivers/phy/motorola/phy-mapphone-mdm6600.c +++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c @@ -629,7 +629,8 @@ idle: cleanup: if (error < 0) phy_mdm6600_device_power_off(ddata); - + pm_runtime_disable(ddata->dev); + pm_runtime_dont_use_autosuspend(ddata->dev); return error; } diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index 0cda16846962..afcc82ab3202 100644 --- 
a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -3141,7 +3141,7 @@ static int qcom_qmp_phy_power_on(struct phy *phy) ret = reset_control_deassert(qmp->ufs_reset); if (ret) - goto err_lane_rst; + goto err_pcs_ready; qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl, cfg->pcs_misc_tbl_num); @@ -3717,6 +3717,11 @@ static const struct phy_ops qcom_qmp_pcie_ufs_ops = { .owner = THIS_MODULE, }; +static void qcom_qmp_reset_control_put(void *data) +{ + reset_control_put(data); +} + static int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id, void __iomem *serdes, const struct qmp_phy_cfg *cfg) @@ -3789,7 +3794,7 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id, * all phys that don't need this. */ snprintf(prop_name, sizeof(prop_name), "pipe%d", id); - qphy->pipe_clk = of_clk_get_by_name(np, prop_name); + qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name); if (IS_ERR(qphy->pipe_clk)) { if (cfg->type == PHY_TYPE_PCIE || cfg->type == PHY_TYPE_USB3) { @@ -3811,6 +3816,10 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id, dev_err(dev, "failed to get lane%d reset\n", id); return PTR_ERR(qphy->lane_rst); } + ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put, + qphy->lane_rst); + if (ret) + return ret; } if (cfg->type == PHY_TYPE_UFS || cfg->type == PHY_TYPE_PCIE) diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c index 04d18d52f700..d4741c2dbbb5 100644 --- a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c +++ b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c @@ -54,8 +54,10 @@ static int qcom_usb_hsic_phy_power_on(struct phy *phy) /* Configure pins for HSIC functionality */ pins_default = pinctrl_lookup_state(uphy->pctl, PINCTRL_STATE_DEFAULT); - if (IS_ERR(pins_default)) - return PTR_ERR(pins_default); + if (IS_ERR(pins_default)) { + ret = PTR_ERR(pins_default); + goto err_ulpi; + } ret = pinctrl_select_state(uphy->pctl, pins_default); if (ret) diff --git a/drivers/phy/samsung/phy-exynos5250-sata.c b/drivers/phy/samsung/phy-exynos5250-sata.c index 4dd7324d91b2..ea46576404b8 100644 --- a/drivers/phy/samsung/phy-exynos5250-sata.c +++ b/drivers/phy/samsung/phy-exynos5250-sata.c @@ -190,6 +190,7 @@ static int exynos_sata_phy_probe(struct platform_device *pdev) return -EINVAL; sata_phy->client = of_find_i2c_device_by_node(node); + of_node_put(node); if (!sata_phy->client) return -EPROBE_DEFER; @@ -198,20 +199,21 @@ static int exynos_sata_phy_probe(struct platform_device *pdev) sata_phy->phyclk = devm_clk_get(dev, "sata_phyctrl"); if (IS_ERR(sata_phy->phyclk)) { dev_err(dev, "failed to get clk for PHY\n"); - return PTR_ERR(sata_phy->phyclk); + ret = PTR_ERR(sata_phy->phyclk); + goto put_dev; } ret = clk_prepare_enable(sata_phy->phyclk); if (ret < 0) { dev_err(dev, "failed to enable source clk\n"); - return ret; + goto put_dev; } sata_phy->phy = devm_phy_create(dev, NULL, &exynos_sata_phy_ops); if (IS_ERR(sata_phy->phy)) { - clk_disable_unprepare(sata_phy->phyclk); dev_err(dev, "failed to create PHY\n"); - return PTR_ERR(sata_phy->phy); + ret = PTR_ERR(sata_phy->phy); + goto clk_disable; } phy_set_drvdata(sata_phy->phy, sata_phy); @@ -219,11 +221,18 @@ static int exynos_sata_phy_probe(struct platform_device *pdev) phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); if (IS_ERR(phy_provider)) { - clk_disable_unprepare(sata_phy->phyclk); - return PTR_ERR(phy_provider); + ret = PTR_ERR(phy_provider); + goto 
clk_disable; } return 0; + +clk_disable: + clk_disable_unprepare(sata_phy->phyclk); +put_dev: + put_device(&sata_phy->client->dev); + + return ret; } static const struct of_device_id exynos_sata_phy_of_match[] = { diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index 2b3639cba51a..03fc567e9f18 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -393,6 +393,8 @@ static int stm32_usbphyc_probe(struct platform_device *pdev) ret = of_property_read_u32(child, "reg", &index); if (ret || index > usbphyc->nphys) { dev_err(&phy->dev, "invalid reg property: %d\n", ret); + if (!ret) + ret = -EINVAL; goto put_child; } diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c index 2ff56ce77b30..21c0088f5ca9 100644 --- a/drivers/phy/ti/phy-am654-serdes.c +++ b/drivers/phy/ti/phy-am654-serdes.c @@ -838,7 +838,7 @@ static int serdes_am654_probe(struct platform_device *pdev) clk_err: of_clk_del_provider(node); - + pm_runtime_disable(dev); return ret; } diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c index 4fec90d2624f..f77ac041d836 100644 --- a/drivers/phy/ti/phy-omap-usb2.c +++ b/drivers/phy/ti/phy-omap-usb2.c @@ -215,7 +215,7 @@ static int omap_usb2_enable_clocks(struct omap_usb *phy) return 0; err1: - clk_disable(phy->wkupclk); + clk_disable_unprepare(phy->wkupclk); err0: return ret; diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c index 5c1a109842a7..c2ba4064ce5b 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c @@ -1224,18 +1224,12 @@ FUNC_GROUP_DECL(SALT8, AA12); FUNC_GROUP_DECL(WDTRST4, AA12); #define AE12 196 -SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID, - SIG_DESC_SET(SCU438, 4)); SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4); -PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2), - SIG_EXPR_LIST_PTR(AE12, GPIOY4)); +PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, GPIOY4)); #define AF12 197 -SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID, - SIG_DESC_SET(SCU438, 5)); SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5); -PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3), - SIG_EXPR_LIST_PTR(AF12, GPIOY5)); +PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, GPIOY5)); #define AC12 198 SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6)); @@ -1508,9 +1502,8 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3)); PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7); GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4); -GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12); GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4); -FUNC_DECL_2(FWSPID, FWSPID, FWQSPID); +FUNC_DECL_1(FWSPID, FWSPID); FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4); FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8); /* @@ -1906,7 +1899,6 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = { ASPEED_PINCTRL_GROUP(FSI2), ASPEED_PINCTRL_GROUP(FWSPIABR), ASPEED_PINCTRL_GROUP(FWSPID), - ASPEED_PINCTRL_GROUP(FWQSPID), ASPEED_PINCTRL_GROUP(FWSPIWP), ASPEED_PINCTRL_GROUP(GPIT0), ASPEED_PINCTRL_GROUP(GPIT1), diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c index 9c65d560d48f..e792318c3894 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c @@ -235,11 +235,11 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, const struct aspeed_sig_expr **funcs; const struct 
aspeed_sig_expr ***prios; - pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name); - if (!pdesc) return -EINVAL; + pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name); + prios = pdesc->prios; if (!prios) diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 3fb238714718..eac55fee5281 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -220,6 +220,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev) for (state = 0; ; state++) { /* Retrieve the pinctrl-* property */ propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state); + if (!propname) + return -ENOMEM; prop = of_find_property(np, propname, &size); kfree(propname); if (!prop) { diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 348c670a7b07..db9087c129c0 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -426,9 +426,14 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) writel(value, padcfg0); } +static int __intel_gpio_get_gpio_mode(u32 value) +{ + return (value & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT; +} + static int intel_gpio_get_gpio_mode(void __iomem *padcfg0) { - return (readl(padcfg0) & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT; + return __intel_gpio_get_gpio_mode(readl(padcfg0)); } static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) @@ -1571,16 +1576,14 @@ EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_uid); const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_device *pdev) { + const struct intel_pinctrl_soc_data * const *table; const struct intel_pinctrl_soc_data *data = NULL; - const struct intel_pinctrl_soc_data **table; - struct acpi_device *adev; - unsigned int i; - adev = ACPI_COMPANION(&pdev->dev); - if (adev) { - const void *match = device_get_match_data(&pdev->dev); + table = device_get_match_data(&pdev->dev); + if (table) { + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + unsigned int i; - table = (const struct intel_pinctrl_soc_data **)match; for (i = 0; table[i]; i++) { if (!strcmp(adev->pnp.unique_id, table[i]->uid)) { data = table[i]; @@ -1594,7 +1597,7 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_ if (!id) return ERR_PTR(-ENODEV); - table = (const struct intel_pinctrl_soc_data **)id->driver_data; + table = (const struct intel_pinctrl_soc_data * const *)id->driver_data; data = table[pdev->id]; } @@ -1606,6 +1609,7 @@ EXPORT_SYMBOL_GPL(intel_pinctrl_get_soc_data); static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin) { const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin); + u32 value; if (!pd || !intel_pad_usable(pctrl, pin)) return false; @@ -1620,6 +1624,25 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int gpiochip_line_is_irq(&pctrl->chip, intel_pin_to_gpio(pctrl, pin))) return true; + /* + * The firmware on some systems may configure GPIO pins to be + * an interrupt source in so-called "direct IRQ" mode. In such + * cases the GPIO controller driver has no idea if those pins + * are being used or not. At the same time, there is a known bug + * in the firmwares that don't restore the pin settings correctly + * after suspend, i.e. for an unknown reason the Rx value becomes + * inverted. + * + * Hence, let's save and restore the pins that are configured + * as GPIOs in the input mode with GPIROUTIOXAPIC bit set.
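The new should-save test reads PADCFG0 once and saves pads that firmware routed straight to the IOxAPIC while leaving them muxed as GPIO inputs, since those are exactly the pads whose state the buggy firmware fails to restore. A sketch of the predicate; the bit positions below are illustrative stand-ins for the driver's PADCFG0_* definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative PADCFG0 layout; the driver's real masks may differ. */
#define PADCFG0_GPIROUTIOXAPIC	(1u << 20)	/* routed as a direct IRQ */
#define PADCFG0_GPIOTXDIS	(1u << 8)	/* TX disabled: input mode */
#define PADCFG0_PMODE_SHIFT	10
#define PADCFG0_PMODE_MASK	(0xfu << PADCFG0_PMODE_SHIFT)
#define PADCFG0_PMODE_GPIO	0		/* pad muxed as GPIO */

static unsigned int pad_mode(uint32_t value)
{
	return (value & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
}

/* Save pads the firmware configured as "direct IRQ" GPIO inputs. */
static bool should_save_direct_irq(uint32_t padcfg0)
{
	return (padcfg0 & PADCFG0_GPIROUTIOXAPIC) &&
	       (padcfg0 & PADCFG0_GPIOTXDIS) &&
	       pad_mode(padcfg0) == PADCFG0_PMODE_GPIO;
}

int main(void)
{
	uint32_t value = PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIOTXDIS;

	printf("save across suspend: %s\n",
	       should_save_direct_irq(value) ? "yes" : "no");
	return 0;
}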
+ * + * See https://bugzilla.kernel.org/show_bug.cgi?id=214749. + */ + value = readl(intel_get_padcfg(pctrl, pin, PADCFG0)); + if ((value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) && + (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO)) + return true; + return false; } diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig index eef17f228669..4ed41d361589 100644 --- a/drivers/pinctrl/mediatek/Kconfig +++ b/drivers/pinctrl/mediatek/Kconfig @@ -30,6 +30,7 @@ config PINCTRL_MTK_MOORE select GENERIC_PINMUX_FUNCTIONS select GPIOLIB select OF_GPIO + select EINT_MTK select PINCTRL_MTK_V2 config PINCTRL_MTK_PARIS diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c index 22736f60c16c..64a32d3ca481 100644 --- a/drivers/pinctrl/mediatek/mtk-eint.c +++ b/drivers/pinctrl/mediatek/mtk-eint.c @@ -278,12 +278,15 @@ static struct irq_chip mtk_eint_irq_chip = { static unsigned int mtk_eint_hw_init(struct mtk_eint *eint) { - void __iomem *reg = eint->base + eint->regs->dom_en; + void __iomem *dom_en = eint->base + eint->regs->dom_en; + void __iomem *mask_set = eint->base + eint->regs->mask_set; unsigned int i; for (i = 0; i < eint->hw->ap_num; i += 32) { - writel(0xffffffff, reg); - reg += 4; + writel(0xffffffff, dom_en); + writel(0xffffffff, mask_set); + dom_en += 4; + mask_set += 4; } return 0; diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index 5cb018f98800..85a0052bb0e6 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -781,7 +781,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev, for (i = 0; i < nr_irq_parent; i++) { int irq = irq_of_parse_and_map(np, i); - if (irq < 0) + if (!irq) continue; girq->parents[i] = irq; } diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index 6d77feda9090..d80ec0d8eaad 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -1421,8 +1421,10 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, has_config = nmk_pinctrl_dt_get_config(np, &configs); np_config = of_parse_phandle(np, "ste,config", 0); - if (np_config) + if (np_config) { has_config |= nmk_pinctrl_dt_get_config(np_config, &configs); + of_node_put(np_config); + } if (has_config) { const char *gpio_name; const char *pin; diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index e20bcc835d6a..82b658a3c220 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -815,6 +815,7 @@ static int amd_gpio_suspend(struct device *dev) { struct amd_gpio *gpio_dev = dev_get_drvdata(dev); struct pinctrl_desc *desc = gpio_dev->pctrl->desc; + unsigned long flags; int i; for (i = 0; i < desc->npins; i++) { @@ -823,7 +824,9 @@ static int amd_gpio_suspend(struct device *dev) if (!amd_gpio_should_save(gpio_dev, pin)) continue; - gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4); + raw_spin_lock_irqsave(&gpio_dev->lock, flags); + gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING; + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); } return 0; @@ -833,6 +836,7 @@ static int amd_gpio_resume(struct device *dev) { struct amd_gpio *gpio_dev = dev_get_drvdata(dev); struct pinctrl_desc *desc = gpio_dev->pctrl->desc; + unsigned long flags; int i; for (i = 0; i < desc->npins; i++) { @@ -841,7 +845,10 @@ static int 
amd_gpio_resume(struct device *dev) if (!amd_gpio_should_save(gpio_dev, pin)) continue; - writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4); + raw_spin_lock_irqsave(&gpio_dev->lock, flags); + gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING; + writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4); + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); } return 0; diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index ec761ba2a2da..989a37fb402d 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c @@ -1374,10 +1374,10 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl) } irq = irq_of_parse_and_map(child, 0); - if (irq < 0) { - dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq); + if (!irq) { + dev_err(pctl->dev, "No IRQ for bank %u\n", i); of_node_put(child); - ret = irq; + ret = -EINVAL; goto err; } diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 17aa0d542d92..d139cd9e6d13 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -726,7 +726,7 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs) mux_bytes = pcs->width / BITS_PER_BYTE; - if (pcs->bits_per_mux) { + if (pcs->bits_per_mux && pcs->fmask) { pcs->bits_per_pin = fls(pcs->fmask); nr_pins = (pcs->size * BITS_PER_BYTE) / pcs->bits_per_pin; num_pins_in_register = pcs->width / pcs->bits_per_pin; diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c index 396db12ae904..bf68913ba821 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8916.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c @@ -844,8 +844,8 @@ static const struct msm_pingroup msm8916_groups[] = { PINGROUP(28, pwr_modem_enabled_a, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac), PINGROUP(29, cci_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, atest_combodac), PINGROUP(30, cci_i2c, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b), - PINGROUP(31, cci_timer0, NA, NA, NA, NA, NA, NA, NA, NA), - PINGROUP(32, cci_timer1, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(31, cci_timer0, flash_strobe, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(32, cci_timer1, flash_strobe, NA, NA, NA, NA, NA, NA, NA), PINGROUP(33, cci_async, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b), PINGROUP(34, pwr_nav_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b), PINGROUP(35, pwr_crypto_enabled_a, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b), diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c index af144e724bd9..3bd7f9fedcc3 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8250.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c @@ -1316,7 +1316,7 @@ static const struct msm_pingroup sm8250_groups[] = { static const struct msm_gpio_wakeirq_map sm8250_pdc_map[] = { { 0, 79 }, { 1, 84 }, { 2, 80 }, { 3, 82 }, { 4, 107 }, { 7, 43 }, { 11, 42 }, { 14, 44 }, { 15, 52 }, { 19, 67 }, { 23, 68 }, { 24, 105 }, - { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 37 }, + { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 73 }, { 40, 108 }, { 43, 71 }, { 45, 72 }, { 47, 83 }, { 51, 74 }, { 55, 77 }, { 59, 78 }, { 63, 75 }, { 64, 81 }, { 65, 87 }, { 66, 88 }, { 67, 89 }, { 68, 54 }, { 70, 85 }, { 77, 46 }, { 80, 90 }, { 81, 91 }, { 83, 97 }, diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c index 258972672eda..54f1a7334027 100644 --- a/drivers/pinctrl/renesas/core.c +++ b/drivers/pinctrl/renesas/core.c @@ -71,12 
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 258972672eda..54f1a7334027 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -71,12 +71,11 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc,
 
 	/* Fill them. */
 	for (i = 0; i < num_windows; i++) {
-		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
-		windows->phys = res->start;
-		windows->size = resource_size(res);
-		windows->virt = devm_ioremap_resource(pfc->dev, res);
+		windows->virt = devm_platform_get_and_ioremap_resource(pdev, i, &res);
 		if (IS_ERR(windows->virt))
 			return -ENOMEM;
+		windows->phys = res->start;
+		windows->size = resource_size(res);
 		windows++;
 	}
 	for (i = 0; i < num_irqs; i++)
diff --git a/drivers/pinctrl/renesas/pinctrl-rzn1.c b/drivers/pinctrl/renesas/pinctrl-rzn1.c
index ef5fb25b6016..849d091205d4 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzn1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzn1.c
@@ -865,17 +865,15 @@ static int rzn1_pinctrl_probe(struct platform_device *pdev)
 	ipctl->mdio_func[0] = -1;
 	ipctl->mdio_func[1] = -1;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	ipctl->lev1_protect_phys = (u32)res->start + 0x400;
-	ipctl->lev1 = devm_ioremap_resource(&pdev->dev, res);
+	ipctl->lev1 = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
 	if (IS_ERR(ipctl->lev1))
 		return PTR_ERR(ipctl->lev1);
+	ipctl->lev1_protect_phys = (u32)res->start + 0x400;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	ipctl->lev2_protect_phys = (u32)res->start + 0x400;
-	ipctl->lev2 = devm_ioremap_resource(&pdev->dev, res);
+	ipctl->lev2 = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
 	if (IS_ERR(ipctl->lev2))
 		return PTR_ERR(ipctl->lev2);
+	ipctl->lev2_protect_phys = (u32)res->start + 0x400;
 
 	ipctl->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(ipctl->clk))
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index dfd805e76862..7b0576f71376 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -4,14 +4,13 @@
 #
 config PINCTRL_SAMSUNG
 	bool
-	depends on OF_GPIO
+	select GPIOLIB
 	select PINMUX
 	select PINCONF
 
 config PINCTRL_EXYNOS
 	bool "Pinctrl common driver part for Samsung Exynos SoCs"
-	depends on OF_GPIO
-	depends on ARCH_EXYNOS || ARCH_S5PV210 || COMPILE_TEST
+	depends on ARCH_EXYNOS || ARCH_S5PV210 || (COMPILE_TEST && OF)
 	select PINCTRL_SAMSUNG
 	select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
 	select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
@@ -26,12 +25,10 @@ config PINCTRL_EXYNOS_ARM64
 
 config PINCTRL_S3C24XX
 	bool "Samsung S3C24XX SoC pinctrl driver"
-	depends on OF_GPIO
-	depends on ARCH_S3C24XX || COMPILE_TEST
+	depends on ARCH_S3C24XX || (COMPILE_TEST && OF)
 	select PINCTRL_SAMSUNG
 
 config PINCTRL_S3C64XX
 	bool "Samsung S3C64XX SoC pinctrl driver"
-	depends on OF_GPIO
-	depends on ARCH_S3C64XX || COMPILE_TEST
+	depends on ARCH_S3C64XX || (COMPILE_TEST && OF)
 	select PINCTRL_SAMSUNG
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index e13723bb2be4..60406f1f8337 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -225,6 +225,13 @@ static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
 	pinctrl_gpio_free(chip->base + offset);
 }
 
+static int stm32_gpio_get_noclk(struct gpio_chip *chip, unsigned int offset)
+{
+	struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+
+	return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+}
+
 static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
@@ -232,7 +239,7 @@ static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
 
 	clk_enable(bank->clk);
 
-	ret = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+	ret = stm32_gpio_get_noclk(chip, offset);
 
 	clk_disable(bank->clk);
 
@@ -311,8 +318,12 @@ static void stm32_gpio_irq_trigger(struct irq_data *d)
 	struct stm32_gpio_bank *bank = d->domain->host_data;
 	int level;
 
+	/* Do not access the GPIO if this is not LEVEL triggered IRQ. */
+	if (!(bank->irq_type[d->hwirq] & IRQ_TYPE_LEVEL_MASK))
+		return;
+
 	/* If level interrupt type then retrig */
-	level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
+	level = stm32_gpio_get_noclk(&bank->gpio_chip, d->hwirq);
 	if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
 	    (level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
 		irq_chip_retrigger_hierarchy(d);
@@ -354,6 +365,7 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
 {
 	struct stm32_gpio_bank *bank = irq_data->domain->host_data;
 	struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
+	unsigned long flags;
 	int ret;
 
 	ret = stm32_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq);
@@ -367,6 +379,10 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
 		return ret;
 	}
 
+	flags = irqd_get_trigger_type(irq_data);
+	if (flags & IRQ_TYPE_LEVEL_MASK)
+		clk_enable(bank->clk);
+
 	return 0;
 }
 
@@ -374,6 +390,9 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
 {
 	struct stm32_gpio_bank *bank = irq_data->domain->host_data;
 
+	if (bank->irq_type[irq_data->hwirq] & IRQ_TYPE_LEVEL_MASK)
+		clk_disable(bank->clk);
+
 	gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
 }
 
@@ -1284,15 +1303,17 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
 	bank->bank_ioport_nr = bank_ioport_nr;
 	spin_lock_init(&bank->lock);
 
-	/* create irq hierarchical domain */
-	bank->fwnode = of_node_to_fwnode(np);
+	if (pctl->domain) {
+		/* create irq hierarchical domain */
+		bank->fwnode = of_node_to_fwnode(np);
 
-	bank->domain = irq_domain_create_hierarchy(pctl->domain, 0,
-						   STM32_GPIO_IRQ_LINE, bank->fwnode,
-						   &stm32_gpio_domain_ops, bank);
+		bank->domain = irq_domain_create_hierarchy(pctl->domain, 0, STM32_GPIO_IRQ_LINE,
+							   bank->fwnode, &stm32_gpio_domain_ops,
+							   bank);
 
-	if (!bank->domain)
-		return -ENODEV;
+		if (!bank->domain)
+			return -ENODEV;
+	}
 
 	err = gpiochip_add_data(&bank->gpio_chip, bank);
 	if (err) {
@@ -1462,6 +1483,8 @@ int stm32_pctl_probe(struct platform_device *pdev)
 	pctl->domain = stm32_pctrl_get_irq_domain(np);
 	if (IS_ERR(pctl->domain))
 		return PTR_ERR(pctl->domain);
+	if (!pctl->domain)
+		dev_warn(dev, "pinctrl without interrupt support\n");
 
 	/* hwspinlock is optional */
 	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
index 21054fcacd34..18088f6f44b2 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
@@ -98,7 +98,7 @@ MODULE_DEVICE_TABLE(of, a100_r_pinctrl_match);
 static struct platform_driver a100_r_pinctrl_driver = {
 	.probe = a100_r_pinctrl_probe,
 	.driver = {
-		.name = "sun50iw10p1-r-pinctrl",
+		.name = "sun50i-a100-r-pinctrl",
 		.of_match_table = a100_r_pinctrl_match,
 	},
 };
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
index 4557e18d5989..12c40f9c1a24 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6-r.c
@@ -105,6 +105,7 @@ static const struct sunxi_pinctrl_desc sun50i_h6_r_pinctrl_data = {
 	.npins = ARRAY_SIZE(sun50i_h6_r_pins),
 	.pin_base = PL_BASE,
 	.irq_banks = 2,
+	.io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
 };
 
 static int sun50i_h6_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 4ada80317a3b..b5c1a8f363f3 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -158,26 +158,26 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x2, "nand"),		/* DQ6 */
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* DQ6 */
 		  SUNXI_FUNCTION(0x3, "mmc2")),		/* D6 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x2, "nand"),		/* DQ7 */
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* DQ7 */
 		  SUNXI_FUNCTION(0x3, "mmc2")),		/* D7 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x2, "nand"),		/* DQS */
+		  SUNXI_FUNCTION(0x2, "nand0"),		/* DQS */
 		  SUNXI_FUNCTION(0x3, "mmc2")),		/* RST */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x2, "nand")),		/* CE2 */
+		  SUNXI_FUNCTION(0x2, "nand0")),	/* CE2 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x2, "nand")),		/* CE3 */
+		  SUNXI_FUNCTION(0x2, "nand0")),	/* CE3 */
 	/* Hole */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
index 2801ca706273..68a5b627fb9b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
@@ -204,7 +204,7 @@ static const struct sunxi_desc_pin suniv_f1c100s_pins[] = {
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "lcd"),		/* D20 */
-		  SUNXI_FUNCTION(0x3, "lvds1"),		/* RX */
+		  SUNXI_FUNCTION(0x3, "uart2"),		/* RX */
 		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)),
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index be7f4f95f455..e4b41cc6c586 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -544,6 +544,8 @@ static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
 	struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
 	int i;
 
+	pin -= pctl->desc->pin_base;
+
 	for (i = 0; i < num_configs; i++) {
 		enum pin_config_param param;
 		unsigned long flags;
@@ -622,7 +624,7 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
 					 unsigned pin,
 					 struct regulator *supply)
 {
-	unsigned short bank = pin / PINS_PER_BANK;
+	unsigned short bank;
 	unsigned long flags;
 	u32 val, reg;
 	int uV;
@@ -638,6 +640,9 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
 	if (uV == 0)
 		return 0;
 
+	pin -= pctl->desc->pin_base;
+	bank = pin / PINS_PER_BANK;
+
 	switch (pctl->desc->io_bias_cfg_variant) {
 	case BIAS_VOLTAGE_GRP_CONFIG:
 		/*
@@ -655,8 +660,6 @@ static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl,
 		else
 			val = 0xD; /* 3.3V */
 
-		pin -= pctl->desc->pin_base;
-
 		reg = readl(pctl->membase + sunxi_grp_config_reg(pin));
 		reg &= ~IO_BIAS_MASK;
 		writel(reg | val, pctl->membase + sunxi_grp_config_reg(pin));
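The pinctrl-sunxi hunks above fix configuration on controllers whose pin space does not start at zero (the R_PIO blocks): the pinconf callbacks receive global pin numbers, and both the per-pin register lookup and the bank used for IO-bias selection must be computed from the controller-local number. A hedged sketch of the normalization, with simplified, hypothetical types rather than the driver's actual structures:

#define EXAMPLE_PINS_PER_BANK 32

struct example_pinctrl_desc {
	unsigned int pin_base;	/* first global pin number owned by this controller */
};

/* Normalize to a local pin number before deriving the bank; computing the
 * bank from the global number is exactly the bug the hunks above remove. */
static unsigned int example_pin_to_bank(const struct example_pinctrl_desc *desc,
					unsigned int global_pin)
{
	unsigned int local = global_pin - desc->pin_base;

	return local / EXAMPLE_PINS_PER_BANK;
}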
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 472a03daa869..109c191d35cf 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -718,6 +718,7 @@ static int __init
 chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
 					const struct chromeos_laptop *src)
 {
+	struct i2c_peripheral *i2c_peripherals;
 	struct i2c_peripheral *i2c_dev;
 	struct i2c_board_info *info;
 	int i;
@@ -726,17 +727,15 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
 	if (!src->num_i2c_peripherals)
 		return 0;
 
-	cros_laptop->i2c_peripherals = kmemdup(src->i2c_peripherals,
-					       src->num_i2c_peripherals *
-					       sizeof(*src->i2c_peripherals),
-					       GFP_KERNEL);
-	if (!cros_laptop->i2c_peripherals)
+	i2c_peripherals = kmemdup(src->i2c_peripherals,
+				  src->num_i2c_peripherals *
+				  sizeof(*src->i2c_peripherals),
+				  GFP_KERNEL);
+	if (!i2c_peripherals)
 		return -ENOMEM;
 
-	cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
-
-	for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
-		i2c_dev = &cros_laptop->i2c_peripherals[i];
+	for (i = 0; i < src->num_i2c_peripherals; i++) {
+		i2c_dev = &i2c_peripherals[i];
 		info = &i2c_dev->board_info;
 
 		error = chromeos_laptop_setup_irq(i2c_dev);
@@ -754,16 +753,19 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
 		}
 	}
 
+	cros_laptop->i2c_peripherals = i2c_peripherals;
+	cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
+
 	return 0;
 
 err_out:
 	while (--i >= 0) {
-		i2c_dev = &cros_laptop->i2c_peripherals[i];
+		i2c_dev = &i2c_peripherals[i];
 		info = &i2c_dev->board_info;
 		if (info->properties)
 			property_entries_free(info->properties);
 	}
-	kfree(cros_laptop->i2c_peripherals);
+	kfree(i2c_peripherals);
 	return error;
 }
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index 3104680b7485..5a622666a075 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -121,16 +121,16 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
 	buf.msg.command = EC_CMD_HOST_SLEEP_EVENT;
 
 	ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
-
-	/* For now, report failure to transition to S0ix with a warning. */
+	/* Report failure to transition to system wide suspend with a warning. */
 	if (ret >= 0 && ec_dev->host_sleep_v1 &&
-	    (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME)) {
+	    (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME ||
+	     sleep_event == HOST_SLEEP_EVENT_S3_RESUME)) {
 		ec_dev->last_resume_result =
 			buf.u.resp1.resume_response.sleep_transitions;
 
 		WARN_ONCE(buf.u.resp1.resume_response.sleep_transitions &
 			  EC_HOST_RESUME_SLEEP_TIMEOUT,
-			  "EC detected sleep transition timeout. Total slp_s0 transitions: %d",
+			  "EC detected sleep transition timeout. Total sleep transitions: %d",
 			  buf.u.resp1.resume_response.sleep_transitions &
 			  EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK);
 	}
@@ -175,6 +175,8 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
 	ec_dev->max_request = sizeof(struct ec_params_hello);
 	ec_dev->max_response = sizeof(struct ec_response_get_protocol_info);
 	ec_dev->max_passthru = 0;
+	ec_dev->ec = NULL;
+	ec_dev->pd = NULL;
 
 	ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL);
 	if (!ec_dev->din)
@@ -231,18 +233,16 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
 		if (IS_ERR(ec_dev->pd)) {
 			dev_err(ec_dev->dev,
 				"Failed to create CrOS PD platform device\n");
-			platform_device_unregister(ec_dev->ec);
-			return PTR_ERR(ec_dev->pd);
+			err = PTR_ERR(ec_dev->pd);
+			goto exit;
 		}
 	}
 
 	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
 		err = devm_of_platform_populate(dev);
 		if (err) {
-			platform_device_unregister(ec_dev->pd);
-			platform_device_unregister(ec_dev->ec);
 			dev_err(dev, "Failed to register sub-devices\n");
-			return err;
+			goto exit;
 		}
 	}
 
@@ -264,12 +264,16 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
 		err = blocking_notifier_chain_register(&ec_dev->event_notifier,
 						      &ec_dev->notifier_ready);
 		if (err)
-			return err;
+			goto exit;
 	}
 
 	dev_info(dev, "Chrome EC device registered\n");
 
 	return 0;
+
+exit:
+	platform_device_unregister(ec_dev->ec);
+	platform_device_unregister(ec_dev->pd);
+	return err;
 }
 EXPORT_SYMBOL(cros_ec_register);
 
@@ -328,10 +332,16 @@ EXPORT_SYMBOL(cros_ec_suspend);
 
 static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
 {
+	bool wake_event;
+
 	while (ec_dev->mkbp_event_supported &&
-	       cros_ec_get_next_event(ec_dev, NULL, NULL) > 0)
+	       cros_ec_get_next_event(ec_dev, &wake_event, NULL) > 0) {
 		blocking_notifier_call_chain(&ec_dev->event_notifier,
 					     1, ec_dev);
+
+		if (wake_event && device_may_wakeup(ec_dev->dev))
+			pm_wakeup_event(ec_dev->dev, 0);
+	}
 }
 
 /**
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index e0bce869c49a..0de7c255254e 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -301,7 +301,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
 	}
 
 	s_cmd->command += ec->cmd_offset;
-	ret = cros_ec_cmd_xfer_status(ec->ec_dev, s_cmd);
+	ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
 	/* Only copy data to userland if data was received. */
 	if (ret < 0)
 		goto exit;
@@ -327,6 +327,9 @@ static long cros_ec_chardev_ioctl_readmem(struct cros_ec_dev *ec,
 	if (copy_from_user(&s_mem, arg, sizeof(s_mem)))
 		return -EFAULT;
 
+	if (s_mem.bytes > sizeof(s_mem.buffer))
+		return -EINVAL;
+
 	num = ec_dev->cmd_readmem(ec_dev, s_mem.offset, s_mem.bytes,
 				  s_mem.buffer);
 	if (num <= 0)
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 272c89837d74..0dbceee87a4b 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -25,6 +25,9 @@
 
 #define CIRC_ADD(idx, size, value)	(((idx) + (value)) & ((size) - 1))
 
+/* waitqueue for log readers */
+static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
+
 /**
  * struct cros_ec_debugfs - EC debugging information.
  *
@@ -33,7 +36,6 @@
  * @log_buffer: circular buffer for console log information
  * @read_msg: preallocated EC command and buffer to read console log
  * @log_mutex: mutex to protect circular buffer
- * @log_wq: waitqueue for log readers
 * @log_poll_work: recurring task to poll EC for new console log data
 * @panicinfo_blob: panicinfo debugfs blob
 */
@@ -44,7 +46,6 @@ struct cros_ec_debugfs {
 	struct circ_buf log_buffer;
 	struct cros_ec_command *read_msg;
 	struct mutex log_mutex;
-	wait_queue_head_t log_wq;
 	struct delayed_work log_poll_work;
 	/* EC panicinfo */
 	struct debugfs_blob_wrapper panicinfo_blob;
@@ -107,7 +108,7 @@ static void cros_ec_console_log_work(struct work_struct *__work)
 			buf_space--;
 		}
 
-		wake_up(&debug_info->log_wq);
+		wake_up(&cros_ec_debugfs_log_wq);
 	}
 
 	mutex_unlock(&debug_info->log_mutex);
@@ -141,7 +142,7 @@ static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
 
 		mutex_unlock(&debug_info->log_mutex);
 
-		ret = wait_event_interruptible(debug_info->log_wq,
+		ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
 					CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
 		if (ret < 0)
 			return ret;
@@ -173,7 +174,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file,
 	struct cros_ec_debugfs *debug_info = file->private_data;
 	__poll_t mask = 0;
 
-	poll_wait(file, &debug_info->log_wq, wait);
+	poll_wait(file, &cros_ec_debugfs_log_wq, wait);
 
 	mutex_lock(&debug_info->log_mutex);
 	if (CIRC_CNT(debug_info->log_buffer.head,
@@ -377,7 +378,6 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
 	debug_info->log_buffer.tail = 0;
 
 	mutex_init(&debug_info->log_mutex);
-	init_waitqueue_head(&debug_info->log_wq);
 
 	debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
 			    debug_info, &cros_ec_console_log_fops);
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 9f698a7aad12..8ffbf92ec1e0 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -507,13 +507,13 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
 	ret = cros_ec_get_host_command_version_mask(ec_dev,
 						    EC_CMD_GET_NEXT_EVENT,
 						    &ver_mask);
-	if (ret < 0 || ver_mask == 0)
+	if (ret < 0 || ver_mask == 0) {
 		ec_dev->mkbp_event_supported = 0;
-	else
+	} else {
 		ec_dev->mkbp_event_supported = fls(ver_mask);
 
-	dev_dbg(ec_dev->dev, "MKBP support version %u\n",
-		ec_dev->mkbp_event_supported - 1);
+		dev_dbg(ec_dev->dev, "MKBP support version %u\n", ec_dev->mkbp_event_supported - 1);
+	}
 
 	/* Probe if host sleep v1 is supported for S0ix failure detection. */
 	ret = cros_ec_get_host_command_version_mask(ec_dev,
@@ -560,22 +560,28 @@ exit:
 EXPORT_SYMBOL(cros_ec_query_all);
 
 /**
- * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
 * @ec_dev: EC device.
 * @msg: Message to write.
 *
- * Call this to send a command to the ChromeOS EC. This should be used instead of calling the EC's
- * cmd_xfer() callback directly. It returns success status only if both the command was transmitted
- * successfully and the EC replied with success status.
+ * Call this to send a command to the ChromeOS EC. This should be used instead
+ * of calling the EC's cmd_xfer() callback directly. This function does not
+ * convert EC command execution error codes to Linux error codes. Most
+ * in-kernel users will want to use cros_ec_cmd_xfer_status() instead since
+ * that function implements the conversion.
 *
 * Return:
- * >=0 - The number of bytes transferred
- * <0 - Linux error code
+ * >0 - EC command was executed successfully. The return value is the number
+ *      of bytes returned by the EC (excluding the header).
+ * =0 - EC communication was successful. EC command execution results are
+ *      reported in msg->result. The result will be EC_RES_SUCCESS if the
+ *      command was executed successfully or report an EC command execution
+ *      error.
+ * <0 - EC communication error. Return value is the Linux error code.
 */
-int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
-			    struct cros_ec_command *msg)
+int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
 {
-	int ret, mapped;
+	int ret;
 
 	mutex_lock(&ec_dev->lock);
 	if (ec_dev->proto_version == EC_PROTO_VERSION_UNKNOWN) {
@@ -616,6 +622,32 @@ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
 	ret = send_command(ec_dev, msg);
 	mutex_unlock(&ec_dev->lock);
 
+	return ret;
+}
+EXPORT_SYMBOL(cros_ec_cmd_xfer);
+
+/**
+ * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * Call this to send a command to the ChromeOS EC. This should be used instead of calling the EC's
+ * cmd_xfer() callback directly. It returns success status only if both the command was transmitted
+ * successfully and the EC replied with success status.
+ *
+ * Return:
+ * >=0 - The number of bytes transferred.
+ * <0 - Linux error code
+ */
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
+			    struct cros_ec_command *msg)
+{
+	int ret, mapped;
+
+	ret = cros_ec_cmd_xfer(ec_dev, msg);
+	if (ret < 0)
+		return ret;
+
 	mapped = cros_ec_map_error(msg->result);
 	if (mapped) {
 		dev_dbg(ec_dev->dev, "Command result (err: %d [%d])\n",
@@ -716,6 +748,7 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
 	u8 event_type;
 	u32 host_event;
 	int ret;
+	u32 ver_mask;
 
 	/*
 	 * Default value for wake_event.
@@ -737,6 +770,37 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
 		return get_keyboard_state_event(ec_dev);
 
 	ret = get_next_event(ec_dev);
+	/*
+	 * -ENOPROTOOPT is returned when EC returns EC_RES_INVALID_VERSION.
+	 * This can occur when EC based device (e.g. Fingerprint MCU) jumps to
+	 * the RO image which doesn't support newer version of the command. In
+	 * this case we will attempt to update maximum supported version of the
+	 * EC_CMD_GET_NEXT_EVENT.
+	 */
+	if (ret == -ENOPROTOOPT) {
+		dev_dbg(ec_dev->dev,
+			"GET_NEXT_EVENT returned invalid version error.\n");
+		ret = cros_ec_get_host_command_version_mask(ec_dev,
+							EC_CMD_GET_NEXT_EVENT,
+							&ver_mask);
+		if (ret < 0 || ver_mask == 0)
+			/*
+			 * Do not change the MKBP supported version if we can't
+			 * obtain supported version correctly. Please note that
+			 * calling EC_CMD_GET_NEXT_EVENT returned
+			 * EC_RES_INVALID_VERSION which means that the command
+			 * is present.
+			 */
+			return -ENOPROTOOPT;
+
+		ec_dev->mkbp_event_supported = fls(ver_mask);
+		dev_dbg(ec_dev->dev, "MKBP support version changed to %u\n",
+			ec_dev->mkbp_event_supported - 1);
+
+		/* Try to get next event with new MKBP support version set. */
+		ret = get_next_event(ec_dev);
+	}
+
 	if (ret <= 0)
 		return ret;
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index 8ac149173c64..495da331ca2d 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -17,7 +17,7 @@ menuconfig MIPS_PLATFORM_DEVICES
 if MIPS_PLATFORM_DEVICES
 
 config CPU_HWMON
-	tristate "Loongson-3 CPU HWMon Driver"
+	bool "Loongson-3 CPU HWMon Driver"
 	depends on MACH_LOONGSON64
 	select HWMON
 	default y
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index 386389ffec41..d8c5f9195f85 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -55,55 +55,6 @@ out:
 static int nr_packages;
 static struct device *cpu_hwmon_dev;
 
-static SENSOR_DEVICE_ATTR(name, 0444, NULL, NULL, 0);
-
-static struct attribute *cpu_hwmon_attributes[] = {
-	&sensor_dev_attr_name.dev_attr.attr,
-	NULL
-};
-
-/* Hwmon device attribute group */
-static struct attribute_group cpu_hwmon_attribute_group = {
-	.attrs = cpu_hwmon_attributes,
-};
-
-static ssize_t get_cpu_temp(struct device *dev,
-			struct device_attribute *attr, char *buf);
-static ssize_t cpu_temp_label(struct device *dev,
-			struct device_attribute *attr, char *buf);
-
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4);
-
-static const struct attribute *hwmon_cputemp[4][3] = {
-	{
-		&sensor_dev_attr_temp1_input.dev_attr.attr,
-		&sensor_dev_attr_temp1_label.dev_attr.attr,
-		NULL
-	},
-	{
-		&sensor_dev_attr_temp2_input.dev_attr.attr,
-		&sensor_dev_attr_temp2_label.dev_attr.attr,
-		NULL
-	},
-	{
-		&sensor_dev_attr_temp3_input.dev_attr.attr,
-		&sensor_dev_attr_temp3_label.dev_attr.attr,
-		NULL
-	},
-	{
-		&sensor_dev_attr_temp4_input.dev_attr.attr,
-		&sensor_dev_attr_temp4_label.dev_attr.attr,
-		NULL
-	}
-};
-
 static ssize_t cpu_temp_label(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
@@ -121,23 +72,46 @@ static ssize_t get_cpu_temp(struct device *dev,
 	return sprintf(buf, "%d\n", value);
 }
 
-static int create_sysfs_cputemp_files(struct kobject *kobj)
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4);
+
+static struct attribute *cpu_hwmon_attributes[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_label.dev_attr.attr,
+	&sensor_dev_attr_temp2_input.dev_attr.attr,
+	&sensor_dev_attr_temp2_label.dev_attr.attr,
+	&sensor_dev_attr_temp3_input.dev_attr.attr,
+	&sensor_dev_attr_temp3_label.dev_attr.attr,
+	&sensor_dev_attr_temp4_input.dev_attr.attr,
+	&sensor_dev_attr_temp4_label.dev_attr.attr,
+	NULL
+};
+
+static umode_t cpu_hwmon_is_visible(struct kobject *kobj,
+				    struct attribute *attr, int i)
 {
-	int i, ret = 0;
+	int id = i / 2;
 
-	for (i = 0; i < nr_packages; i++)
-		ret = sysfs_create_files(kobj, hwmon_cputemp[i]);
-
-	return ret;
+	if (id < nr_packages)
+		return attr->mode;
+	return 0;
 }
 
-static void remove_sysfs_cputemp_files(struct kobject *kobj)
-{
-	int i;
+static struct attribute_group cpu_hwmon_group = {
+	.attrs = cpu_hwmon_attributes,
+	.is_visible = cpu_hwmon_is_visible,
+};
 
-	for (i = 0; i < nr_packages; i++)
-		sysfs_remove_files(kobj, hwmon_cputemp[i]);
-}
+static const struct attribute_group *cpu_hwmon_groups[] = {
+	&cpu_hwmon_group,
+	NULL
+};
 
 #define CPU_THERMAL_THRESHOLD 90000
 static struct delayed_work thermal_work;
@@ -159,50 +133,31 @@ static void do_thermal_timer(struct work_struct *work)
 
 static int __init loongson_hwmon_init(void)
 {
-	int ret;
-
 	pr_info("Loongson Hwmon Enter...\n");
 
 	if (cpu_has_csr())
 		csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) &
 				  LOONGSON_CSRF_TEMP;
 
-	cpu_hwmon_dev = hwmon_device_register_with_info(NULL, "cpu_hwmon", NULL, NULL, NULL);
-	if (IS_ERR(cpu_hwmon_dev)) {
-		ret = PTR_ERR(cpu_hwmon_dev);
-		pr_err("hwmon_device_register fail!\n");
-		goto fail_hwmon_device_register;
-	}
-
 	nr_packages = loongson_sysconf.nr_cpus /
 		loongson_sysconf.cores_per_package;
 
-	ret = create_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
-	if (ret) {
-		pr_err("fail to create cpu temperature interface!\n");
-		goto fail_create_sysfs_cputemp_files;
+	cpu_hwmon_dev = hwmon_device_register_with_groups(NULL, "cpu_hwmon",
+							  NULL, cpu_hwmon_groups);
+	if (IS_ERR(cpu_hwmon_dev)) {
+		pr_err("hwmon_device_register fail!\n");
+		return PTR_ERR(cpu_hwmon_dev);
 	}
 
 	INIT_DEFERRABLE_WORK(&thermal_work, do_thermal_timer);
 	schedule_delayed_work(&thermal_work, msecs_to_jiffies(20000));
 
-	return ret;
-
-fail_create_sysfs_cputemp_files:
-	sysfs_remove_group(&cpu_hwmon_dev->kobj,
-				&cpu_hwmon_attribute_group);
-	hwmon_device_unregister(cpu_hwmon_dev);
-
-fail_hwmon_device_register:
-	return ret;
+	return 0;
 }
 
 static void __exit loongson_hwmon_exit(void)
 {
 	cancel_delayed_work_sync(&thermal_work);
-	remove_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
-	sysfs_remove_group(&cpu_hwmon_dev->kobj,
-				&cpu_hwmon_attribute_group);
 	hwmon_device_unregister(cpu_hwmon_dev);
 }
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 2db7113383fd..89d9fca02fe9 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -265,7 +265,7 @@ static ssize_t ec_dbgfs_cmd_write(struct file *file, const char __user *buf,
 	int i, m;
 	unsigned char ec_cmd[EC_MAX_CMD_ARGS];
 	unsigned int ec_cmd_int[EC_MAX_CMD_ARGS];
-	char cmdbuf[64];
+	char cmdbuf[64] = "";
 	int ec_cmd_bytes;
 
 	mutex_lock(&ec_dbgfs_lock);
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 80983f9dfcd5..ebec49957ed0 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -93,6 +93,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
 	{KE_KEY, 0x22, {KEY_PROG2} },    /* Arcade */
 	{KE_KEY, 0x23, {KEY_PROG3} },    /* P_Key */
 	{KE_KEY, 0x24, {KEY_PROG4} },    /* Social networking_Key */
+	{KE_KEY, 0x27, {KEY_HELP} },
 	{KE_KEY, 0x29, {KEY_PROG3} },    /* P_Key for TM8372 */
 	{KE_IGNORE, 0x41, {KEY_MUTE} },
 	{KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
@@ -106,7 +107,13 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
 	{KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
 	{KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
 	{KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} },
-	{KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
+	/*
+	 * 0x61 is KEY_SWITCHVIDEOMODE. Usually this is a duplicate input event
+	 * with the "Video Bus" input device events. But sometimes it is not
+	 * a dup. Map it to KEY_UNKNOWN instead of using KE_IGNORE so that
+	 * udev/hwdb can override it on systems where it is not a dup.
+	 */
+	{KE_KEY, 0x61, {KEY_UNKNOWN} },
 	{KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
 	{KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
 	{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} },	/* Display Switch */
@@ -529,6 +536,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
 		},
 		.driver_data = (void *)ACER_CAP_KBD_DOCK,
 	},
+	{
+		.callback = set_force_caps,
+		.ident = "Acer Aspire Switch V 10 SW5-017",
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
+		},
+		.driver_data = (void *)ACER_CAP_KBD_DOCK,
+	},
 	{
 		.callback = set_force_caps,
 		.ident = "Acer One 10 (S1003)",
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 39e1a6396e08..db369cf26111 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -1212,6 +1212,8 @@ static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
 	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
 				cpu_to_le32(ports_available));
 
+	pci_dev_put(xhci_pdev);
+
 	pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n",
 			orig_ports_available, ports_available);
 }
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index e94e59283ecb..6642d09b17b5 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -62,6 +62,8 @@ enum hp_wmi_event_ids {
 	HPWMI_BACKLIT_KB_BRIGHTNESS	= 0x0D,
 	HPWMI_PEAKSHIFT_PERIOD		= 0x0F,
 	HPWMI_BATTERY_CHARGE_PERIOD	= 0x10,
+	HPWMI_SANITIZATION_MODE		= 0x17,
+	HPWMI_SMART_EXPERIENCE_APP	= 0x21,
 };
 
 struct bios_args {
@@ -629,6 +631,10 @@ static void hp_wmi_notify(u32 value, void *context)
 		break;
 	case HPWMI_BATTERY_CHARGE_PERIOD:
 		break;
+	case HPWMI_SANITIZATION_MODE:
+		break;
+	case HPWMI_SMART_EXPERIENCE_APP:
+		break;
 	default:
 		pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
 		break;
@@ -897,8 +903,16 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
 	wwan_rfkill = NULL;
 	rfkill2_count = 0;
 
-	if (hp_wmi_rfkill_setup(device))
-		hp_wmi_rfkill2_setup(device);
+	/*
+	 * In pre-2009 BIOS, command 1Bh returns 0x4 to indicate that
+	 * BIOS no longer controls the power for the wireless
+	 * devices. All features supported by this command will no
+	 * longer be supported.
+	 */
+	if (!hp_wmi_bios_2009_later()) {
+		if (hp_wmi_rfkill_setup(device))
+			hp_wmi_rfkill2_setup(device);
+	}
 
 	thermal_policy_setup(device);
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 8a0cd5bf0065..cebddefba2f4 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -93,6 +93,13 @@ static const struct dmi_system_id button_array_table[] = {
 			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
 		},
 	},
+	{
+		.ident = "Microsoft Surface Go 3",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+		},
+	},
 	{ }
 };
diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c
index 15ca8afdd973..ddfba38c2104 100644
--- a/drivers/platform/x86/intel_pmc_core_pltdrv.c
+++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c
@@ -18,6 +18,8 @@
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
 
+#include <xen/xen.h>
+
 static void intel_pmc_core_release(struct device *dev)
 {
 	kfree(dev);
@@ -53,6 +55,13 @@ static int __init pmc_core_platform_init(void)
 	if (acpi_dev_present("INT33A1", NULL, -1))
 		return -ENODEV;
 
+	/*
+	 * Skip forcefully attaching the device for VMs. Make an exception for
+	 * Xen dom0, which does have full hardware access.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR) && !xen_initial_domain())
+		return -ENODEV;
+
 	if (!x86_match_cpu(intel_pmc_core_platform_ids))
 		return -ENODEV;
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 24ffc8e2d2d1..0e804b6c2d24 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -596,11 +596,10 @@ static const struct dmi_system_id msi_dmi_table[] __initconst = {
 	{
 		.ident = "MSI S270",
 		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD"),
+			DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "MS-1013"),
 			DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
-			DMI_MATCH(DMI_CHASSIS_VENDOR,
-				  "MICRO-STAR INT'L CO.,LTD")
+			DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
 		},
 		.driver_data = &quirk_old_ec_model,
 		.callback = dmi_check_cb
@@ -633,8 +632,7 @@ static const struct dmi_system_id msi_dmi_table[] __initconst = {
 			DMI_MATCH(DMI_SYS_VENDOR, "NOTEBOOK"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "SAM2000"),
 			DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
-			DMI_MATCH(DMI_CHASSIS_VENDOR,
-				  "MICRO-STAR INT'L CO.,LTD")
+			DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
 		},
 		.driver_data = &quirk_old_ec_model,
 		.callback = dmi_check_cb
@@ -1048,8 +1046,7 @@ static int __init msi_init(void)
 		return -EINVAL;
 
 	/* Register backlight stuff */
-
-	if (quirks->old_ec_model ||
+	if (quirks->old_ec_model &&
 	    acpi_video_get_backlight_type() == acpi_backlight_vendor) {
 		struct backlight_properties props;
 		memset(&props, 0, sizeof(struct backlight_properties));
@@ -1117,6 +1114,8 @@ fail_create_attr:
 fail_create_group:
 	if (quirks->load_scm_model) {
 		i8042_remove_filter(msi_laptop_i8042_filter);
+		cancel_delayed_work_sync(&msi_touchpad_dwork);
+		input_unregister_device(msi_laptop_input_dev);
 		cancel_delayed_work_sync(&msi_rfkill_dwork);
 		cancel_work_sync(&msi_rfkill_work);
 		rfkill_cleanup();
@@ -1137,6 +1136,7 @@ static void __exit msi_cleanup(void)
 {
 	if (quirks->load_scm_model) {
 		i8042_remove_filter(msi_laptop_i8042_filter);
+		cancel_delayed_work_sync(&msi_touchpad_dwork);
 		input_unregister_device(msi_laptop_input_dev);
 		cancel_delayed_work_sync(&msi_rfkill_dwork);
 		cancel_work_sync(&msi_rfkill_work);
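The msi-laptop changes bundle three fixes: shortened DMI strings (apparently so the quirk table tolerates vendor-string variants of "MICRO-STAR INT'L CO.,LTD"), an "||" that should have been "&&" (vendor backlight registration now requires both the old-EC quirk and ACPI reporting vendor backlight control, rather than either), and error-path teardown for the touchpad work and input device that were previously leaked on failure. The teardown follows the usual goto-ladder rule: release in reverse order of acquisition, exactly once. A generic sketch with hypothetical acquire/release helpers:

int acquire_a(void);
int acquire_b(void);
void release_a(void);

/* Hypothetical init path: every resource taken before the failure point
 * is released once, in reverse order of acquisition. */
static int example_init(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto undo_a;

	return 0;

undo_a:
	release_a();
	return err;
}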
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index a9d2a4b98e57..4b0739f95f8b 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -244,7 +244,7 @@ static void pmc_power_off(void)
 	pm1_cnt_port = acpi_base_addr + PM1_CNT;
 
 	pm1_cnt_value = inl(pm1_cnt_port);
-	pm1_cnt_value &= SLEEP_TYPE_MASK;
+	pm1_cnt_value &= ~SLEEP_TYPE_MASK;
 	pm1_cnt_value |= SLEEP_TYPE_S5;
 	pm1_cnt_value |= SLEEP_ENABLE;
 
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index d5cec6e35bb8..0e456c39a603 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
 
 	if (value > samsung->kbd_led.max_brightness)
 		value = samsung->kbd_led.max_brightness;
-	else if (value < 0)
-		value = 0;
 
 	samsung->kbd_led_wk = value;
 	queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index ab6a9369649d..110ff1e6ef81 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -756,6 +756,22 @@ static const struct ts_dmi_data predia_basic_data = {
 	.properties	= predia_basic_props,
 };
 
+static const struct property_entry rca_cambio_w101_v2_props[] = {
+	PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
+	PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
+	PROPERTY_ENTRY_U32("touchscreen-size-x", 1644),
+	PROPERTY_ENTRY_U32("touchscreen-size-y", 874),
+	PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"),
+	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+	{ }
+};
+
+static const struct ts_dmi_data rca_cambio_w101_v2_data = {
+	.acpi_name = "MSSL1680:00",
+	.properties = rca_cambio_w101_v2_props,
+};
+
 static const struct property_entry rwc_nanote_p8_props[] = {
 	PROPERTY_ENTRY_U32("touchscreen-min-y", 46),
 	PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
@@ -1341,6 +1357,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
 		},
 	},
+	{
+		/* RCA Cambio W101 v2 */
+		/* https://github.com/onitake/gsl-firmware/discussions/193 */
+		.driver_data = (void *)&rca_cambio_w101_v2_data,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "RCA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "W101SA23T1"),
+		},
+	},
 	{
 		/* RWC NANOTE P8 */
 		.driver_data = (void *)&rwc_nanote_p8_data,
diff --git a/drivers/power/reset/arm-versatile-reboot.c b/drivers/power/reset/arm-versatile-reboot.c
index 08d0a07b58ef..c7624d7611a7 100644
--- a/drivers/power/reset/arm-versatile-reboot.c
+++ b/drivers/power/reset/arm-versatile-reboot.c
@@ -146,6 +146,7 @@ static int __init versatile_reboot_probe(void)
 	versatile_reboot_type = (enum versatile_reboot)reboot_id->data;
 
 	syscon_regmap = syscon_node_to_regmap(np);
+	of_node_put(np);
 	if (IS_ERR(syscon_regmap))
 		return PTR_ERR(syscon_regmap);
 
diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c
index 003557043ab3..daee1161c305 100644
--- a/drivers/power/supply/adp5061.c
+++ b/drivers/power/supply/adp5061.c
@@ -427,11 +427,11 @@ static int adp5061_get_chg_type(struct adp5061_state *st,
 	if (ret < 0)
 		return ret;
 
-	chg_type = adp5061_chg_type[ADP5061_CHG_STATUS_1_CHG_STATUS(status1)];
-	if (chg_type > ADP5061_CHG_FAST_CV)
+	chg_type = ADP5061_CHG_STATUS_1_CHG_STATUS(status1);
+	if (chg_type >= ARRAY_SIZE(adp5061_chg_type))
 		val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
 	else
-		val->intval = chg_type;
+		val->intval = adp5061_chg_type[chg_type];
 
 	return ret;
 }
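The adp5061 fix above is a bounds-check ordering bug: the raw status field was used to index the lookup table first, and the range check was applied to the looked-up value afterwards, so an out-of-range status could read past the end of adp5061_chg_type. The safe ordering, sketched with hypothetical names:

#include <linux/kernel.h>

static const int example_chg_type[] = { 1, 2, 3, 4 };

/* Validate the index against the table size before dereferencing. */
static int example_lookup(unsigned int raw_status)
{
	if (raw_status >= ARRAY_SIZE(example_chg_type))
		return -EINVAL;	/* or map to an "unknown" value */

	return example_chg_type[raw_status];
}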
diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
index e84b6e4da14a..9fda98b950ba 100644
--- a/drivers/power/supply/axp20x_battery.c
+++ b/drivers/power/supply/axp20x_battery.c
@@ -185,7 +185,6 @@ static int axp20x_battery_get_prop(struct power_supply *psy,
 				   union power_supply_propval *val)
 {
 	struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy);
-	struct iio_channel *chan;
 	int ret = 0, reg, val1;
 
 	switch (psp) {
@@ -265,12 +264,12 @@ static int axp20x_battery_get_prop(struct power_supply *psy,
 		if (ret)
 			return ret;
 
-		if (reg & AXP20X_PWR_STATUS_BAT_CHARGING)
-			chan = axp20x_batt->batt_chrg_i;
-		else
-			chan = axp20x_batt->batt_dischrg_i;
-
-		ret = iio_read_channel_processed(chan, &val->intval);
+		if (reg & AXP20X_PWR_STATUS_BAT_CHARGING) {
+			ret = iio_read_channel_processed(axp20x_batt->batt_chrg_i, &val->intval);
+		} else {
+			ret = iio_read_channel_processed(axp20x_batt->batt_dischrg_i, &val1);
+			val->intval = -val1;
+		}
 		if (ret)
 			return ret;
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index a4df1ea92386..f65bf7b295c5 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -41,11 +41,11 @@
 #define VBUS_ISPOUT_CUR_LIM_1500MA	0x1	/* 1500mA */
 #define VBUS_ISPOUT_CUR_LIM_2000MA	0x2	/* 2000mA */
 #define VBUS_ISPOUT_CUR_NO_LIM		0x3	/* 2500mA */
-#define VBUS_ISPOUT_VHOLD_SET_MASK	0x31
+#define VBUS_ISPOUT_VHOLD_SET_MASK	0x38
 #define VBUS_ISPOUT_VHOLD_SET_BIT_POS	0x3
 #define VBUS_ISPOUT_VHOLD_SET_OFFSET	4000	/* 4000mV */
 #define VBUS_ISPOUT_VHOLD_SET_LSB_RES	100	/* 100mV */
-#define VBUS_ISPOUT_VHOLD_SET_4300MV	0x3	/* 4300mV */
+#define VBUS_ISPOUT_VHOLD_SET_4400MV	0x4	/* 4400mV */
 #define VBUS_ISPOUT_VBUS_PATH_DIS	BIT(7)
 
 #define CHRG_CCCV_CC_MASK		0xf	/* 4 bits */
@@ -744,6 +744,16 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
 		ret = axp288_charger_vbus_path_select(info, true);
 		if (ret < 0)
 			return ret;
+	} else {
+		/* Set Vhold to the factory default / recommended 4.4V */
+		val = VBUS_ISPOUT_VHOLD_SET_4400MV << VBUS_ISPOUT_VHOLD_SET_BIT_POS;
+		ret = regmap_update_bits(info->regmap, AXP20X_VBUS_IPSOUT_MGMT,
+					 VBUS_ISPOUT_VHOLD_SET_MASK, val);
+		if (ret < 0) {
+			dev_err(&info->pdev->dev, "register(%x) write error(%d)\n",
+				AXP20X_VBUS_IPSOUT_MGMT, ret);
+			return ret;
+		}
 	}
 
 	/* Read current charge voltage and current limit */
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 70d6d52bc1e2..285420c1eb7c 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -938,6 +938,9 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
 		y = value & 0x1f;
 		value = (1 << y) * (4 + f) * rp->time_unit / 4;
 	} else {
+		if (value < rp->time_unit)
+			return 0;
+
 		do_div(value, rp->time_unit);
 		y = ilog2(value);
 		f = div64_u64(4 * (value - (1 << y)), 1 << y);
@@ -979,7 +982,6 @@ static const struct rapl_defaults rapl_defaults_spr_server = {
 	.check_unit = rapl_check_unit_core,
 	.set_floor_freq = set_floor_freq_default,
 	.compute_time_window = rapl_compute_time_window_core,
-	.dram_domain_energy_unit = 15300,
 	.psys_domain_energy_unit = 1000000000,
 };
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index be076a91e20e..8cd59e848163 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -13,7 +13,7 @@ static ssize_t clock_name_show(struct device *dev,
 			       struct device_attribute *attr, char *page)
 {
 	struct ptp_clock *ptp = dev_get_drvdata(dev);
-	return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name);
+	return sysfs_emit(page, "%s\n", ptp->info->name);
 }
 static DEVICE_ATTR_RO(clock_name);
 
@@ -227,7 +227,7 @@ static ssize_t ptp_pin_show(struct device *dev, struct device_attribute *attr,
 
 	mutex_unlock(&ptp->pincfg_mux);
 
-	return snprintf(page, PAGE_SIZE, "%u %u\n", func, chan);
+	return sysfs_emit(page, "%u %u\n", func, chan);
 }
 
 static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index bf3f14fb5f24..05e4120fd702 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -125,6 +125,7 @@ static int lp3943_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 	if (err)
 		return err;
 
+	duty_ns = min(duty_ns, period_ns);
 	val = (u8)(duty_ns * LP3943_MAX_DUTY / period_ns);
 
 	return lp3943_write_byte(lp3943, reg_duty, val);
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 9b15b6a79082..f32a9e0692ad 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -325,7 +325,6 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
 {
 	struct lpc18xx_pwm_chip *lpc18xx_pwm;
 	struct pwm_device *pwm;
-	struct resource *res;
 	int ret, i;
 	u64 val;
 
@@ -336,8 +335,7 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
 
 	lpc18xx_pwm->dev = &pdev->dev;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	lpc18xx_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+	lpc18xx_pwm->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(lpc18xx_pwm->base))
 		return PTR_ERR(lpc18xx_pwm->base);
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 2485fbaaead2..9cc0612f0849 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -23,7 +23,7 @@
 #define PWM_SIFIVE_PWMCFG		0x0
 #define PWM_SIFIVE_PWMCOUNT		0x8
 #define PWM_SIFIVE_PWMS			0x10
-#define PWM_SIFIVE_PWMCMP0		0x20
+#define PWM_SIFIVE_PWMCMP(i)		(0x20 + 4 * (i))
 
 /* PWMCFG fields */
 #define PWM_SIFIVE_PWMCFG_SCALE		GENMASK(3, 0)
@@ -36,8 +36,6 @@
 #define PWM_SIFIVE_PWMCFG_GANG		BIT(24)
 #define PWM_SIFIVE_PWMCFG_IP		BIT(28)
 
-/* PWM_SIFIVE_SIZE_PWMCMP is used to calculate offset for pwmcmpX registers */
-#define PWM_SIFIVE_SIZE_PWMCMP		4
 #define PWM_SIFIVE_CMPWIDTH		16
 #define PWM_SIFIVE_DEFAULT_PERIOD	10000000
 
@@ -112,8 +110,7 @@ static void pwm_sifive_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 	struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
 	u32 duty, val;
 
-	duty = readl(ddata->regs + PWM_SIFIVE_PWMCMP0 +
-		     pwm->hwpwm * PWM_SIFIVE_SIZE_PWMCMP);
+	duty = readl(ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm));
 
 	state->enabled = duty > 0;
 
@@ -194,8 +191,7 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 		pwm_sifive_update_clock(ddata, clk_get_rate(ddata->clk));
 	}
 
-	writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP0 +
-	       pwm->hwpwm * PWM_SIFIVE_SIZE_PWMCMP);
+	writel(frac, ddata->regs + PWM_SIFIVE_PWMCMP(pwm->hwpwm));
 
 	if (state->enabled != enabled)
 		pwm_sifive_enable(chip, state->enabled);
@@ -234,6 +230,8 @@ static int pwm_sifive_probe(struct platform_device *pdev)
 	struct pwm_chip *chip;
 	struct resource *res;
 	int ret;
+	u32 val;
+	unsigned int enabled_pwms = 0, enabled_clks = 1;
 
 	ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
 	if (!ddata)
@@ -264,6 +262,33 @@ static int pwm_sifive_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	val = readl(ddata->regs + PWM_SIFIVE_PWMCFG);
+	if (val & PWM_SIFIVE_PWMCFG_EN_ALWAYS) {
+		unsigned int i;
+
+		for (i = 0; i < chip->npwm; ++i) {
+			val = readl(ddata->regs + PWM_SIFIVE_PWMCMP(i));
+			if (val > 0)
+				++enabled_pwms;
+		}
+	}
+
+	/* The clk should be on once for each running PWM. */
+	if (enabled_pwms) {
+		while (enabled_clks < enabled_pwms) {
+			/* This is not expected to fail as the clk is already on */
+			ret = clk_enable(ddata->clk);
+			if (unlikely(ret)) {
+				dev_err_probe(dev, ret, "Failed to enable clk\n");
+				goto disable_clk;
+			}
+			++enabled_clks;
+		}
+	} else {
+		clk_disable(ddata->clk);
+		enabled_clks = 0;
+	}
+
 	/* Watch for changes to underlying clock frequency */
 	ddata->notifier.notifier_call = pwm_sifive_clock_notifier;
 	ret = clk_notifier_register(ddata->clk, &ddata->notifier);
@@ -286,7 +311,11 @@ static int pwm_sifive_probe(struct platform_device *pdev)
 unregister_clk:
 	clk_notifier_unregister(ddata->clk, &ddata->notifier);
 disable_clk:
-	clk_disable_unprepare(ddata->clk);
+	while (enabled_clks) {
+		clk_disable(ddata->clk);
+		--enabled_clks;
+	}
+	clk_unprepare(ddata->clk);
 
 	return ret;
 }
@@ -294,25 +323,21 @@ disable_clk:
 static int pwm_sifive_remove(struct platform_device *dev)
 {
 	struct pwm_sifive_ddata *ddata = platform_get_drvdata(dev);
-	bool is_enabled = false;
 	struct pwm_device *pwm;
-	int ret, ch;
+	int ch;
+
+	pwmchip_remove(&ddata->chip);
+	clk_notifier_unregister(ddata->clk, &ddata->notifier);
 
 	for (ch = 0; ch < ddata->chip.npwm; ch++) {
 		pwm = &ddata->chip.pwms[ch];
-		if (pwm->state.enabled) {
-			is_enabled = true;
-			break;
-		}
+		if (pwm->state.enabled)
+			clk_disable(ddata->clk);
 	}
-	if (is_enabled)
-		clk_disable(ddata->clk);
 
-	clk_disable_unprepare(ddata->clk);
-	ret = pwmchip_remove(&ddata->chip);
-	clk_notifier_unregister(ddata->clk, &ddata->notifier);
+	clk_unprepare(ddata->clk);
 
-	return ret;
+	return 0;
 }
 
 static const struct of_device_id pwm_sifive_of_match[] = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index fe51869b8693..0e2824fcac9c 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2033,10 +2033,13 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
 		rdev->exclusive = 1;
 
 		ret = _regulator_is_enabled(rdev);
-		if (ret > 0)
+		if (ret > 0) {
 			rdev->use_count = 1;
-		else
+			regulator->enable_count = 1;
+		} else {
 			rdev->use_count = 0;
+			regulator->enable_count = 0;
+		}
 	}
 
 	link = device_link_add(dev, &rdev->dev, DL_FLAG_STATELESS);
@@ -2547,7 +2550,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
 	 * expired, return -ETIMEDOUT.
 	 */
 	if (rdev->desc->poll_enabled_time) {
-		unsigned int time_remaining = delay;
+		int time_remaining = delay;
 
 		while (time_remaining > 0) {
 			_regulator_enable_delay(rdev->desc->poll_enabled_time);
@@ -2599,13 +2602,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
 */
 static int _regulator_handle_consumer_enable(struct regulator *regulator)
 {
+	int ret;
 	struct regulator_dev *rdev = regulator->rdev;
 
 	lockdep_assert_held_once(&rdev->mutex.base);
 
 	regulator->enable_count++;
-	if (regulator->uA_load && regulator->enable_count == 1)
-		return drms_uA_update(rdev);
+	if (regulator->uA_load && regulator->enable_count == 1) {
+		ret = drms_uA_update(rdev);
+		if (ret)
+			regulator->enable_count--;
+		return ret;
+	}
 
 	return 0;
 }
@@ -4922,10 +4930,19 @@ static const struct attribute_group *regulator_dev_groups[] = {
 	NULL
 };
 
+#ifdef CONFIG_DEBUG_FS
+static void rdev_deinit_debugfs(struct regulator_dev *rdev);
+#else
+static inline void rdev_deinit_debugfs(struct regulator_dev *rdev)
+{
+}
+#endif
+
 static void regulator_dev_release(struct device *dev)
 {
 	struct regulator_dev *rdev = dev_get_drvdata(dev);
 
+	rdev_deinit_debugfs(rdev);
 	kfree(rdev->constraints);
 	of_node_put(rdev->dev.of_node);
 	kfree(rdev);
@@ -5283,10 +5300,6 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
 }
 
 #else
 
-static inline void rdev_deinit_debugfs(struct regulator_dev *rdev)
-{
-}
-
 static inline void rdev_init_debugfs(struct regulator_dev *rdev)
 {
 }
@@ -5733,11 +5746,15 @@ wash:
 	mutex_lock(&regulator_list_mutex);
 	regulator_ena_gpio_free(rdev);
 	mutex_unlock(&regulator_list_mutex);
+	put_device(&rdev->dev);
+	rdev = NULL;
 clean:
 	if (dangling_of_gpiod)
 		gpiod_put(config->ena_gpiod);
+	if (rdev && rdev->dev.of_node)
+		of_node_put(rdev->dev.of_node);
+	kfree(rdev);
 	kfree(config);
-	put_device(&rdev->dev);
 rinse:
 	if (dangling_cfg_gpiod)
 		gpiod_put(cfg->ena_gpiod);
@@ -5766,7 +5783,6 @@ void regulator_unregister(struct regulator_dev *rdev)
 
 	mutex_lock(&regulator_list_mutex);
 
-	rdev_deinit_debugfs(rdev);
 	WARN_ON(rdev->open_count);
 	regulator_remove_coupling(rdev);
 	unset_regulator_supplies(rdev);
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 564f928eb1db..b11a434ec9fd 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -206,8 +206,12 @@ static int of_get_regulation_constraints(struct device *dev,
 		}
 
 		suspend_np = of_get_child_by_name(np, regulator_states[i]);
-		if (!suspend_np || !suspend_state)
+		if (!suspend_np)
 			continue;
+		if (!suspend_state) {
+			of_node_put(suspend_np);
+			continue;
+		}
 
 		if (!of_property_read_u32(suspend_np, "regulator-mode",
 					  &pval)) {
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 01a12cfcea7c..44a8e500fb30 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -531,6 +531,7 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
 	parent = of_get_child_by_name(np, "regulators");
 	if (!parent) {
 		dev_err(dev, "regulators node not found\n");
+		of_node_put(np);
 		return -EINVAL;
 	}
 
@@ -560,6 +561,7 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
 	}
 
 	of_node_put(parent);
+	of_node_put(np);
 	if (ret < 0) {
 		dev_err(dev, "Error parsing regulator init data: %d\n", ret);
@@ -789,7 +791,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
 		((pfuze_chip->chip_id == PFUZE3000) ? "3000" : "3001"))));
 	memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators,
-		sizeof(pfuze_chip->regulator_descs));
+		regulator_num * sizeof(struct pfuze_regulator));
 
 	ret = pfuze_parse_regulators_dt(pfuze_chip);
 	if (ret)
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 7f9d66ac37ff..3c41b71a1f52 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -802,6 +802,12 @@ static const struct rpm_regulator_data rpm_pm8018_regulators[] = {
 };
 
 static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
+	{ "s0", QCOM_RPM_PM8058_SMPS0, &pm8058_smps, "vdd_s0" },
+	{ "s1", QCOM_RPM_PM8058_SMPS1, &pm8058_smps, "vdd_s1" },
+	{ "s2", QCOM_RPM_PM8058_SMPS2, &pm8058_smps, "vdd_s2" },
+	{ "s3", QCOM_RPM_PM8058_SMPS3, &pm8058_smps, "vdd_s3" },
+	{ "s4", QCOM_RPM_PM8058_SMPS4, &pm8058_smps, "vdd_s4" },
+
 	{ "l0", QCOM_RPM_PM8058_LDO0, &pm8058_nldo, "vdd_l0_l1_lvs" },
 	{ "l1", QCOM_RPM_PM8058_LDO1, &pm8058_nldo, "vdd_l0_l1_lvs" },
 	{ "l2", QCOM_RPM_PM8058_LDO2, &pm8058_pldo, "vdd_l2_l11_l12" },
@@ -829,12 +835,6 @@ static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
 	{ "l24", QCOM_RPM_PM8058_LDO24, &pm8058_nldo, "vdd_l23_l24_l25" },
 	{ "l25", QCOM_RPM_PM8058_LDO25, &pm8058_nldo, "vdd_l23_l24_l25" },
 
-	{ "s0", QCOM_RPM_PM8058_SMPS0, &pm8058_smps, "vdd_s0" },
-	{ "s1", QCOM_RPM_PM8058_SMPS1, &pm8058_smps, "vdd_s1" },
-	{ "s2", QCOM_RPM_PM8058_SMPS2, &pm8058_smps, "vdd_s2" },
-	{ "s3", QCOM_RPM_PM8058_SMPS3, &pm8058_smps, "vdd_s3" },
-	{ "s4", QCOM_RPM_PM8058_SMPS4, &pm8058_smps, "vdd_s4" },
-
 	{ "lvs0", QCOM_RPM_PM8058_LVS0, &pm8058_switch, "vdd_l0_l1_lvs" },
 	{ "lvs1", QCOM_RPM_PM8058_LVS1, &pm8058_switch, "vdd_l0_l1_lvs" },
 
@@ -843,6 +843,12 @@ static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
 };
 
 static const struct rpm_regulator_data rpm_pm8901_regulators[] = {
+	{ "s0", QCOM_RPM_PM8901_SMPS0, &pm8901_ftsmps, "vdd_s0" },
+	{ "s1", QCOM_RPM_PM8901_SMPS1, &pm8901_ftsmps, "vdd_s1" },
+	{ "s2", QCOM_RPM_PM8901_SMPS2, &pm8901_ftsmps, "vdd_s2" },
+	{ "s3", QCOM_RPM_PM8901_SMPS3, &pm8901_ftsmps, "vdd_s3" },
+	{ "s4", QCOM_RPM_PM8901_SMPS4, &pm8901_ftsmps, "vdd_s4" },
+
 	{ "l0", QCOM_RPM_PM8901_LDO0, &pm8901_nldo, "vdd_l0" },
 	{ "l1", QCOM_RPM_PM8901_LDO1, &pm8901_pldo, "vdd_l1" },
 	{ "l2", QCOM_RPM_PM8901_LDO2, &pm8901_pldo, "vdd_l2" },
@@ -851,12 +857,6 @@ static const struct rpm_regulator_data rpm_pm8901_regulators[] = {
 	{ "l5", QCOM_RPM_PM8901_LDO5, &pm8901_pldo, "vdd_l5" },
 	{ "l6", QCOM_RPM_PM8901_LDO6, &pm8901_pldo, "vdd_l6" },
 
-	{ "s0", QCOM_RPM_PM8901_SMPS0, &pm8901_ftsmps, "vdd_s0" },
-	{ "s1", QCOM_RPM_PM8901_SMPS1, &pm8901_ftsmps, "vdd_s1" },
-	{ "s2", QCOM_RPM_PM8901_SMPS2, &pm8901_ftsmps, "vdd_s2" },
-	{ "s3", QCOM_RPM_PM8901_SMPS3, &pm8901_ftsmps, "vdd_s3" },
-	{ "s4", QCOM_RPM_PM8901_SMPS4, &pm8901_ftsmps, "vdd_s4" },
-
 	{ "lvs0", QCOM_RPM_PM8901_LVS0, &pm8901_switch, "lvs0_in" },
 	{ "lvs1", QCOM_RPM_PM8901_LVS1, &pm8901_switch, "lvs1_in" },
 	{ "lvs2", QCOM_RPM_PM8901_LVS2, &pm8901_switch, "lvs2_in" },
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 8d784a2a09d8..0295d7b160e5 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -313,10 +313,10 @@ static const struct regulator_desc pm8941_switch = {
 
 static const struct regulator_desc pm8916_pldo = {
 	.linear_ranges = (struct linear_range[]) {
-		REGULATOR_LINEAR_RANGE(750000, 0, 208, 12500),
+		REGULATOR_LINEAR_RANGE(1750000, 0, 127, 12500),
 	},
 	.n_linear_ranges = 1,
-	.n_voltages = 209,
+	.n_voltages = 128,
 	.ops = &rpm_smps_ldo_ops,
 };
 
@@ -844,32 +844,31 @@ static const struct rpm_regulator_data rpm_pm8950_regulators[] = {
 	{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8950_hfsmps, "vdd_s2" },
 	{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm8950_hfsmps, "vdd_s3" },
 	{ "s4", QCOM_SMD_RPM_SMPA, 4, &pm8950_hfsmps, "vdd_s4" },
-	{ "s5", QCOM_SMD_RPM_SMPA, 5, &pm8950_ftsmps2p5, "vdd_s5" },
+	/* S5 is managed via SPMI. */
 	{ "s6", QCOM_SMD_RPM_SMPA, 6, &pm8950_hfsmps, "vdd_s6" },
 
 	{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm8950_ult_nldo, "vdd_l1_l19" },
 	{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm8950_ult_nldo, "vdd_l2_l23" },
 	{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm8950_ult_nldo, "vdd_l3" },
-	{ "l4", QCOM_SMD_RPM_LDOA, 4, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
-	{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
-	{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
-	{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_pldo_lv, "vdd_l4_l5_l6_l7_l16" },
+	/* L4 seems not to exist. */
+	{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_pldo_lv, "vdd_l5_l6_l7_l16" },
+	{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_pldo_lv, "vdd_l5_l6_l7_l16" },
+	{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_pldo_lv, "vdd_l5_l6_l7_l16" },
 	{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
 	{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
 	{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm8950_ult_nldo, "vdd_l9_l10_l13_l14_l15_l18"},
-	{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
-	{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
-	{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
-	{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
-	{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
-	{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16"},
-	{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22"},
-	{ "l18", QCOM_SMD_RPM_LDOA, 18, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
-	{ "l19", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l1_l19"},
-	{ "l20", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l20"},
-	{ "l21", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l21"},
-	{ "l22", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l8_l11_l12_l17_l22"},
-	{ "l23", QCOM_SMD_RPM_LDOA, 18, &pm8950_pldo, "vdd_l2_l23"},
+	{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+	{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+	{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+	{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+	{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+	{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l5_l6_l7_l16" },
+	{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+	/* L18 seems not to exist. */
+	{ "l19", QCOM_SMD_RPM_LDOA, 19, &pm8950_pldo, "vdd_l1_l19" },
+	/* L20 & L21 seem not to exist. */
+	{ "l22", QCOM_SMD_RPM_LDOA, 22, &pm8950_pldo, "vdd_l8_l11_l12_l17_l22" },
+	{ "l23", QCOM_SMD_RPM_LDOA, 23, &pm8950_pldo, "vdd_l2_l23" },
 	{}
 };
diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
index 14d846bbf0bd..c48f7e134465 100644
--- a/drivers/regulator/scmi-regulator.c
+++ b/drivers/regulator/scmi-regulator.c
@@ -350,7 +350,7 @@ static int scmi_regulator_probe(struct scmi_device *sdev)
 			if (ret == -ENOMEM)
 				return ret;
 		}
-
+	of_node_put(np);
 	/*
 	 * Register a regulator for each valid regulator-DT-entry that we
 	 * can successfully reach via SCMI and has a valid associated voltage
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index 75a941fb3c2b..1b2eee95ad3f 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -457,6 +457,8 @@ static int slg51000_i2c_probe(struct i2c_client *client)
 		chip->cs_gpiod = cs_gpiod;
 	}
 
+	usleep_range(10000, 11000);
+
 	i2c_set_clientdata(client, chip);
 	chip->chip_irq = client->irq;
 	chip->dev = dev;
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index 430265c404d6..f3856750944f 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -67,6 +67,7 @@ struct twlreg_info {
 #define TWL6030_CFG_STATE_SLEEP	0x03
 #define TWL6030_CFG_STATE_GRP_SHIFT	5
 #define TWL6030_CFG_STATE_APP_SHIFT	2
+#define TWL6030_CFG_STATE_MASK		0x03
 #define TWL6030_CFG_STATE_APP_MASK	(0x03 << TWL6030_CFG_STATE_APP_SHIFT)
 #define TWL6030_CFG_STATE_APP(v)	(((v) & TWL6030_CFG_STATE_APP_MASK) >>\
 						TWL6030_CFG_STATE_APP_SHIFT)
@@ -128,13 +129,14 @@ static int twl6030reg_is_enabled(struct regulator_dev *rdev)
 		if (grp < 0)
 			return grp;
 		grp &= P1_GRP_6030;
+		val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+		val = TWL6030_CFG_STATE_APP(val);
 	} else {
+		val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+		val &= TWL6030_CFG_STATE_MASK;
 		grp = 1;
 	}
 
-	val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
-	val = TWL6030_CFG_STATE_APP(val);
-
 	return grp && (val == TWL6030_CFG_STATE_ON);
 }
 
@@ -187,7 +189,12 @@ static int twl6030reg_get_status(struct regulator_dev *rdev)
 
 	val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
 
-	switch (TWL6030_CFG_STATE_APP(val)) {
+	if (info->features & TWL6032_SUBCLASS)
+		val &= TWL6030_CFG_STATE_MASK;
+	else
+		val = TWL6030_CFG_STATE_APP(val);
+
+	switch (val) {
 	case TWL6030_CFG_STATE_ON:
 		return REGULATOR_STATUS_NORMAL;
 
@@ -530,6 +537,7 @@ static const struct twlreg_info TWL6030_INFO_##label = { \
 #define TWL6032_ADJUSTABLE_LDO(label, offset) \
 static const struct twlreg_info TWL6032_INFO_##label = { \
 	.base = offset, \
+	.features = TWL6032_SUBCLASS, \
 	.desc = { \
 		.name = #label, \
 		.id = TWL6032_REG_##label, \
@@ -562,6 +570,7 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \
 #define TWL6032_ADJUSTABLE_SMPS(label, offset) \
 static const struct twlreg_info TWLSMPS_INFO_##label = { \
 	.base = offset, \
+	.features = TWL6032_SUBCLASS, \
 	.desc = { \
 		.name = #label, \
 		.id = TWL6032_REG_##label, \
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index cadea0344486..40befdd9dfa9 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -71,6 +71,35 @@ static const struct regulator_ops wm8994_ldo2_ops = {
 };
 
 static const struct regulator_desc wm8994_ldo_desc[] = {
+	{
+		.name = "LDO1",
+		.id = 1,
+		.type = REGULATOR_VOLTAGE,
+		.n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
+		.vsel_reg = WM8994_LDO_1,
+		.vsel_mask = WM8994_LDO1_VSEL_MASK,
+		.ops = &wm8994_ldo1_ops,
+		.min_uV = 2400000,
+		.uV_step = 100000,
+		.enable_time = 3000,
+		.off_on_delay = 36000,
+		.owner = THIS_MODULE,
+	},
+	{
+		.name = "LDO2",
+		.id = 2,
+		.type = REGULATOR_VOLTAGE,
+		.n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
+		.vsel_reg = WM8994_LDO_2,
+		.vsel_mask = WM8994_LDO2_VSEL_MASK,
+		.ops = &wm8994_ldo2_ops,
+		.enable_time = 3000,
+		.off_on_delay = 36000,
+		.owner = THIS_MODULE,
+	},
+};
+
+static const struct regulator_desc wm8958_ldo_desc[] = {
 	{
 		.name = "LDO1",
 		.id = 1,
@@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
 	 * regulator core and we need not worry about it on the
 	 * error path.
 	 */
-	ldo->regulator = devm_regulator_register(&pdev->dev,
-						 &wm8994_ldo_desc[id],
-						 &config);
+	if (ldo->wm8994->type == WM8994) {
+		ldo->regulator = devm_regulator_register(&pdev->dev,
+							 &wm8994_ldo_desc[id],
+							 &config);
+	} else {
+		ldo->regulator = devm_regulator_register(&pdev->dev,
+							 &wm8958_ldo_desc[id],
+							 &config);
+	}
+
 	if (IS_ERR(ldo->regulator)) {
 		ret = PTR_ERR(ldo->regulator);
 		dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index b37b111b15b3..a26221a6f6c2 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -41,6 +41,7 @@ struct qcom_sysmon {
 	struct completion comp;
 	struct completion ind_comp;
 	struct completion shutdown_comp;
+	struct completion ssctl_comp;
 	struct mutex lock;
 
 	bool ssr_ack;
@@ -422,6 +423,8 @@ static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
 
 	svc->priv = sysmon;
 
+	complete(&sysmon->ssctl_comp);
+
 	return 0;
 }
 
@@ -478,6 +481,7 @@ static int sysmon_start(struct rproc_subdev *subdev)
 		.ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP
 	};
 
+	reinit_completion(&sysmon->ssctl_comp);
 	mutex_lock(&sysmon->state_lock);
 	sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP;
 	blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
@@ -520,6 +524,11 @@ static void sysmon_stop(struct rproc_subdev *subdev, bool crashed)
 	if (crashed)
 		return;
 
+	if (sysmon->ssctl_instance) {
+		if (!wait_for_completion_timeout(&sysmon->ssctl_comp, HZ / 2))
+			dev_err(sysmon->dev, "timeout waiting for ssctl service\n");
+	}
+
 	if (sysmon->ssctl_version)
 		ssctl_request_shutdown(sysmon);
 	else if (sysmon->ept)
@@ -606,6 +615,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
 	init_completion(&sysmon->comp);
 	init_completion(&sysmon->ind_comp);
 	init_completion(&sysmon->shutdown_comp);
+	init_completion(&sysmon->ssctl_comp);
 	mutex_init(&sysmon->lock);
 	mutex_init(&sysmon->state_lock);
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 4e2527fcae3c..b745d4e681fd 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -415,6 +415,7 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
 			     irq_handler_t thread_fn)
 {
 	int ret;
+	int irq_number;
 
 	ret = platform_get_irq_byname(pdev, name);
 	if (ret < 0 && optional) {
@@ -425,14 +426,19 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
 		return ret;
 	}
 
+	irq_number = ret;
+
 	ret = devm_request_threaded_irq(&pdev->dev, ret,
 					NULL, thread_fn,
 					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
 					"wcnss", wcnss);
-	if (ret)
+	if (ret) {
 		dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+		return ret;
+	}
 
-	return ret;
+	/* Return the IRQ number if the IRQ was successfully acquired */
+	return irq_number;
 }
 
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 83cd040a2993..45cc99d42765 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -2000,7 +2000,7 @@ int rproc_set_firmware(struct rproc *rproc, const char *fw_name)
 		goto out;
 	}
 
-	kfree(rproc->firmware);
+	kfree_const(rproc->firmware);
 	rproc->firmware = p;
 
 out:
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 7db908986814..0233dce3015a 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -1283,6 +1283,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
 		if (!cpdev) {
 			ret = -ENODEV;
 			dev_err(dev, "could not get R5 core platform device\n");
+			of_node_put(child);
 			goto fail;
 		}
 
@@ -1291,6 +1292,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
 			dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
 				ret);
 			put_device(&cpdev->dev);
+			of_node_put(child);
 			goto fail;
 		}
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 185a333df66c..d2408725eb2c 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -329,6 +329,7 @@ static int imx8mp_reset_set(struct reset_controller_dev *rcdev,
 		break;
 
 	case IMX8MP_RESET_PCIE_CTRL_APPS_EN:
+	case IMX8MP_RESET_PCIEPHY_PERST:
 		value = assert ? 0 : bit;
 		break;
 	}
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
index 24d3395964cc..4c5bba52b105 100644
--- a/drivers/reset/tegra/reset-bpmp.c
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
 	struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
 	struct mrq_reset_request request;
 	struct tegra_bpmp_message msg;
+	int err;
 
 	memset(&request, 0, sizeof(request));
 	request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
 	msg.tx.data = &request;
 	msg.tx.size = sizeof(request);
 
-	return tegra_bpmp_transfer(bpmp, &msg);
+	err = tegra_bpmp_transfer(bpmp, &msg);
+	if (err)
+		return err;
+	if (msg.rx.ret)
+		return -EINVAL;
+
+	return 0;
 }
 
 static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
index 96a17ec29140..2d8cb596ad69 100644
--- a/drivers/rpmsg/mtk_rpmsg.c
+++ b/drivers/rpmsg/mtk_rpmsg.c
@@ -234,7 +234,9 @@ static void mtk_register_device_work_function(struct work_struct *register_work)
 		if (info->registered)
 			continue;
 
+		mutex_unlock(&subdev->channels_lock);
 		ret = mtk_rpmsg_register_device(subdev, &info->info);
+		mutex_lock(&subdev->channels_lock);
 		if (ret) {
 			dev_err(&pdev->dev, "Can't create rpmsg_device\n");
 			continue;
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 4840886532ff..7cbed0310c09 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1472,7 +1472,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
 	cancel_work_sync(&channel->intent_work);
 
 	if (channel->rpdev) {
-		strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+		strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
 		chinfo.src = RPMSG_ADDR_ANY;
 		chinfo.dst = RPMSG_ADDR_ANY;
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 19903de6268d..b5167ef93abf 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1073,7 +1073,7 @@
static int qcom_smd_create_device(struct qcom_smd_channel *channel) /* Assign public information to the rpmsg_device */ rpdev = &qsdev->rpdev; - strncpy(rpdev->id.name, channel->name, RPMSG_NAME_SIZE); + strscpy_pad(rpdev->id.name, channel->name, RPMSG_NAME_SIZE); rpdev->src = RPMSG_ADDR_ANY; rpdev->dst = RPMSG_ADDR_ANY; @@ -1304,7 +1304,7 @@ static void qcom_channel_state_worker(struct work_struct *work) spin_unlock_irqrestore(&edge->channels_lock, flags); - strncpy(chinfo.name, channel->name, sizeof(chinfo.name)); + strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name)); chinfo.src = RPMSG_ADDR_ANY; chinfo.dst = RPMSG_ADDR_ANY; rpmsg_unregister_device(&edge->dev, &chinfo); @@ -1364,6 +1364,7 @@ static int qcom_smd_parse_edge(struct device *dev, } edge->ipc_regmap = syscon_node_to_regmap(syscon_np); + of_node_put(syscon_np); if (IS_ERR(edge->ipc_regmap)) { ret = PTR_ERR(edge->ipc_regmap); goto put_node; @@ -1388,9 +1389,9 @@ static int qcom_smd_parse_edge(struct device *dev, edge->name = node->name; irq = irq_of_parse_and_map(node, 0); - if (irq < 0) { + if (!irq) { dev_err(dev, "required smd interrupt missing\n"); - ret = irq; + ret = -EINVAL; goto put_node; } diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 7c88d190c51f..625effe6cb65 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -26,6 +26,15 @@ struct class *rtc_class; static void rtc_device_release(struct device *dev) { struct rtc_device *rtc = to_rtc_device(dev); + struct timerqueue_head *head = &rtc->timerqueue; + struct timerqueue_node *node; + + mutex_lock(&rtc->ops_lock); + while ((node = timerqueue_getnext(head))) + timerqueue_del(head, node); + mutex_unlock(&rtc->ops_lock); + + cancel_work_sync(&rtc->irqwork); ida_simple_remove(&rtc_ida, rtc->id); kfree(rtc); diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 58c6382a2807..d4f6c4dd42c4 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -222,6 +222,8 @@ static inline void cmos_write_bank2(unsigned char val, unsigned char addr) static int cmos_read_time(struct device *dev, struct rtc_time *t) { + int ret; + /* * If pm_trace abused the RTC for storage, set the timespec to 0, * which tells the caller that this RTC value is unusable. @@ -229,29 +231,64 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t) if (!pm_trace_rtc_valid()) return -EIO; - /* REVISIT: if the clock has a "century" register, use - * that instead of the heuristic in mc146818_get_time(). - * That'll make Y3K compatility (year > 2070) easy! - */ - mc146818_get_time(t); + ret = mc146818_get_time(t); + if (ret < 0) { + dev_err_ratelimited(dev, "unable to read current time\n"); + return ret; + } + return 0; } static int cmos_set_time(struct device *dev, struct rtc_time *t) { - /* REVISIT: set the "century" register if available - * - * NOTE: this ignores the issue whereby updating the seconds + /* NOTE: this ignores the issue whereby updating the seconds * takes effect exactly 500ms after we write the register. * (Also queueing and other delays before we get this far.) 
*/ return mc146818_set_time(t); } +struct cmos_read_alarm_callback_param { + struct cmos_rtc *cmos; + struct rtc_time *time; + unsigned char rtc_control; +}; + +static void cmos_read_alarm_callback(unsigned char __always_unused seconds, + void *param_in) +{ + struct cmos_read_alarm_callback_param *p = + (struct cmos_read_alarm_callback_param *)param_in; + struct rtc_time *time = p->time; + + time->tm_sec = CMOS_READ(RTC_SECONDS_ALARM); + time->tm_min = CMOS_READ(RTC_MINUTES_ALARM); + time->tm_hour = CMOS_READ(RTC_HOURS_ALARM); + + if (p->cmos->day_alrm) { + /* ignore upper bits on readback per ACPI spec */ + time->tm_mday = CMOS_READ(p->cmos->day_alrm) & 0x3f; + if (!time->tm_mday) + time->tm_mday = -1; + + if (p->cmos->mon_alrm) { + time->tm_mon = CMOS_READ(p->cmos->mon_alrm); + if (!time->tm_mon) + time->tm_mon = -1; + } + } + + p->rtc_control = CMOS_READ(RTC_CONTROL); +} + static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t) { struct cmos_rtc *cmos = dev_get_drvdata(dev); - unsigned char rtc_control; + struct cmos_read_alarm_callback_param p = { + .cmos = cmos, + .time = &t->time, + }; /* This not only a rtc_op, but also called directly */ if (!is_valid_irq(cmos->irq)) @@ -262,28 +299,18 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t) * the future. */ - spin_lock_irq(&rtc_lock); - t->time.tm_sec = CMOS_READ(RTC_SECONDS_ALARM); - t->time.tm_min = CMOS_READ(RTC_MINUTES_ALARM); - t->time.tm_hour = CMOS_READ(RTC_HOURS_ALARM); + /* Some Intel chipsets disconnect the alarm registers when the clock + * update is in progress - during this time reads return bogus values + * and writes may fail silently. See for example "7th Generation Intel® + * Processor Family I/O for U/Y Platforms [...] Datasheet", section + * 27.7.1 + * + * Use the mc146818_avoid_UIP() function to avoid this. 
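The call just below shows the new pattern end to end: the caller bundles whatever state its register accesses need into a param struct and hands mc146818_avoid_UIP() a callback, which the helper runs under rtc_lock in a window where UIP is clear, retrying up to ten times. A minimal sketch of such a caller (my_param, read_alarm_sec_cb and read_alarm_seconds are hypothetical names, not part of this patch):

struct my_param {
	unsigned char sec;
};

/* Runs under rtc_lock with UIP clear; may be invoked more than once. */
static void read_alarm_sec_cb(unsigned char seconds, void *data)
{
	struct my_param *p = data;

	p->sec = CMOS_READ(RTC_SECONDS_ALARM);
}

static int read_alarm_seconds(unsigned char *out)
{
	struct my_param p;

	if (!mc146818_avoid_UIP(read_alarm_sec_cb, &p))
		return -EIO;	/* retries exhausted, clock unusable */

	*out = p.sec;
	return 0;
}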
+ */ + if (!mc146818_avoid_UIP(cmos_read_alarm_callback, &p)) + return -EIO; - if (cmos->day_alrm) { - /* ignore upper bits on readback per ACPI spec */ - t->time.tm_mday = CMOS_READ(cmos->day_alrm) & 0x3f; - if (!t->time.tm_mday) - t->time.tm_mday = -1; - - if (cmos->mon_alrm) { - t->time.tm_mon = CMOS_READ(cmos->mon_alrm); - if (!t->time.tm_mon) - t->time.tm_mon = -1; - } - } - - rtc_control = CMOS_READ(RTC_CONTROL); - spin_unlock_irq(&rtc_lock); - - if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + if (!(p.rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { if (((unsigned)t->time.tm_sec) < 0x60) t->time.tm_sec = bcd2bin(t->time.tm_sec); else @@ -312,7 +339,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t) } } - t->enabled = !!(rtc_control & RTC_AIE); + t->enabled = !!(p.rtc_control & RTC_AIE); t->pending = 0; return 0; @@ -443,10 +470,57 @@ static int cmos_validate_alarm(struct device *dev, struct rtc_wkalrm *t) return 0; } +struct cmos_set_alarm_callback_param { + struct cmos_rtc *cmos; + unsigned char mon, mday, hrs, min, sec; + struct rtc_wkalrm *t; +}; + +/* Note: this function may be executed by mc146818_avoid_UIP() more than + * once + */ +static void cmos_set_alarm_callback(unsigned char __always_unused seconds, + void *param_in) +{ + struct cmos_set_alarm_callback_param *p = + (struct cmos_set_alarm_callback_param *)param_in; + + /* next rtc irq must not be from previous alarm setting */ + cmos_irq_disable(p->cmos, RTC_AIE); + + /* update alarm */ + CMOS_WRITE(p->hrs, RTC_HOURS_ALARM); + CMOS_WRITE(p->min, RTC_MINUTES_ALARM); + CMOS_WRITE(p->sec, RTC_SECONDS_ALARM); + + /* the system may support an "enhanced" alarm */ + if (p->cmos->day_alrm) { + CMOS_WRITE(p->mday, p->cmos->day_alrm); + if (p->cmos->mon_alrm) + CMOS_WRITE(p->mon, p->cmos->mon_alrm); + } + + if (use_hpet_alarm()) { + /* + * FIXME the HPET alarm glue currently ignores day_alrm + * and mon_alrm ... + */ + hpet_set_alarm_time(p->t->time.tm_hour, p->t->time.tm_min, + p->t->time.tm_sec); + } + + if (p->t->enabled) + cmos_irq_enable(p->cmos, RTC_AIE); +} + static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) { struct cmos_rtc *cmos = dev_get_drvdata(dev); - unsigned char mon, mday, hrs, min, sec, rtc_control; + struct cmos_set_alarm_callback_param p = { + .cmos = cmos, + .t = t + }; + unsigned char rtc_control; int ret; /* This not only a rtc_op, but also called directly */ @@ -457,11 +531,11 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) if (ret < 0) return ret; - mon = t->time.tm_mon + 1; - mday = t->time.tm_mday; - hrs = t->time.tm_hour; - min = t->time.tm_min; - sec = t->time.tm_sec; + p.mon = t->time.tm_mon + 1; + p.mday = t->time.tm_mday; + p.hrs = t->time.tm_hour; + p.min = t->time.tm_min; + p.sec = t->time.tm_sec; spin_lock_irq(&rtc_lock); rtc_control = CMOS_READ(RTC_CONTROL); @@ -469,43 +543,21 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { /* Writing 0xff means "don't care" or "match all". */ - mon = (mon <= 12) ? bin2bcd(mon) : 0xff; - mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff; - hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff; - min = (min < 60) ? bin2bcd(min) : 0xff; - sec = (sec < 60) ? bin2bcd(sec) : 0xff; + p.mon = (p.mon <= 12) ? bin2bcd(p.mon) : 0xff; + p.mday = (p.mday >= 1 && p.mday <= 31) ? bin2bcd(p.mday) : 0xff; + p.hrs = (p.hrs < 24) ? bin2bcd(p.hrs) : 0xff; + p.min = (p.min < 60) ? bin2bcd(p.min) : 0xff; + p.sec = (p.sec < 60) ? 
bin2bcd(p.sec) : 0xff; } - spin_lock_irq(&rtc_lock); - - /* next rtc irq must not be from previous alarm setting */ - cmos_irq_disable(cmos, RTC_AIE); - - /* update alarm */ - CMOS_WRITE(hrs, RTC_HOURS_ALARM); - CMOS_WRITE(min, RTC_MINUTES_ALARM); - CMOS_WRITE(sec, RTC_SECONDS_ALARM); - - /* the system may support an "enhanced" alarm */ - if (cmos->day_alrm) { - CMOS_WRITE(mday, cmos->day_alrm); - if (cmos->mon_alrm) - CMOS_WRITE(mon, cmos->mon_alrm); - } - - if (use_hpet_alarm()) { - /* - * FIXME the HPET alarm glue currently ignores day_alrm - * and mon_alrm ... - */ - hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min, - t->time.tm_sec); - } - - if (t->enabled) - cmos_irq_enable(cmos, RTC_AIE); - - spin_unlock_irq(&rtc_lock); + /* + * Some Intel chipsets disconnect the alarm registers when the clock + * update is in progress - during this time writes fail silently. + * + * Use mc146818_avoid_UIP() to avoid this. + */ + if (!mc146818_avoid_UIP(cmos_set_alarm_callback, &p)) + return -EIO; cmos->alarm_expires = rtc_tm_to_time64(&t->time); @@ -652,11 +704,10 @@ static struct cmos_rtc cmos_rtc; static irqreturn_t cmos_interrupt(int irq, void *p) { - unsigned long flags; u8 irqstat; u8 rtc_control; - spin_lock_irqsave(&rtc_lock, flags); + spin_lock(&rtc_lock); /* When the HPET interrupt handler calls us, the interrupt * status is passed as arg1 instead of the irq number. But @@ -690,7 +741,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p) hpet_mask_rtc_irq_bit(RTC_AIE); CMOS_READ(RTC_INTR_FLAGS); } - spin_unlock_irqrestore(&rtc_lock, flags); + spin_unlock(&rtc_lock); if (is_intr(irqstat)) { rtc_update_irq(p, 1, irqstat); @@ -806,6 +857,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) rename_region(ports, dev_name(&cmos_rtc.rtc->dev)); + if (!mc146818_does_rtc_work()) { + dev_warn(dev, "broken or not accessible\n"); + retval = -ENXIO; + goto cleanup1; + } + spin_lock_irq(&rtc_lock); if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) { @@ -1054,7 +1111,9 @@ static void cmos_check_wkalrm(struct device *dev) * ACK the rtc irq here */ if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) { + local_irq_disable(); cmos_interrupt(0, (void *)cmos->rtc); + local_irq_enable(); return; } diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index 5add637c9ad2..347655d24b5d 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -9,40 +9,143 @@ #endif /* - * Returns true if a clock update is in progress + * Execute a function while the UIP (Update-in-progress) bit of the RTC is + * unset. + * + * Warning: callback may be executed more than once. */ -static inline unsigned char mc146818_is_updating(void) +bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param), + void *param) { - unsigned char uip; + int i; + unsigned long flags; + unsigned char seconds; + + for (i = 0; i < 10; i++) { + spin_lock_irqsave(&rtc_lock, flags); + + /* + * Check whether there is an update in progress during which the + * readout is unspecified. The maximum update time is ~2ms. Poll + * every msec for completion. + * + * Store the second value before checking UIP so a long lasting + * NMI which happens to hit after the UIP check cannot make + * an update cycle invisible. 
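A note on the cmos_interrupt() hunk above: dropping spin_lock_irqsave() for plain spin_lock() is safe only because hard-irq handlers run with local interrupts already disabled; the one direct process-context caller, cmos_check_wkalrm(), therefore brackets its call with local_irq_disable()/local_irq_enable(). The pattern in isolation, as a sketch rather than the driver's exact code:

static irqreturn_t my_handler(int irq, void *dev)
{
	spin_lock(&my_lock);	/* local irqs are already off in hard-irq context */
	/* ... read and acknowledge the interrupt status ... */
	spin_unlock(&my_lock);
	return IRQ_HANDLED;
}

static void my_poll_path(void *dev)
{
	local_irq_disable();	/* recreate the environment the handler expects */
	my_handler(0, dev);
	local_irq_enable();
}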
+ */ + seconds = CMOS_READ(RTC_SECONDS); + + if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) { + spin_unlock_irqrestore(&rtc_lock, flags); + mdelay(1); + continue; + } + + /* Revalidate the above readout */ + if (seconds != CMOS_READ(RTC_SECONDS)) { + spin_unlock_irqrestore(&rtc_lock, flags); + continue; + } + + if (callback) + callback(seconds, param); + + /* + * Check for the UIP bit again. If it is set now then + * the above values may contain garbage. + */ + if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) { + spin_unlock_irqrestore(&rtc_lock, flags); + mdelay(1); + continue; + } + + /* + * A NMI might have interrupted the above sequence so check + * whether the seconds value has changed which indicates that + * the NMI took longer than the UIP bit was set. Unlikely, but + * possible and there is also virt... + */ + if (seconds != CMOS_READ(RTC_SECONDS)) { + spin_unlock_irqrestore(&rtc_lock, flags); + continue; + } + spin_unlock_irqrestore(&rtc_lock, flags); + + return true; + } + return false; +} +EXPORT_SYMBOL_GPL(mc146818_avoid_UIP); + +/* + * If the UIP (Update-in-progress) bit of the RTC is set for more than + * 10ms, the RTC is apparently broken or not present. + */ +bool mc146818_does_rtc_work(void) +{ + int i; + unsigned char val; unsigned long flags; - spin_lock_irqsave(&rtc_lock, flags); - uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); - spin_unlock_irqrestore(&rtc_lock, flags); - return uip; -} + for (i = 0; i < 10; i++) { + spin_lock_irqsave(&rtc_lock, flags); + val = CMOS_READ(RTC_FREQ_SELECT); + spin_unlock_irqrestore(&rtc_lock, flags); -unsigned int mc146818_get_time(struct rtc_time *time) + if ((val & RTC_UIP) == 0) + return true; + + mdelay(1); + } + + return false; +} +EXPORT_SYMBOL_GPL(mc146818_does_rtc_work); + +int mc146818_get_time(struct rtc_time *time) { unsigned char ctrl; unsigned long flags; + unsigned int iter_count = 0; unsigned char century = 0; + bool retry; #ifdef CONFIG_MACH_DECSTATION unsigned int real_year; #endif +again: + if (iter_count > 10) { + memset(time, 0, sizeof(*time)); + return -EIO; + } + iter_count++; + + spin_lock_irqsave(&rtc_lock, flags); + /* - * read RTC once any update in progress is done. The update - * can take just over 2ms. We wait 20ms. There is no need to - * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP. - * If you need to know *exactly* when a second has started, enable - * periodic update complete interrupts, (via ioctl) and then - * immediately read /dev/rtc which will block until you get the IRQ. - * Once the read clears, read the RTC time (again via ioctl). Easy. + * Check whether there is an update in progress during which the + * readout is unspecified. The maximum update time is ~2ms. Poll + * every msec for completion. + * + * Store the second value before checking UIP so a long lasting NMI + * which happens to hit after the UIP check cannot make an update + * cycle invisible. */ - if (mc146818_is_updating()) - mdelay(20); + time->tm_sec = CMOS_READ(RTC_SECONDS); + + if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) { + spin_unlock_irqrestore(&rtc_lock, flags); + mdelay(1); + goto again; + } + + /* Revalidate the above readout */ + if (time->tm_sec != CMOS_READ(RTC_SECONDS)) { + spin_unlock_irqrestore(&rtc_lock, flags); + goto again; + } /* * Only the values that we read from the RTC are set. We leave @@ -50,8 +153,6 @@ unsigned int mc146818_get_time(struct rtc_time *time) * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated * by the RTC when initially set to a non-zero value. 
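mc146818_does_rtc_work() above gives probe paths a cheap liveness test: if UIP never clears across ten 1ms polls, the RTC is treated as broken or absent. The cmos_do_probe() hunk earlier in this patch consumes it in exactly that way; trimmed to a sketch (the real probe jumps to a cleanup label instead of returning directly):

	if (!mc146818_does_rtc_work()) {
		dev_warn(dev, "broken or not accessible\n");
		return -ENXIO;	/* give up before registering the device */
	}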
*/ - spin_lock_irqsave(&rtc_lock, flags); - time->tm_sec = CMOS_READ(RTC_SECONDS); time->tm_min = CMOS_READ(RTC_MINUTES); time->tm_hour = CMOS_READ(RTC_HOURS); time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH); @@ -66,8 +167,24 @@ unsigned int mc146818_get_time(struct rtc_time *time) century = CMOS_READ(acpi_gbl_FADT.century); #endif ctrl = CMOS_READ(RTC_CONTROL); + /* + * Check for the UIP bit again. If it is set now then + * the above values may contain garbage. + */ + retry = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP; + /* + * A NMI might have interrupted the above sequence so check whether + * the seconds value has changed which indicates that the NMI took + * longer than the UIP bit was set. Unlikely, but possible and + * there is also virt... + */ + retry |= time->tm_sec != CMOS_READ(RTC_SECONDS); + spin_unlock_irqrestore(&rtc_lock, flags); + if (retry) + goto again; + if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { time->tm_sec = bcd2bin(time->tm_sec); @@ -95,10 +212,21 @@ unsigned int mc146818_get_time(struct rtc_time *time) time->tm_mon--; - return RTC_24H; + return 0; } EXPORT_SYMBOL_GPL(mc146818_get_time); +/* AMD systems don't allow access to AltCentury with DV1 */ +static bool apply_amd_register_a_behavior(void) +{ +#ifdef CONFIG_X86 + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + return true; +#endif + return false; +} + /* Set the current date and time in the real time clock. */ int mc146818_set_time(struct rtc_time *time) { @@ -121,7 +249,6 @@ int mc146818_set_time(struct rtc_time *time) if (yrs > 255) /* They are unsigned */ return -EINVAL; - spin_lock_irqsave(&rtc_lock, flags); #ifdef CONFIG_MACH_DECSTATION real_yrs = yrs; leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) || @@ -150,16 +277,16 @@ int mc146818_set_time(struct rtc_time *time) /* These limits and adjustments are independent of * whether the chip is in binary mode or not. 
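The conversions just below follow the MC146818 convention: unless RTC_DM_BINARY is set (or RTC_ALWAYS_BCD forces the issue), every time field is stored as binary-coded decimal, one decimal digit per nibble. A two-line illustration of the <linux/bcd.h> helpers, with arbitrary values:

	u8 b = bin2bcd(59);	/* 59 decimal -> 0x59, tens digit in the high nibble */
	u8 v = bcd2bin(0x23);	/* 0x23 -> 23 decimal */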
*/ - if (yrs > 169) { - spin_unlock_irqrestore(&rtc_lock, flags); + if (yrs > 169) return -EINVAL; - } if (yrs >= 100) yrs -= 100; - if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) - || RTC_ALWAYS_BCD) { + spin_lock_irqsave(&rtc_lock, flags); + save_control = CMOS_READ(RTC_CONTROL); + spin_unlock_irqrestore(&rtc_lock, flags); + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { sec = bin2bcd(sec); min = bin2bcd(min); hrs = bin2bcd(hrs); @@ -169,10 +296,14 @@ int mc146818_set_time(struct rtc_time *time) century = bin2bcd(century); } + spin_lock_irqsave(&rtc_lock, flags); save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + if (apply_amd_register_a_behavior()) + CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT); + else + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c index 1894aded4c85..acfcb378767d 100644 --- a/drivers/rtc/rtc-mt6397.c +++ b/drivers/rtc/rtc-mt6397.c @@ -269,6 +269,8 @@ static int mtk_rtc_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; rtc->addr_base = res->start; rtc->data = of_device_get_match_data(&pdev->dev); diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index f0a6861ff3ae..715513311ece 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c @@ -366,7 +366,8 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127) static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pcf2127 *pcf2127 = dev_get_drvdata(dev); - unsigned int buf[5], ctrl2; + u8 buf[5]; + unsigned int ctrl2; int ret; ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2); diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index f2818cdd11d8..52b36b7c6129 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -138,7 +138,7 @@ struct sun6i_rtc_dev { const struct sun6i_rtc_clk_data *data; void __iomem *base; int irq; - unsigned long alarm; + time64_t alarm; struct clk_hw hw; struct clk_hw *int_osc; @@ -510,10 +510,8 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm) struct sun6i_rtc_dev *chip = dev_get_drvdata(dev); struct rtc_time *alrm_tm = &wkalrm->time; struct rtc_time tm_now; - unsigned long time_now = 0; - unsigned long time_set = 0; - unsigned long time_gap = 0; - int ret = 0; + time64_t time_now, time_set; + int ret; ret = sun6i_rtc_gettime(dev, &tm_now); if (ret < 0) { @@ -528,9 +526,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm) return -EINVAL; } - time_gap = time_set - time_now; - - if (time_gap > U32_MAX) { + if ((time_set - time_now) > U32_MAX) { dev_err(dev, "Date too far in the future\n"); return -EINVAL; } @@ -539,7 +535,7 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm) writel(0, chip->base + SUN6I_ALRM_COUNTER); usleep_range(100, 300); - writel(time_gap, chip->base + SUN6I_ALRM_COUNTER); + writel(time_set - time_now, chip->base + SUN6I_ALRM_COUNTER); chip->alarm = time_set; sun6i_rtc_setaie(wkalrm->enabled, chip); diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c index 2018614f258f..6eaa9321c074 100644 --- a/drivers/rtc/rtc-wm8350.c +++ 
b/drivers/rtc/rtc-wm8350.c @@ -432,14 +432,21 @@ static int wm8350_rtc_probe(struct platform_device *pdev) return ret; } - wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350_rtc_update_handler, 0, "RTC Seconds", wm8350); + if (ret) + return ret; + wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC); - wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM, wm8350_rtc_alarm_handler, 0, "RTC Alarm", wm8350); + if (ret) { + wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350); + return ret; + } return 0; } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 2adfab552e22..f4edfe383e9d 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1462,6 +1462,13 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) if (!cqr->lpm) cqr->lpm = dasd_path_get_opm(device); } + /* + * remember the amount of formatted tracks to prevent double format on + * ESE devices + */ + if (cqr->block) + cqr->trkcount = atomic_read(&cqr->block->trkcount); + if (cqr->cpmode == 1) { rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, (long) cqr, cqr->lpm); @@ -1680,6 +1687,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, unsigned long now; int nrf_suppressed = 0; int fp_suppressed = 0; + struct request *req; u8 *sense = NULL; int expires; @@ -1780,7 +1788,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, } if (dasd_ese_needs_format(cqr->block, irb)) { - if (rq_data_dir((struct request *)cqr->callback_data) == READ) { + req = dasd_get_callback_data(cqr); + if (!req) { + cqr->status = DASD_CQR_ERROR; + return; + } + if (rq_data_dir(req) == READ) { device->discipline->ese_read(cqr, irb); cqr->status = DASD_CQR_SUCCESS; cqr->stopclk = now; @@ -2799,8 +2812,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) * complete a request partially. 
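The __dasd_cleanup_cqr() change that follows fixes an inverted argument: blk_update_request() expects the number of bytes that completed, so passing blk_rq_bytes(req) - proc_bytes acknowledged the wrong part of the request. The corrected partial-completion idiom, as used below:

	/* the first proc_bytes of req succeeded; finish them, requeue the rest */
	blk_update_request(req, BLK_STS_OK, proc_bytes);
	blk_mq_requeue_request(req, true);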
*/ if (proc_bytes) { - blk_update_request(req, BLK_STS_OK, - blk_rq_bytes(req) - proc_bytes); + blk_update_request(req, BLK_STS_OK, proc_bytes); blk_mq_requeue_request(req, true); } else if (likely(!blk_should_fake_timeout(req->q))) { blk_mq_complete_request(req); diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index dc78a523a69f..b6b938aa6615 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device) struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) { struct dasd_eckd_private *alias_priv, *private = base_device->private; - struct alias_pav_group *group = private->pavgroup; struct alias_lcu *lcu = private->lcu; struct dasd_device *alias_device; + struct alias_pav_group *group; unsigned long flags; - if (!group || !lcu) + if (!lcu) return NULL; if (lcu->pav == NO_PAV || lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING)) @@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) } spin_lock_irqsave(&lcu->lock, flags); + group = private->pavgroup; + if (!group) { + spin_unlock_irqrestore(&lcu->lock, flags); + return NULL; + } alias_device = group->next; if (!alias_device) { if (list_empty(&group->aliaslist)) { diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index ad44d22e8859..53d22975a32f 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -3026,13 +3026,24 @@ static int dasd_eckd_format_device(struct dasd_device *base, } static bool test_and_set_format_track(struct dasd_format_entry *to_format, - struct dasd_block *block) + struct dasd_ccw_req *cqr) { + struct dasd_block *block = cqr->block; struct dasd_format_entry *format; unsigned long flags; bool rc = false; spin_lock_irqsave(&block->format_lock, flags); + if (cqr->trkcount != atomic_read(&block->trkcount)) { + /* + * The number of formatted tracks has changed after request + * start and we can not tell if the current track was involved. 
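The trkcount comparison above is a generation check: dasd_start_IO() snapshots block->trkcount into the request, clear_format_track() increments it under format_lock, and a mismatch here means some track finished formatting after the snapshot was taken. Reduced to a generic sketch (lock, generation, snap and stale are illustrative names):

	/* writer, after completing one unit of work */
	spin_lock_irqsave(&lock, flags);
	atomic_inc(&generation);	/* invalidates older snapshots */
	spin_unlock_irqrestore(&lock, flags);

	/* reader, deciding whether its view is still valid */
	snap = atomic_read(&generation);	/* taken at request start */
	/* ... request executes ... */
	spin_lock_irqsave(&lock, flags);
	stale = snap != atomic_read(&generation);
	spin_unlock_irqrestore(&lock, flags);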
+ * To avoid data corruption treat it as if the current track is + * involved + */ + rc = true; + goto out; + } list_for_each_entry(format, &block->format_list, list) { if (format->track == to_format->track) { rc = true; @@ -3052,6 +3063,7 @@ static void clear_format_track(struct dasd_format_entry *format, unsigned long flags; spin_lock_irqsave(&block->format_lock, flags); + atomic_inc(&block->trkcount); list_del_init(&format->list); spin_unlock_irqrestore(&block->format_lock, flags); } @@ -3088,7 +3100,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr, sector_t curr_trk; int rc; - req = cqr->callback_data; + req = dasd_get_callback_data(cqr); block = cqr->block; base = block->base; private = base->private; @@ -3113,8 +3125,11 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr, } format->track = curr_trk; /* test if track is already in formatting by another thread */ - if (test_and_set_format_track(format, block)) + if (test_and_set_format_track(format, cqr)) { + /* this is no real error so do not count down retries */ + cqr->retries++; return ERR_PTR(-EEXIST); + } fdata.start_unit = curr_trk; fdata.stop_unit = curr_trk; @@ -3213,12 +3228,11 @@ static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb) cqr->proc_bytes = blk_count * blksize; return 0; } - if (dst && !skip_block) { - dst += off; + if (dst && !skip_block) memset(dst, 0, blksize); - } else { + else skip_block--; - } + dst += blksize; blk_count++; } } @@ -4613,7 +4627,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, struct dasd_device *basedev; struct req_iterator iter; struct dasd_ccw_req *cqr; - unsigned int first_offs; unsigned int trkcount; unsigned long *idaws; unsigned int size; @@ -4647,7 +4660,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) / DASD_RAW_SECTORS_PER_TRACK; trkcount = last_trk - first_trk + 1; - first_offs = 0; if (rq_data_dir(req) == READ) cmd = DASD_ECKD_CCW_READ_TRACK; @@ -4691,13 +4703,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, if (use_prefix) { prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev, - startdev, 1, first_offs + 1, trkcount, 0, 0); + startdev, 1, 0, trkcount, 0, 0); } else { define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0); ccw[-1].flags |= CCW_FLAG_CC; data += sizeof(struct DE_eckd_data); - locate_record_ext(ccw++, data, first_trk, first_offs + 1, + locate_record_ext(ccw++, data, first_trk, 0, trkcount, cmd, basedev, 0, 0); } diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index fa552f9f1666..9d9685c25253 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -188,6 +188,7 @@ struct dasd_ccw_req { void (*callback)(struct dasd_ccw_req *, void *data); void *callback_data; unsigned int proc_bytes; /* bytes for partial completion */ + unsigned int trkcount; /* count formatted tracks */ }; /* @@ -575,6 +576,7 @@ struct dasd_block { struct list_head format_list; spinlock_t format_lock; + atomic_t trkcount; }; struct dasd_attention_data { @@ -723,6 +725,18 @@ dasd_check_blocksize(int bsize) return 0; } +/* + * return the callback data of the original request in case there are + * ERP requests built on top of it + */ +static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr) +{ + while (cqr->refers) + cqr = cqr->refers; + + return cqr->callback_data; +} + /* externals 
in dasd.c */ #define DASD_PROFILE_OFF 0 #define DASD_PROFILE_ON 1 diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h index c467589c7f45..c06d399b9b1f 100644 --- a/drivers/s390/char/keyboard.h +++ b/drivers/s390/char/keyboard.h @@ -56,7 +56,7 @@ static inline void kbd_put_queue(struct tty_port *port, int ch) { tty_insert_flip_char(port, ch, 0); - tty_schedule_flip(port); + tty_flip_buffer_push(port); } static inline void @@ -64,5 +64,5 @@ kbd_puts_queue(struct tty_port *port, char *cp) { while (*cp) tty_insert_flip_char(port, *cp++, 0); - tty_schedule_flip(port); + tty_flip_buffer_push(port); } diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 1515fdc3c1ab..3841c0e77df6 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -48,6 +48,7 @@ static struct dentry *zcore_reipl_file; static struct dentry *zcore_hsa_file; static struct ipl_parameter_block *zcore_ipl_block; +static DEFINE_MUTEX(hsa_buf_mutex); static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE); /* @@ -64,19 +65,24 @@ int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count) if (!hsa_available) return -ENODATA; + mutex_lock(&hsa_buf_mutex); while (count) { if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) { TRACE("sclp_sdias_copy() failed\n"); + mutex_unlock(&hsa_buf_mutex); return -EIO; } offset = src % PAGE_SIZE; bytes = min(PAGE_SIZE - offset, count); - if (copy_to_user(dest, hsa_buf + offset, bytes)) + if (copy_to_user(dest, hsa_buf + offset, bytes)) { + mutex_unlock(&hsa_buf_mutex); return -EFAULT; + } src += bytes; dest += bytes; count -= bytes; } + mutex_unlock(&hsa_buf_mutex); return 0; } @@ -94,9 +100,11 @@ int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count) if (!hsa_available) return -ENODATA; + mutex_lock(&hsa_buf_mutex); while (count) { if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) { TRACE("sclp_sdias_copy() failed\n"); + mutex_unlock(&hsa_buf_mutex); return -EIO; } offset = src % PAGE_SIZE; @@ -106,6 +114,7 @@ int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count) dest += bytes; count -= bytes; } + mutex_unlock(&hsa_buf_mutex); return 0; } diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 9b61e9b131ad..e3c1060b6056 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -288,19 +288,11 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process) if (work_pending(&sch->todo_work)) goto out_unlock; - if (cio_update_schib(sch)) { - vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); - rc = 0; - goto out_unlock; - } - - private = dev_get_drvdata(&sch->dev); - if (private->state == VFIO_CCW_STATE_NOT_OPER) { - private->state = private->mdev ? 
VFIO_CCW_STATE_IDLE : - VFIO_CCW_STATE_STANDBY; - } rc = 0; + if (cio_update_schib(sch)) + vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); + out_unlock: spin_unlock_irqrestore(sch->lock, flags); diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 85a1a4533cbe..20a6097e1b20 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -626,8 +626,6 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo) ctcm_clear_busy_do(dev); } - kfree(mpcginfo); - return; } @@ -1206,10 +1204,10 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) CTCM_FUNTAIL, dev->name); priv->stats.rx_dropped++; /* mpcginfo only used for non-data transfers */ - kfree(mpcginfo); if (do_debug_data) ctcmpc_dump_skb(pskb, -8); } + kfree(mpcginfo); } done: @@ -1991,7 +1989,6 @@ static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg) } break; } - kfree(mpcginfo); CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n", __func__, ch->id, grp->outstanding_xid2, @@ -2052,7 +2049,6 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg) mpc_validate_xid(mpcginfo); break; } - kfree(mpcginfo); return; } diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c index ded1930a00b2..e3813a7aa5e6 100644 --- a/drivers/s390/net/ctcm_sysfs.c +++ b/drivers/s390/net/ctcm_sysfs.c @@ -39,11 +39,12 @@ static ssize_t ctcm_buffer_write(struct device *dev, struct ctcm_priv *priv = dev_get_drvdata(dev); int rc; - ndev = priv->channel[CTCM_READ]->netdev; - if (!(priv && priv->channel[CTCM_READ] && ndev)) { + if (!(priv && priv->channel[CTCM_READ] && + priv->channel[CTCM_READ]->netdev)) { CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev"); return -ENODEV; } + ndev = priv->channel[CTCM_READ]->netdev; rc = kstrtouint(buf, 0, &bs1); if (rc) diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 440219bcaa2b..06a322bdced6 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -1735,10 +1735,11 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) lcs_schedule_recovery(card); break; case LCS_CMD_STOPLAN: - pr_warn("Stoplan for %s initiated by LGW\n", - card->dev->name); - if (card->dev) + if (card->dev) { + pr_warn("Stoplan for %s initiated by LGW\n", + card->dev->name); netif_carrier_off(card->dev); + } break; default: LCS_DBF_TEXT(5, trace, "noLGWcmd"); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 511bf8e0a436..b61acbb09be3 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -145,27 +145,33 @@ void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter, static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port) { + int ret = -EIO; + if (mutex_lock_interruptible(&wka_port->mutex)) return -ERESTARTSYS; if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE || wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) { wka_port->status = ZFCP_FC_WKA_PORT_OPENING; - if (zfcp_fsf_open_wka_port(wka_port)) + if (zfcp_fsf_open_wka_port(wka_port)) { + /* could not even send request, nothing to wait for */ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; + goto out; + } } - mutex_unlock(&wka_port->mutex); - - wait_event(wka_port->completion_wq, + wait_event(wka_port->opened, wka_port->status == ZFCP_FC_WKA_PORT_ONLINE || wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE); if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) { atomic_inc(&wka_port->refcount); - return 0; + ret = 0; + goto out; } - return -EIO; +out: + mutex_unlock(&wka_port->mutex); + return ret; } 
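zfcp_fc_wka_port_get() above now sleeps on a dedicated opened queue, and the close path below gains a matching closed queue, so a waiter for one transition can no longer be woken, and wrongly satisfied, by the completion of the other. Stripped to a skeleton with illustrative names:

struct my_port {
	wait_queue_head_t opened;	/* woken only when an open completes */
	wait_queue_head_t closed;	/* woken only when a close completes */
	int status;
};

static void my_open_done(struct my_port *port, int status)
{
	port->status = status;
	wake_up(&port->opened);		/* cannot satisfy a close waiter */
}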
static void zfcp_fc_wka_port_offline(struct work_struct *work) @@ -181,9 +187,12 @@ static void zfcp_fc_wka_port_offline(struct work_struct *work) wka_port->status = ZFCP_FC_WKA_PORT_CLOSING; if (zfcp_fsf_close_wka_port(wka_port)) { + /* could not even send request, nothing to wait for */ wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; - wake_up(&wka_port->completion_wq); + goto out; } + wait_event(wka_port->closed, + wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE); out: mutex_unlock(&wka_port->mutex); } @@ -193,13 +202,15 @@ static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port) if (atomic_dec_return(&wka_port->refcount) != 0) return; /* wait 10 milliseconds, other reqs might pop in */ - schedule_delayed_work(&wka_port->work, HZ / 100); + queue_delayed_work(wka_port->adapter->work_queue, &wka_port->work, + msecs_to_jiffies(10)); } static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id, struct zfcp_adapter *adapter) { - init_waitqueue_head(&wka_port->completion_wq); + init_waitqueue_head(&wka_port->opened); + init_waitqueue_head(&wka_port->closed); wka_port->adapter = adapter; wka_port->d_id = d_id; diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 6902ae1f8e4f..25bebfaa8cbc 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -185,7 +185,8 @@ enum zfcp_fc_wka_status { /** * struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port * @adapter: Pointer to adapter structure this WKA port belongs to - * @completion_wq: Wait for completion of open/close command + * @opened: Wait for completion of open command + * @closed: Wait for completion of close command * @status: Current status of WKA port * @refcount: Reference count to keep port open as long as it is in use * @d_id: FC destination id or well-known-address @@ -195,7 +196,8 @@ enum zfcp_fc_wka_status { */ struct zfcp_fc_wka_port { struct zfcp_adapter *adapter; - wait_queue_head_t completion_wq; + wait_queue_head_t opened; + wait_queue_head_t closed; enum zfcp_fc_wka_status status; atomic_t refcount; u32 d_id; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 6cb963a06777..7bd5d6555d05 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -866,7 +866,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req); struct zfcp_adapter *adapter = req->adapter; struct zfcp_qdio *qdio = adapter->qdio; - int req_id = req->req_id; + unsigned long req_id = req->req_id; zfcp_reqlist_add(adapter->req_list, req); @@ -1889,7 +1889,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) wka_port->status = ZFCP_FC_WKA_PORT_ONLINE; } out: - wake_up(&wka_port->completion_wq); + wake_up(&wka_port->opened); } /** @@ -1948,7 +1948,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) } wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; - wake_up(&wka_port->completion_wq); + wake_up(&wka_port->closed); } /** diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 3337b1e80412..f6f92033132a 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -2014,7 +2014,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) retval = pci_enable_device(pdev); if (retval) { TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device"); - goto out_disable_device; + return -ENODEV; } pci_set_master(pdev); diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 
d8e19afa7a14..c6607c4686bb 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -3367,13 +3367,11 @@ static int __init aha152x_setup(char *str) setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1; setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT; setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0; - if (ints[0] > 8) { /*}*/ + if (ints[0] > 8) printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSIID>" "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXTTRANS>]]]]]]]\n"); - } else { + else setup_count++; - return 0; - } return 1; } diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index a13c203ef7a9..c4881657a807 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -182,6 +182,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; uint16_t cri_index; + int rc = 0; ep = iscsi_lookup_endpoint(transport_fd); if (!ep) @@ -189,15 +190,17 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, beiscsi_ep = ep->dd_data; - if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) - return -EINVAL; + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { + rc = -EINVAL; + goto put_ep; + } if (beiscsi_ep->phba != phba) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n", beiscsi_ep->phba, phba); - - return -EEXIST; + rc = -EEXIST; + goto put_ep; } cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid); if (phba->conn_table[cri_index]) { @@ -209,7 +212,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, beiscsi_ep->ep_cid, beiscsi_conn, phba->conn_table[cri_index]); - return -EINVAL; + rc = -EINVAL; + goto put_ep; } } @@ -226,7 +230,10 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, "BS_%d : cid %d phba->conn_table[%u]=%p\n", beiscsi_ep->ep_cid, cri_index, beiscsi_conn); phba->conn_table[cri_index] = beiscsi_conn; - return 0; + +put_ep: + iscsi_put_endpoint(ep); + return rc; } static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 987dc8135a9b..b977e039bb78 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -5810,6 +5810,7 @@ struct iscsi_transport beiscsi_iscsi_transport = { .destroy_session = beiscsi_session_destroy, .create_conn = beiscsi_conn_create, .bind_conn = beiscsi_conn_bind, + .unbind_conn = iscsi_conn_unbind, .destroy_conn = iscsi_conn_teardown, .attr_is_visible = beiscsi_attr_is_visible, .set_iface_param = beiscsi_iface_set_param, diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 3486e402bfc1..98292025b347 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -1408,7 +1408,7 @@ static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_lport_fdmi_timeout(void *arg); -static u16 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, +static int bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld); static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld); @@ -1887,6 +1887,8 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi, (u8 *) ((struct ct_hdr_s *) pyld + 1)); + if (attr_len < 0) + return; bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, 
(len + attr_len), &fchs, @@ -1896,17 +1898,20 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT); } -static u16 +static int bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) { struct bfa_fcs_lport_s *port = fdmi->ms->port; - struct bfa_fcs_fdmi_hba_attr_s hba_attr; - struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr; + struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr; struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld; struct fdmi_attr_s *attr; + int len; u8 *curr_ptr; - u16 len, count; - u16 templen; + u16 templen, count; + + fcs_hba_attr = kzalloc(sizeof(*fcs_hba_attr), GFP_KERNEL); + if (!fcs_hba_attr) + return -ENOMEM; /* * get hba attributes @@ -2148,6 +2153,9 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) len += ((sizeof(attr->type) + sizeof(attr->len)) * count); rhba->hba_attr_blk.attr_count = cpu_to_be32(count); + + kfree(fcs_hba_attr); + return len; } diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 5ae1e3f78910..e049cdb3c286 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -711,7 +711,7 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr, char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; bfa_get_adapter_serial_num(&bfad->bfa, serial_num); - return snprintf(buf, PAGE_SIZE, "%s\n", serial_num); + return sysfs_emit(buf, "%s\n", serial_num); } static ssize_t @@ -725,7 +725,7 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr, char model[BFA_ADAPTER_MODEL_NAME_LEN]; bfa_get_adapter_model(&bfad->bfa, model); - return snprintf(buf, PAGE_SIZE, "%s\n", model); + return sysfs_emit(buf, "%s\n", model); } static ssize_t @@ -805,7 +805,7 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "Invalid Model"); - return snprintf(buf, PAGE_SIZE, "%s\n", model_descr); + return sysfs_emit(buf, "%s\n", model_descr); } static ssize_t @@ -819,7 +819,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr, u64 nwwn; nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port); - return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn)); + return sysfs_emit(buf, "0x%llx\n", cpu_to_be64(nwwn)); } static ssize_t @@ -836,7 +836,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr, bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); strlcpy(symname, port_attr.port_cfg.sym_name.symname, BFA_SYMNAME_MAXLEN); - return snprintf(buf, PAGE_SIZE, "%s\n", symname); + return sysfs_emit(buf, "%s\n", symname); } static ssize_t @@ -850,14 +850,14 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr, char hw_ver[BFA_VERSION_LEN]; bfa_get_pci_chip_rev(&bfad->bfa, hw_ver); - return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver); + return sysfs_emit(buf, "%s\n", hw_ver); } static ssize_t bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION); + return sysfs_emit(buf, "%s\n", BFAD_DRIVER_VERSION); } static ssize_t @@ -871,7 +871,7 @@ bfad_im_optionrom_version_show(struct device *dev, char optrom_ver[BFA_VERSION_LEN]; bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver); - return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver); + return sysfs_emit(buf, "%s\n", optrom_ver); } static ssize_t @@ -885,7 +885,7 @@ bfad_im_fw_version_show(struct 
device *dev, struct device_attribute *attr, char fw_ver[BFA_VERSION_LEN]; bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); - return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver); + return sysfs_emit(buf, "%s\n", fw_ver); } static ssize_t @@ -897,7 +897,7 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr, (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", bfa_get_nports(&bfad->bfa)); } @@ -905,7 +905,7 @@ static ssize_t bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME); + return sysfs_emit(buf, "%s\n", BFAD_DRIVER_NAME); } static ssize_t @@ -924,14 +924,14 @@ bfad_im_num_of_discovered_ports_show(struct device *dev, rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s), GFP_ATOMIC); if (rports == NULL) - return snprintf(buf, PAGE_SIZE, "Failed\n"); + return sysfs_emit(buf, "Failed\n"); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports); spin_unlock_irqrestore(&bfad->bfad_lock, flags); kfree(rports); - return snprintf(buf, PAGE_SIZE, "%d\n", nrports); + return sysfs_emit(buf, "%d\n", nrports); } static DEVICE_ATTR(serial_number, S_IRUGO, diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 21efc73b87be..649664dc6da4 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1422,17 +1422,23 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, * Forcefully terminate all in progress connection recovery at the * earliest, either in bind(), send_pdu(LOGIN), or conn_start() */ - if (bnx2i_adapter_ready(hba)) - return -EIO; + if (bnx2i_adapter_ready(hba)) { + ret_code = -EIO; + goto put_ep; + } bnx2i_ep = ep->dd_data; if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) || - (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) + (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) { /* Peer disconnect via' FIN or RST */ - return -EINVAL; + ret_code = -EINVAL; + goto put_ep; + } - if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) - return -EINVAL; + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { + ret_code = -EINVAL; + goto put_ep; + } if (bnx2i_ep->hba != hba) { /* Error - TCP connection does not belong to this device @@ -1443,7 +1449,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, "belong to hba (%s)\n", hba->netdev->name); - return -EEXIST; + ret_code = -EEXIST; + goto put_ep; } bnx2i_ep->conn = bnx2i_conn; bnx2i_conn->ep = bnx2i_ep; @@ -1460,6 +1467,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, bnx2i_put_rq_buf(bnx2i_conn, 0); bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE); +put_ep: + iscsi_put_endpoint(ep); return ret_code; } @@ -2278,6 +2287,7 @@ struct iscsi_transport bnx2i_iscsi_transport = { .destroy_session = bnx2i_session_destroy, .create_conn = bnx2i_conn_create, .bind_conn = bnx2i_conn_bind, + .unbind_conn = iscsi_conn_unbind, .destroy_conn = bnx2i_conn_destroy, .attr_is_visible = bnx2i_attr_is_visible, .set_param = iscsi_set_param, diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 37d99357120f..edcd3fab6973 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -117,6 +117,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = { /* connection 
management */ .create_conn = cxgbi_create_conn, .bind_conn = cxgbi_bind_conn, + .unbind_conn = iscsi_conn_unbind, .destroy_conn = iscsi_tcp_conn_teardown, .start_conn = iscsi_conn_start, .stop_conn = iscsi_conn_stop, diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 2c3491528d42..efb3e2b3398e 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -134,6 +134,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = { /* connection management */ .create_conn = cxgbi_create_conn, .bind_conn = cxgbi_bind_conn, + .unbind_conn = iscsi_conn_unbind, .destroy_conn = iscsi_tcp_conn_teardown, .start_conn = iscsi_conn_start, .stop_conn = iscsi_conn_stop, diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index ecb134b4699f..506b561670af 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -2690,11 +2690,13 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, ppm->tformat.pgsz_idx_dflt); if (err < 0) - return err; + goto put_ep; err = iscsi_conn_bind(cls_session, cls_conn, is_leading); - if (err) - return -EINVAL; + if (err) { + err = -EINVAL; + goto put_ep; + } /* calculate the tag idx bits needed for this conn based on cmds_max */ cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; @@ -2715,7 +2717,9 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, /* init recv engine */ iscsi_tcp_hdr_recv_prep(tcp_conn); - return 0; +put_ep: + iscsi_put_endpoint(ep); + return err; } EXPORT_SYMBOL_GPL(cxgbi_bind_conn); diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 6cb48ae8e124..d8967bf72ef2 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -3631,10 +3631,19 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, #endif if (dcb->target_lun != 0) { /* Copy settings */ - struct DeviceCtlBlk *p; - list_for_each_entry(p, &acb->dcb_list, list) - if (p->target_id == dcb->target_id) + struct DeviceCtlBlk *p = NULL, *iter; + + list_for_each_entry(iter, &acb->dcb_list, list) + if (iter->target_id == dcb->target_id) { + p = iter; break; + } + + if (!p) { + kfree(dcb); + return NULL; + } + dprintkdbg(DBG_1, "device_alloc: <%02i-%i> copy from <%02i-%i>\n", dcb->target_id, dcb->target_lun, diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 5ea426effa60..bbc5d6b9be73 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -1969,7 +1969,7 @@ EXPORT_SYMBOL(fcoe_ctlr_recv_flogi); * * Returns: u64 fc world wide name */ -u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], +u64 fcoe_wwn_from_mac(unsigned char mac[ETH_ALEN], unsigned int scheme, unsigned int port) { u64 wwn; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index a8998b016b86..cd41dc061d87 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -2372,17 +2372,25 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) return IRQ_WAKE_THREAD; } +static void hisi_sas_v3_free_vectors(void *data) +{ + struct pci_dev *pdev = data; + + pci_free_irq_vectors(pdev); +} + static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba) { int vectors; int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi; struct Scsi_Host *shost = hisi_hba->shost; + struct pci_dev *pdev = hisi_hba->pci_dev; struct irq_affinity desc = { .pre_vectors = BASE_VECTORS_V3_HW, }; min_msi = 
MIN_AFFINE_VECTORS_V3_HW; - vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev, + vectors = pci_alloc_irq_vectors_affinity(pdev, min_msi, max_msi, PCI_IRQ_MSI | PCI_IRQ_AFFINITY, @@ -2394,6 +2402,7 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba) hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW; shost->nr_hw_queues = hisi_hba->cq_nvecs; + devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev); return 0; } @@ -3313,7 +3322,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(dev, "%d hw queues\n", shost->nr_hw_queues); rc = scsi_add_host(shost, dev); if (rc) - goto err_out_free_irq_vectors; + goto err_out_debugfs; rc = sas_register_ha(sha); if (rc) @@ -3340,8 +3349,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_out_register_ha: scsi_remove_host(shost); -err_out_free_irq_vectors: - pci_free_irq_vectors(pdev); err_out_debugfs: hisi_sas_debugfs_exit(hisi_hba); err_out_ha: @@ -3369,7 +3376,6 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq); } - pci_free_irq_vectors(pdev); } static void hisi_sas_v3_remove(struct pci_dev *pdev) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index f6d6539c657f..b793e342ab7c 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -635,8 +635,13 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost) memset(vhost->async_crq.msgs, 0, PAGE_SIZE); vhost->async_crq.cur = 0; - list_for_each_entry(tgt, &vhost->targets, queue) - ibmvfc_del_tgt(tgt); + list_for_each_entry(tgt, &vhost->targets, queue) { + if (vhost->client_migrated) + tgt->need_login = 1; + else + ibmvfc_del_tgt(tgt); + } + scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); vhost->job_step = ibmvfc_npiv_login; @@ -2822,9 +2827,12 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) /* We need to re-setup the interpartition connection */ dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n"); vhost->client_migrated = 1; + + scsi_block_requests(vhost->host); ibmvfc_purge_requests(vhost, DID_REQUEUE); - ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); + wake_up(&vhost->work_wait_q); } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) { dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format); ibmvfc_purge_requests(vhost, DID_ERROR); diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index cc3908c2d2f9..a3431485def8 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -35,7 +35,7 @@ #define IBMVSCSIS_VERSION "v0.2" -#define INITIAL_SRP_LIMIT 800 +#define INITIAL_SRP_LIMIT 1024 #define DEFAULT_MAX_SECTORS 256 #define MAX_TXU 1024 * 1024 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index b0aa58d117cc..90e8a538b078 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -9792,7 +9792,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) GFP_KERNEL); if (!ioa_cfg->hrrq[i].host_rrq) { - while (--i > 0) + while (--i >= 0) dma_free_coherent(&pdev->dev, sizeof(u32) * ioa_cfg->hrrq[i].size, ioa_cfg->hrrq[i].host_rrq, @@ -10065,7 +10065,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg, 
ioa_cfg->vectors_info[i].desc, &ioa_cfg->hrrq[i]); if (rc) { - while (--i >= 0) + while (--i > 0) free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); return rc; diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index df47557a02a3..6485c1aa9e74 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -558,6 +558,8 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session, tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; + mutex_init(&tcp_sw_conn->sock_lock); + tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) goto free_conn; @@ -592,11 +594,15 @@ free_conn: static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn) { - struct iscsi_session *session = conn->session; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct socket *sock = tcp_sw_conn->sock; + /* + * The iscsi transport class will make sure we are not called in + * parallel with start, stop, bind and destroys. However, this can be + * called twice if userspace does a stop then a destroy. + */ if (!sock) return; @@ -604,9 +610,9 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn) iscsi_sw_tcp_conn_restore_callbacks(conn); sock_put(sock->sk); - spin_lock_bh(&session->frwd_lock); + mutex_lock(&tcp_sw_conn->sock_lock); tcp_sw_conn->sock = NULL; - spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&tcp_sw_conn->sock_lock); sockfd_put(sock); } @@ -658,7 +664,6 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, int is_leading) { - struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; @@ -678,10 +683,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, if (err) goto free_socket; - spin_lock_bh(&session->frwd_lock); + mutex_lock(&tcp_sw_conn->sock_lock); /* bind iSCSI connection and socket */ tcp_sw_conn->sock = sock; - spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&tcp_sw_conn->sock_lock); /* setup Socket parameters */ sk = sock->sk; @@ -717,8 +722,15 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn, break; case ISCSI_PARAM_DATADGST_EN: iscsi_set_param(cls_conn, param, buf, buflen); + + mutex_lock(&tcp_sw_conn->sock_lock); + if (!tcp_sw_conn->sock) { + mutex_unlock(&tcp_sw_conn->sock_lock); + return -ENOTCONN; + } tcp_sw_conn->sendpage = conn->datadgst_en ? 
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage; + mutex_unlock(&tcp_sw_conn->sock_lock); break; case ISCSI_PARAM_MAX_R2T: return iscsi_tcp_set_max_r2t(conn, buf); @@ -733,8 +745,8 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { struct iscsi_conn *conn = cls_conn->dd_data; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn; + struct iscsi_tcp_conn *tcp_conn; struct sockaddr_in6 addr; struct socket *sock; int rc; @@ -744,21 +756,36 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_LOCAL_PORT: spin_lock_bh(&conn->session->frwd_lock); - if (!tcp_sw_conn || !tcp_sw_conn->sock) { + if (!conn->session->leadconn) { spin_unlock_bh(&conn->session->frwd_lock); return -ENOTCONN; } - sock = tcp_sw_conn->sock; - sock_hold(sock->sk); + /* + * The conn has been set up and bound, so just grab a ref + * in case a destroy runs while we are in the net layer. + */ + iscsi_get_conn(conn->cls_conn); spin_unlock_bh(&conn->session->frwd_lock); + tcp_conn = conn->dd_data; + tcp_sw_conn = tcp_conn->dd_data; + + mutex_lock(&tcp_sw_conn->sock_lock); + sock = tcp_sw_conn->sock; + if (!sock) { + rc = -ENOTCONN; + goto sock_unlock; + } + if (param == ISCSI_PARAM_LOCAL_PORT) rc = kernel_getsockname(sock, (struct sockaddr *)&addr); else rc = kernel_getpeername(sock, (struct sockaddr *)&addr); - sock_put(sock->sk); +sock_unlock: + mutex_unlock(&tcp_sw_conn->sock_lock); + iscsi_put_conn(conn->cls_conn); if (rc < 0) return rc; @@ -796,17 +823,21 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, } tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; - sock = tcp_sw_conn->sock; - if (!sock) { - spin_unlock_bh(&session->frwd_lock); - return -ENOTCONN; - } - sock_hold(sock->sk); + /* + * The conn has been set up and bound, so just grab a ref + * in case a destroy runs while we are in the net layer.
+ */ + iscsi_get_conn(conn->cls_conn); spin_unlock_bh(&session->frwd_lock); - rc = kernel_getsockname(sock, - (struct sockaddr *)&addr); - sock_put(sock->sk); + mutex_lock(&tcp_sw_conn->sock_lock); + sock = tcp_sw_conn->sock; + if (!sock) + rc = -ENOTCONN; + else + rc = kernel_getsockname(sock, (struct sockaddr *)&addr); + mutex_unlock(&tcp_sw_conn->sock_lock); + iscsi_put_conn(conn->cls_conn); if (rc < 0) return rc; diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 791453195099..1731956326e2 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -28,6 +28,8 @@ struct iscsi_sw_tcp_send { struct iscsi_sw_tcp_conn { struct socket *sock; + /* Taken when accessing the sock from the netlink/sysfs interface */ + struct mutex sock_lock; struct iscsi_sw_tcp_send out; /* old values for socket callbacks */ diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index a50f1eef0e0c..4261380af97b 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1702,6 +1702,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) if (cancel_delayed_work_sync(&ep->timeout_work)) { FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n"); fc_exch_release(ep); /* release from pending timer hold */ + return; } spin_lock_bh(&ep->ex_lock); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index d4e66c595eb8..05799b41974d 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1367,23 +1367,32 @@ void iscsi_session_failure(struct iscsi_session *session, } EXPORT_SYMBOL_GPL(iscsi_session_failure); -void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) +static bool iscsi_set_conn_failed(struct iscsi_conn *conn) { struct iscsi_session *session = conn->session; - spin_lock_bh(&session->frwd_lock); - if (session->state == ISCSI_STATE_FAILED) { - spin_unlock_bh(&session->frwd_lock); - return; - } + if (session->state == ISCSI_STATE_FAILED) + return false; if (conn->stop_stage == 0) session->state = ISCSI_STATE_FAILED; - spin_unlock_bh(&session->frwd_lock); set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); - iscsi_conn_error_event(conn->cls_conn, err); + return true; +} + +void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) +{ + struct iscsi_session *session = conn->session; + bool needs_evt; + + spin_lock_bh(&session->frwd_lock); + needs_evt = iscsi_set_conn_failed(conn); + spin_unlock_bh(&session->frwd_lock); + + if (needs_evt) + iscsi_conn_error_event(conn->cls_conn, err); } EXPORT_SYMBOL_GPL(iscsi_conn_failure); @@ -2117,6 +2126,51 @@ done: spin_unlock(&session->frwd_lock); } +/** + * iscsi_conn_unbind - prevent queueing to conn. + * @cls_conn: iscsi conn the ep is bound to. + * @is_active: is the conn in use for boot or is this for EH/termination + * + * This must be called by drivers implementing the ep_disconnect callout. + * It disables queueing to the connection from libiscsi in preparation for + * an ep_disconnect call. + */ +void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active) +{ + struct iscsi_session *session; + struct iscsi_conn *conn; + + if (!cls_conn) + return; + + conn = cls_conn->dd_data; + session = conn->session; + /* + * Wait for iscsi_eh calls to exit. We don't wait for the tmf to + * complete or time out. The caller just wants to know that what's + * running is everything that needs to be cleaned up, and that no + * new cmds will be queued.
+ */ + mutex_lock(&session->eh_mutex); + + iscsi_suspend_queue(conn); + iscsi_suspend_tx(conn); + + spin_lock_bh(&session->frwd_lock); + if (!is_active) { + /* + * if logout timed out before userspace could even send a PDU + * the state might still be in ISCSI_STATE_LOGGED_IN and + * allowing new cmds and TMFs. + */ + if (session->state == ISCSI_STATE_LOGGED_IN) + iscsi_set_conn_failed(conn); + } + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); +} +EXPORT_SYMBOL_GPL(iscsi_conn_unbind); + static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, struct iscsi_tm *hdr) { diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 8d6bcc19359f..51485d0251f2 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -85,7 +85,7 @@ static int smp_execute_task_sg(struct domain_device *dev, res = i->dft->lldd_execute_task(task, GFP_KERNEL); if (res) { - del_timer(&task->slow_task->timer); + del_timer_sync(&task->slow_task->timer); pr_notice("executing SMP task failed:%d\n", res); break; } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index beaf3a8d206f..fbc76d69ea0b 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -2609,8 +2609,8 @@ lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf, struct lpfc_sli4_hdw_queue *qp; struct lpfc_multixri_pool *multixri_pool; - if (nbytes > 64) - nbytes = 64; + if (nbytes > sizeof(mybuf) - 1) + nbytes = sizeof(mybuf) - 1; memset(mybuf, 0, sizeof(mybuf)); @@ -2690,8 +2690,8 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, if (!phba->targetport) return -ENXIO; - if (nbytes > 64) - nbytes = 64; + if (nbytes > sizeof(mybuf) - 1) + nbytes = sizeof(mybuf) - 1; memset(mybuf, 0, sizeof(mybuf)); @@ -2828,8 +2828,8 @@ lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf, char mybuf[64]; char *pbuf; - if (nbytes > 64) - nbytes = 64; + if (nbytes > sizeof(mybuf) - 1) + nbytes = sizeof(mybuf) - 1; memset(mybuf, 0, sizeof(mybuf)); @@ -2956,8 +2956,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf, char mybuf[64]; char *pbuf; - if (nbytes > 63) - nbytes = 63; + if (nbytes > sizeof(mybuf) - 1) + nbytes = sizeof(mybuf) - 1; memset(mybuf, 0, sizeof(mybuf)); @@ -3062,8 +3062,8 @@ lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf, char *pbuf; int i; - if (nbytes > 64) - nbytes = 64; + if (nbytes > sizeof(mybuf) - 1) + nbytes = sizeof(mybuf) - 1; memset(mybuf, 0, sizeof(mybuf)); diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 47e832b7f2c2..bfbc1c4fcab1 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -4281,6 +4281,9 @@ struct wqe_common { #define wqe_sup_SHIFT 6 #define wqe_sup_MASK 0x00000001 #define wqe_sup_WORD word11 +#define wqe_ffrq_SHIFT 6 +#define wqe_ffrq_MASK 0x00000001 +#define wqe_ffrq_WORD word11 #define wqe_wqec_SHIFT 7 #define wqe_wqec_MASK 0x00000001 #define wqe_wqec_WORD word11 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 1149bfc42fe6..17200b453cbb 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -6670,7 +6670,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* Allocate device driver memory */ rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); if (rc) - return -ENOMEM; + goto out_destroy_workqueue; /* IF Type 2 ports get initialized now. 
*/ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= @@ -7076,6 +7076,9 @@ out_free_bsmbx: lpfc_destroy_bootstrap_mbox(phba); out_free_mem: lpfc_mem_free(phba); +out_destroy_workqueue: + destroy_workqueue(phba->wq); + phba->wq = NULL; return rc; } @@ -13614,6 +13617,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev) psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); + /* Init cpu_map array */ + lpfc_cpu_map_array_init(phba); /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index e33f752318c1..1e22364a31fc 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -857,7 +857,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_nvmet_invalidate_host(phba, ndlp); if (ndlp->nlp_DID == Fabric_DID) { - if (vport->port_state <= LPFC_FDISC) + if (vport->port_state <= LPFC_FDISC || + vport->fc_flag & FC_PT2PT) goto out; lpfc_linkdown_port(vport); spin_lock_irq(shost->host_lock); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 03c81cec6bc9..ef92e0b4b9cf 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1315,7 +1315,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, { struct lpfc_hba *phba = vport->phba; struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; - struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq); + struct nvme_common_command *sqe; + struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq; union lpfc_wqe128 *wqe = &pwqeq->wqe; uint32_t req_len; @@ -1371,8 +1372,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, cstat->control_requests++; } - if (pnode->nlp_nvme_info & NLP_NVME_NSLER) + if (pnode->nlp_nvme_info & NLP_NVME_NSLER) { bf_set(wqe_erp, &wqe->generic.wqe_com, 1); + sqe = &((struct nvme_fc_cmd_iu *) + nCmd->cmdaddr)->sqe.common; + if (sqe->opcode == nvme_admin_async_event) + bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1); + } + /* * Finish initializing those WQE fields that are independent * of the nvme_cmnd request_buffer diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a50f870c5f72..755d68b98160 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -17445,7 +17445,6 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) case FC_RCTL_ELS_REP: /* extended link services reply */ case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ - case FC_RCTL_BA_NOP: /* basic link service NOP */ case FC_RCTL_BA_ABTS: /* basic link service abort */ case FC_RCTL_BA_RMC: /* remove connection */ case FC_RCTL_BA_ACC: /* basic accept */ @@ -17466,6 +17465,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) fc_vft_hdr = (struct fc_vft_header *)fc_hdr; fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; return lpfc_fc_frame_check(phba, fc_hdr); + case FC_RCTL_BA_NOP: /* basic link service NOP */ default: goto drop; } @@ -18284,12 +18284,14 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, if (!lpfc_complete_unsol_iocb(phba, phba->sli4_hba.els_wq->pring, iocbq, fc_hdr->fh_r_ctl, - fc_hdr->fh_type)) + fc_hdr->fh_type)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2540 Ring %d handler: unexpected Rctl " "x%x Type x%x received\n", LPFC_ELS_RING, fc_hdr->fh_r_ctl, fc_hdr->fh_type); + lpfc_in_buf_free(phba, &seq_dmabuf->dbuf); + } /* Free iocb 
created in lpfc_prep_seq */ list_for_each_entry_safe(curr_iocb, next_iocb, diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 80f546976c7e..daffa36988ae 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -4634,7 +4634,7 @@ static int __init megaraid_init(void) * major number allocation. */ major = register_chrdev(0, "megadev_legacy", &megadev_fops); - if (!major) { + if (major < 0) { printk(KERN_WARNING "megaraid: failed to register char device\n"); } diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 6b8ec57e8bdf..c088a848776e 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2554,6 +2554,9 @@ struct megasas_instance_template { #define MEGASAS_IS_LOGICAL(sdev) \ ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) +#define MEGASAS_IS_LUN_VALID(sdev) \ + (((sdev)->lun == 0) ? 1 : 0) + #define MEGASAS_DEV_INDEX(scp) \ (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ scp->device->id) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 1a70cc995c28..84a2e9292fd0 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2111,6 +2111,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev) goto scan_target; } return -ENXIO; + } else if (!MEGASAS_IS_LUN_VALID(sdev)) { + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); + return -ENXIO; } scan_target: @@ -2141,6 +2144,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev) instance = megasas_lookup_instance(sdev->host->host_no); if (MEGASAS_IS_LOGICAL(sdev)) { + if (!MEGASAS_IS_LUN_VALID(sdev)) { + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); + return; + } ld_tgt_id = MEGASAS_TARGET_ID(sdev); instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED; if (megasas_dbg_lvl & LD_PD_DEBUG) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 13022a42fd6f..7838c7911add 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -5198,7 +5198,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance) if (!fusion->log_to_span) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); - kfree(instance->ctrl_context); return -ENOMEM; } } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 3153f164554a..c1b76cda60db 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2822,23 +2822,22 @@ static int _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { struct sysinfo s; - int dma_mask; if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma || - dma_get_required_mask(&pdev->dev) <= 32) - dma_mask = 32; + dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) + ioc->dma_mask = 32; /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) - dma_mask = 63; + ioc->dma_mask = 63; else - dma_mask = 64; + ioc->dma_mask = 64; - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) || - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask))) + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) || + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask))) return -ENODEV; - if (dma_mask > 32) { + if (ioc->dma_mask > 32) { ioc->base_add_sg_single = 
&_base_add_sg_single_64; ioc->sge_size = sizeof(Mpi2SGESimple64_t); } else { @@ -2848,7 +2847,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) si_meminfo(&s); ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", - dma_mask, convert_to_kb(s.totalram)); + ioc->dma_mask, convert_to_kb(s.totalram)); return 0; } @@ -4902,10 +4901,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) dma_pool_free(ioc->pcie_sgl_dma_pool, ioc->pcie_sg_lookup[i].pcie_sgl, ioc->pcie_sg_lookup[i].pcie_sgl_dma); + ioc->pcie_sg_lookup[i].pcie_sgl = NULL; } dma_pool_destroy(ioc->pcie_sgl_dma_pool); } - if (ioc->config_page) { dexitprintk(ioc, ioc_info(ioc, "config_page(0x%p): free\n", @@ -4960,6 +4959,89 @@ mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz) return 0; } +/** + * _base_reduce_hba_queue_depth - Retry with reduced queue depth + * @ioc: Adapter object + * + * Return: 0 for success, non-zero for failure. + **/ +static inline int +_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc) +{ + int reduce_sz = 64; + + if ((ioc->hba_queue_depth - reduce_sz) > + (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { + ioc->hba_queue_depth -= reduce_sz; + return 0; + } else + return -ENOMEM; +} + +/** + * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory + * for pcie sgl pools. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ + +static int +_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + int i = 0, j = 0; + struct chain_tracker *ct; + + ioc->pcie_sgl_dma_pool = + dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, + ioc->page_size, 0); + if (!ioc->pcie_sgl_dma_pool) { + ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n"); + return -ENOMEM; + } + + ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; + ioc->chains_per_prp_buffer = + min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io); + for (i = 0; i < ioc->scsiio_depth; i++) { + ioc->pcie_sg_lookup[i].pcie_sgl = + dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL, + &ioc->pcie_sg_lookup[i].pcie_sgl_dma); + if (!ioc->pcie_sg_lookup[i].pcie_sgl) { + ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n"); + return -EAGAIN; + } + + if (!mpt3sas_check_same_4gb_region( + (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) { + ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n", + ioc->pcie_sg_lookup[i].pcie_sgl, + (unsigned long long) + ioc->pcie_sg_lookup[i].pcie_sgl_dma); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + + for (j = 0; j < ioc->chains_per_prp_buffer; j++) { + ct = &ioc->chain_lookup[i].chains_per_smid[j]; + ct->chain_buffer = + ioc->pcie_sg_lookup[i].pcie_sgl + + (j * ioc->chain_segment_sz); + ct->chain_buffer_dma = + ioc->pcie_sg_lookup[i].pcie_sgl_dma + + (j * ioc->chain_segment_sz); + } + } + dinitprintk(ioc, ioc_info(ioc, + "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n", + ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); + dinitprintk(ioc, ioc_info(ioc, + "Number of chains can fit in a PRP page(%d)\n", + ioc->chains_per_prp_buffer)); + return 0; +} + /** * base_alloc_rdpq_dma_pool - Allocating DMA'able memory * for reply queues.
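The -EAGAIN paths above hinge on mpt3sas_check_same_4gb_region(): a DMA pool element whose bytes straddle a 4 GiB boundary would break the assumption that the upper 32 address bits are constant across the buffer, so the driver flips to 32-bit DMA and reallocates. A minimal userspace sketch of that boundary test (same_4gb_region() is a hypothetical stand-in, not the kernel helper):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Hypothetical stand-in for mpt3sas_check_same_4gb_region(): true when
     * [start, start + sz) fits in one 4 GiB region, i.e. the upper 32
     * address bits of the first and last byte match.
     */
    static bool same_4gb_region(uint64_t start, uint32_t sz)
    {
            return (start >> 32) == ((start + sz - 1) >> 32);
    }

    int main(void)
    {
            /* 64 KiB ending exactly at the 4 GiB line stays in one region... */
            printf("%d\n", same_4gb_region(0xFFFF0000ULL, 0x10000)); /* 1 */
            /* ...one more byte spills into the next region: reallocate. */
            printf("%d\n", same_4gb_region(0xFFFF0000ULL, 0x10001)); /* 0 */
            return 0;
    }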
@@ -5058,7 +5140,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) unsigned short sg_tablesize; u16 sge_size; int i, j; - int ret = 0; + int ret = 0, rc = 0; struct chain_tracker *ct; dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); @@ -5357,6 +5439,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) * be required for NVMe PRP's, only each set of NVMe blocks will be * contiguous, so a new set is allocated for each possible I/O. */ + ioc->chains_per_prp_buffer = 0; if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { nvme_blocks_needed = @@ -5371,43 +5454,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) goto out; } sz = nvme_blocks_needed * ioc->page_size; - ioc->pcie_sgl_dma_pool = - dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0); - if (!ioc->pcie_sgl_dma_pool) { - ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n"); - goto out; - } - - ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; - ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer, - ioc->chains_needed_per_io); - - for (i = 0; i < ioc->scsiio_depth; i++) { - ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc( - ioc->pcie_sgl_dma_pool, GFP_KERNEL, - &ioc->pcie_sg_lookup[i].pcie_sgl_dma); - if (!ioc->pcie_sg_lookup[i].pcie_sgl) { - ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n"); - goto out; - } - for (j = 0; j < ioc->chains_per_prp_buffer; j++) { - ct = &ioc->chain_lookup[i].chains_per_smid[j]; - ct->chain_buffer = - ioc->pcie_sg_lookup[i].pcie_sgl + - (j * ioc->chain_segment_sz); - ct->chain_buffer_dma = - ioc->pcie_sg_lookup[i].pcie_sgl_dma + - (j * ioc->chain_segment_sz); - } - } - - dinitprintk(ioc, - ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n", - ioc->scsiio_depth, sz, - (sz * ioc->scsiio_depth) / 1024)); - dinitprintk(ioc, - ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n", - ioc->chains_per_prp_buffer)); + rc = _base_allocate_pcie_sgl_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; total_sz += sz * ioc->scsiio_depth; } @@ -5577,6 +5628,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->shost->sg_tablesize); return 0; +try_32bit_dma: + _base_release_memory_pools(ioc); + if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) { + /* Change dma coherent mask to 32 bit and reallocate */ + if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { + pr_err("Setting 32 bit coherent DMA mask Failed %s\n", + pci_name(ioc->pdev)); + return -ENODEV; + } + } else if (_base_reduce_hba_queue_depth(ioc) != 0) + return -ENOMEM; + goto retry_allocation; + out: return -ENOMEM; } @@ -7239,6 +7303,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->rdpq_array_enable_assigned = 0; ioc->use_32bit_dma = false; + ioc->dma_mask = 64; if (ioc->is_aero_ioc) ioc->base_readl = &_base_readl_aero; else diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index bc8beb10f3fc..823bbe64a477 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -1257,6 +1257,7 @@ struct MPT3SAS_ADAPTER { u16 thresh_hold; u8 high_iops_queues; u32 drv_support_bitmap; + u32 dma_mask; bool enable_sdev_max_qd; bool use_32bit_dma; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 8418b59b3743..c3a5978b0efa 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3501,6 +3501,7 @@ static struct fw_event_work 
*dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) fw_event = list_first_entry(&ioc->fw_event_list, struct fw_event_work, list); list_del_init(&fw_event->list); + fw_event_work_put(fw_event); } spin_unlock_irqrestore(&ioc->fw_event_lock, flags); @@ -3559,7 +3560,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) if (cancel_work_sync(&fw_event->work)) fw_event_work_put(fw_event); - fw_event_work_put(fw_event); } ioc->fw_events_cleanup = 0; } diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index b03c0f35d7b0..85ca8421fb86 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -646,6 +646,7 @@ static struct pci_device_id mvs_pci_table[] = { { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, + { PCI_VDEVICE(TTI, 0x2640), chip_6440 }, { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, { PCI_VDEVICE(TTI, 0x2721), chip_9480 }, @@ -697,7 +698,7 @@ static ssize_t mvs_show_driver_version(struct device *cdev, struct device_attribute *attr, char *buffer) { - return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION); + return sysfs_emit(buffer, "%s\n", DRV_VERSION); } static DEVICE_ATTR(driver_version, @@ -749,7 +750,7 @@ mvs_store_interrupt_coalescing(struct device *cdev, static ssize_t mvs_show_interrupt_coalescing(struct device *cdev, struct device_attribute *attr, char *buffer) { - return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing); + return sysfs_emit(buffer, "%d\n", interrupt_coalescing); } static DEVICE_ATTR(interrupt_coalescing, diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index 5fa0f4ed6565..ad17c2beaaca 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -1241,7 +1241,8 @@ static void myrb_cleanup(struct myrb_hba *cb) myrb_unmap(cb); if (cb->mmio_base) { - cb->disable_intr(cb->io_base); + if (cb->disable_intr) + cb->disable_intr(cb->io_base); iounmap(cb->mmio_base); } if (cb->irq) @@ -3515,9 +3516,13 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev, mutex_init(&cb->dcmd_mutex); mutex_init(&cb->dma_mutex); cb->pdev = pdev; + cb->host = shost; - if (pci_enable_device(pdev)) - goto failure; + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + scsi_host_put(shost); + return NULL; + } if (privdata->hw_init == DAC960_PD_hw_init || privdata->hw_init == DAC960_P_hw_init) { diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index cd0e1d31db70..da9fbe62a34d 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1711,7 +1711,6 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, } task = sas_alloc_slow_task(GFP_ATOMIC); - if (!task) { pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n"); return; @@ -1720,8 +1719,10 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, task->task_done = pm8001_task_done; res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); - if (res) + if (res) { + sas_free_task(task); return; + } ccb = &pm8001_ha->ccb_info[ccb_tag]; ccb->device = pm8001_ha_dev; @@ -1738,8 +1739,10 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, sizeof(task_abort), 0); - if (ret) + if (ret) { + sas_free_task(task); pm8001_tag_free(pm8001_ha, ccb_tag); + } } @@ -3669,12 +3672,11 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info 
*pm8001_ha, void *piomb) mb(); if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) { - pm8001_tag_free(pm8001_ha, tag); sas_free_task(t); - /* clear the flag */ - pm8001_dev->id &= 0xBFFFFFFF; - } else + pm8001_dev->id &= ~NCQ_ABORT_ALL_FLAG; + } else { t->task_done(t); + } return 0; } @@ -4428,6 +4430,9 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, SAS_ADDR_SIZE); rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + return rc; } @@ -4840,6 +4845,11 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, ccb->ccb_tag = tag; rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, tag); + if (rc) { + kfree(fw_control_context); + pm8001_tag_free(pm8001_ha, tag); + } + return rc; } @@ -4944,6 +4954,9 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, payload.nds = cpu_to_le32(state); rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + return rc; } diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 75ac4d86d9c4..ba5852548bee 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -831,10 +831,10 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, res = PM8001_CHIP_DISP->task_abort(pm8001_ha, pm8001_dev, flag, task_tag, ccb_tag); - if (res) { del_timer(&task->slow_task->timer); pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n"); + pm8001_tag_free(pm8001_ha, ccb_tag); goto ex_err; } wait_for_completion(&task->slow_task->completion); diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index b5e60553acdc..0305c8999ba5 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -66,18 +66,16 @@ int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value) } static void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset, - const void *destination, + __le32 *destination, u32 dw_count, u32 bus_base_number) { u32 index, value, offset; - u32 *destination1; - destination1 = (u32 *)destination; - for (index = 0; index < dw_count; index += 4, destination1++) { + for (index = 0; index < dw_count; index += 4, destination++) { offset = (soffset + index); if (offset < (64 * 1024)) { value = pm8001_cr32(pm8001_ha, bus_base_number, offset); - *destination1 = cpu_to_le32(value); + *destination = cpu_to_le32(value); } } return; @@ -767,6 +765,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; + /* Enable higher IQs and OQs, 32 to 63, bit 16 */ + if (pm8001_ha->max_q_num > 32) + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |= + 1 << 16; /* Disable end to end CRC checking */ pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); @@ -1026,6 +1028,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) if (0x0000 != gst_len_mpistate) return -EBUSY; + /* + * As per controller datasheet, after successful MPI + * initialization minimum 500ms delay is required before + * issuing commands. 
+ */ + msleep(500); + return 0; } @@ -1684,10 +1693,11 @@ static void pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX - u32 mask; - mask = (u32)(1 << vec); - - pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF)); + if (vec < 32) + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec); + else + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U, + 1U << (vec - 32)); return; #endif pm80xx_chip_intx_interrupt_enable(pm8001_ha); @@ -1703,12 +1713,15 @@ static void pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX - u32 mask; - if (vec == 0xFF) - mask = 0xFFFFFFFF; + if (vec == 0xFF) { + /* disable all vectors 0-31, 32-63 */ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF); + } else if (vec < 32) + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec); else - mask = (u32)(1 << vec); - pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF)); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, + 1U << (vec - 32)); return; #endif pm80xx_chip_intx_interrupt_disable(pm8001_ha); @@ -4849,8 +4862,13 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.phyop_phyid = cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF)); - return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, - sizeof(payload), 0); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + + return rc; } static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha) diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index cbe5fab793eb..ce10d680c56c 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -4528,7 +4528,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) return 0; out_unwind: - while (--i > 0) + while (--i >= 0) free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]); pci_free_irq_vectors(pdev); return rc; diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index e64457f53da8..f48ef47546f4 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1917,6 +1917,27 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled) fc_vport_setlink(vn_port); } + /* Set symbolic node name */ + if (base_qedf->pdev->device == QL45xxx) + snprintf(fc_host_symbolic_name(vn_port->host), 256, + "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION); + + if (base_qedf->pdev->device == QL41xxx) + snprintf(fc_host_symbolic_name(vn_port->host), 256, + "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION); + + /* Set supported speed */ + fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds; + + /* Set speed */ + vn_port->link_speed = n_port->link_speed; + + /* Set port type */ + fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV; + + /* Set maxframe size */ + fc_host_maxframe_size(vn_port->host) = n_port->mfs; + QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n", vn_port); @@ -3671,11 +3692,6 @@ err2: err1: scsi_host_put(lport->host); err0: - if (qedf) { - QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); - - clear_bit(QEDF_PROBING, &qedf->flags); - } return rc; } diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index f51723e2d522..3bcadb3dd40d 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -387,6 +387,7 @@ static int qedi_conn_bind(struct 
iscsi_cls_session *cls_session, struct qedi_ctx *qedi = iscsi_host_priv(shost); struct qedi_endpoint *qedi_ep; struct iscsi_endpoint *ep; + int rc = 0; ep = iscsi_lookup_endpoint(transport_fd); if (!ep) @@ -394,11 +395,16 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session, qedi_ep = ep->dd_data; if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) || - (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) - return -EINVAL; + (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) { + rc = -EINVAL; + goto put_ep; + } + + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { + rc = -EINVAL; + goto put_ep; + } - if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) - return -EINVAL; qedi_ep->conn = qedi_conn; qedi_conn->ep = qedi_ep; @@ -408,13 +414,18 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session, qedi_conn->cmd_cleanup_req = 0; qedi_conn->cmd_cleanup_cmpl = 0; - if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) - return -EINVAL; + if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) { + rc = -EINVAL; + goto put_ep; + } + spin_lock_init(&qedi_conn->tmf_work_lock); INIT_LIST_HEAD(&qedi_conn->tmf_work_list); init_waitqueue_head(&qedi_conn->wait_queue); - return 0; +put_ep: + iscsi_put_endpoint(ep); + return rc; } static int qedi_iscsi_update_conn(struct qedi_ctx *qedi, @@ -817,6 +828,37 @@ static int qedi_task_xmit(struct iscsi_task *task) return qedi_iscsi_send_ioreq(task); } +static void qedi_offload_work(struct work_struct *work) +{ + struct qedi_endpoint *qedi_ep = + container_of(work, struct qedi_endpoint, offload_work); + struct qedi_ctx *qedi; + int wait_delay = 5 * HZ; + int ret; + + qedi = qedi_ep->qedi; + + ret = qedi_iscsi_offload_conn(qedi_ep); + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, + "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n", + qedi_ep->iscsi_cid, qedi_ep, ret); + qedi_ep->state = EP_STATE_OFLDCONN_FAILED; + return; + } + + ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait, + (qedi_ep->state == + EP_STATE_OFLDCONN_COMPL), + wait_delay); + if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) { + qedi_ep->state = EP_STATE_OFLDCONN_FAILED; + QEDI_ERR(&qedi->dbg_ctx, + "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n", + qedi_ep->iscsi_cid, qedi_ep); + } +} + static struct iscsi_endpoint * qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) @@ -865,6 +907,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, } qedi_ep = ep->dd_data; memset(qedi_ep, 0, sizeof(struct qedi_endpoint)); + INIT_WORK(&qedi_ep->offload_work, qedi_offload_work); qedi_ep->state = EP_STATE_IDLE; qedi_ep->iscsi_cid = (u32)-1; qedi_ep->qedi = qedi; @@ -1015,12 +1058,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) qedi_ep = ep->dd_data; qedi = qedi_ep->qedi; + flush_work(&qedi_ep->offload_work); + if (qedi_ep->state == EP_STATE_OFLDCONN_START) goto ep_exit_recover; - if (qedi_ep->state != EP_STATE_OFLDCONN_NONE) - flush_work(&qedi_ep->offload_work); - if (qedi_ep->conn) { qedi_conn = qedi_ep->conn; conn = qedi_conn->cls_conn->dd_data; @@ -1185,37 +1227,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid) return rc; } -static void qedi_offload_work(struct work_struct *work) -{ - struct qedi_endpoint *qedi_ep = - container_of(work, struct qedi_endpoint, offload_work); - struct qedi_ctx *qedi; - int wait_delay = 5 * HZ; - int ret; - - qedi = qedi_ep->qedi; - - ret = qedi_iscsi_offload_conn(qedi_ep); - if (ret) { - QEDI_ERR(&qedi->dbg_ctx, - "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n", - 
qedi_ep->iscsi_cid, qedi_ep, ret); - qedi_ep->state = EP_STATE_OFLDCONN_FAILED; - return; - } - - ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait, - (qedi_ep->state == - EP_STATE_OFLDCONN_COMPL), - wait_delay); - if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) { - qedi_ep->state = EP_STATE_OFLDCONN_FAILED; - QEDI_ERR(&qedi->dbg_ctx, - "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n", - qedi_ep->iscsi_cid, qedi_ep); - } -} - static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) { struct qedi_ctx *qedi; @@ -1331,7 +1342,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) qedi_ep->dst_addr, qedi_ep->dst_port); } - INIT_WORK(&qedi_ep->offload_work, qedi_offload_work); queue_work(qedi->offload_thread, &qedi_ep->offload_work); ret = 0; @@ -1428,6 +1438,7 @@ struct iscsi_transport qedi_iscsi_transport = { .destroy_session = qedi_session_destroy, .create_conn = qedi_conn_create, .bind_conn = qedi_conn_bind, + .unbind_conn = iscsi_conn_unbind, .start_conn = qedi_conn_start, .stop_conn = iscsi_conn_stop, .destroy_conn = qedi_conn_destroy, diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index d0407f44de78..61b9dc511d90 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -3262,11 +3262,34 @@ struct fc_function_template qla2xxx_transport_vport_functions = { .bsg_timeout = qla24xx_bsg_timeout, }; +static uint +qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds) +{ + uint supported_speeds = FC_PORTSPEED_UNKNOWN; + + if (speeds & FDMI_PORT_SPEED_64GB) + supported_speeds |= FC_PORTSPEED_64GBIT; + if (speeds & FDMI_PORT_SPEED_32GB) + supported_speeds |= FC_PORTSPEED_32GBIT; + if (speeds & FDMI_PORT_SPEED_16GB) + supported_speeds |= FC_PORTSPEED_16GBIT; + if (speeds & FDMI_PORT_SPEED_8GB) + supported_speeds |= FC_PORTSPEED_8GBIT; + if (speeds & FDMI_PORT_SPEED_4GB) + supported_speeds |= FC_PORTSPEED_4GBIT; + if (speeds & FDMI_PORT_SPEED_2GB) + supported_speeds |= FC_PORTSPEED_2GBIT; + if (speeds & FDMI_PORT_SPEED_1GB) + supported_speeds |= FC_PORTSPEED_1GBIT; + + return supported_speeds; +} + void qla2x00_init_host_attr(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; - u32 speeds = FC_PORTSPEED_UNKNOWN; + u32 speeds = 0, fdmi_speed = 0; fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); @@ -3276,7 +3299,8 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha) fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; - speeds = qla25xx_fdmi_port_speed_capability(ha); + fdmi_speed = qla25xx_fdmi_port_speed_capability(ha); + speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed); fc_host_supported_speeds(vha->host) = speeds; } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 8a8e0920d2b4..6afce455b9d8 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3857,6 +3857,7 @@ struct qla_hw_data { /* SRB cache. 
*/ #define SRB_MIN_REQ 128 mempool_t *srb_mempool; + u8 port_name[WWN_SIZE]; volatile struct { uint32_t mbox_int :1; @@ -4134,8 +4135,8 @@ struct qla_hw_data { #define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001) #define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) #define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED) -#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \ - IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_MQUE_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) #define IS_BIDI_CAPABLE(ha) \ (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) /* Bit 21 of fw_attributes decides the MCTP capabilities */ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 3bc185027342..7e5ee31581d6 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -405,7 +405,8 @@ extern int qla2x00_get_resource_cnts(scsi_qla_host_t *); extern int -qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); +qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map, + u8 *num_entries); extern int qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *, diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 73015c69b5e8..20bbd69e35e5 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -1594,7 +1594,6 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, unsigned int callopt) { struct qla_hw_data *ha = vha->hw; - struct init_cb_24xx *icb24 = (void *)ha->init_cb; struct new_utsname *p_sysid = utsname(); struct ct_fdmi_hba_attr *eiter; uint16_t alen; @@ -1756,8 +1755,8 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, /* MAX CT Payload Length */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH); - eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ? - icb24->frame_payload_size : ha->init_cb->frame_payload_size)); + eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2); + alen = sizeof(eiter->a.max_ct_len); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); @@ -1849,7 +1848,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, unsigned int callopt) { struct qla_hw_data *ha = vha->hw; - struct init_cb_24xx *icb24 = (void *)ha->init_cb; struct new_utsname *p_sysid = utsname(); char *hostname = p_sysid ? p_sysid->nodename : fc_host_system_hostname(vha->host); @@ -1901,8 +1899,7 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, /* Max frame size. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); - eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ? 
- icb24->frame_payload_size : ha->init_cb->frame_payload_size)); + eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size); alen = sizeof(eiter->a.max_frame_size); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); @@ -3555,7 +3552,7 @@ login_logout: do_delete) { if (fcport->loop_id != FC_NO_LOOP_ID) { if (fcport->flags & FCF_FCP2_DEVICE) - fcport->logout_on_delete = 0; + continue; ql_dbg(ql_dbg_disc, vha, 0x20f0, "%s %d %8phC post del sess\n", diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9452848ede3f..422ff67038d1 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1734,7 +1734,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) case RSCN_PORT_ADDR: fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); if (fcport) { - if (fcport->flags & FCF_FCP2_DEVICE) { + if (fcport->flags & FCF_FCP2_DEVICE && + atomic_read(&fcport->state) == FCS_ONLINE) { ql_dbg(ql_dbg_disc, vha, 0x2115, "Delaying session delete for FCP2 portid=%06x %8phC ", fcport->d_id.b24, fcport->port_name); @@ -1746,7 +1747,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) break; case RSCN_AREA_ADDR: list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->flags & FCF_FCP2_DEVICE) + if (fcport->flags & FCF_FCP2_DEVICE && + atomic_read(&fcport->state) == FCS_ONLINE) continue; if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) { @@ -1757,7 +1759,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) break; case RSCN_DOM_ADDR: list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->flags & FCF_FCP2_DEVICE) + if (fcport->flags & FCF_FCP2_DEVICE && + atomic_read(&fcport->state) == FCS_ONLINE) continue; if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) { @@ -1769,7 +1772,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) case RSCN_FAB_ADDR: default: list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->flags & FCF_FCP2_DEVICE) + if (fcport->flags & FCF_FCP2_DEVICE && + atomic_read(&fcport->state) == FCS_ONLINE) continue; fcport->scan_needed = 1; @@ -4328,6 +4332,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha) BIT_6) != 0; ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n", (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); + /* Init_cb will be reused for other command(s). Save a backup copy of port_name */ + memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE); } rval = qla2x00_init_firmware(vha, ha->init_cb_size); @@ -5268,6 +5274,22 @@ static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha) return QLA_FUNCTION_FAILED; } +static void +qla_reinitialize_link(scsi_qla_host_t *vha) +{ + int rval; + + atomic_set(&vha->loop_state, LOOP_DOWN); + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + rval = qla2x00_full_login_lip(vha); + if (rval == QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n"); + } else { + ql_dbg(ql_dbg_disc, vha, 0xd051, + "Link reinitialization failed (%d)\n", rval); + } +} + /* * qla2x00_configure_local_loop * Updates Fibre Channel Device Database with local loop devices. 
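For context on the local-loop retry below: qla2x00_get_fcal_position_map() gains a num_entries out-parameter, and the qla_mbx.c hunk further down fills it from byte 0 of the position map (*num_entries = pmap[0]), i.e. the count of valid AL_PA entries that follow. A count above 1 means ports besides our own sit on the loop, so a scan that found nothing logged in is worth retrying after a link reinitialization. A small sketch of consuming such a map (dump_fcal_map() and the sample values are hypothetical, illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define FCAL_MAP_SIZE 128

    /*
     * Hypothetical parser: byte 0 of the position map is the count of valid
     * AL_PA entries, bytes 1..count are the entries themselves.
     */
    static void dump_fcal_map(const uint8_t pmap[FCAL_MAP_SIZE])
    {
            uint8_t n = pmap[0];

            printf("%u loop entries\n", n);
            for (uint8_t i = 1; i <= n && i < FCAL_MAP_SIZE; i++)
                    printf("  AL_PA[%u] = 0x%02x\n", i, pmap[i]);
    }

    int main(void)
    {
            uint8_t pmap[FCAL_MAP_SIZE] = { 2, 0x01, 0xE8 }; /* two ports */

            dump_fcal_map(pmap);
            return 0;
    }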
@@ -5319,6 +5341,19 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) spin_unlock_irqrestore(&vha->work_lock, flags); if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { + u8 loop_map_entries = 0; + int rc; + + rc = qla2x00_get_fcal_position_map(vha, NULL, + &loop_map_entries); + if (rc == QLA_SUCCESS && loop_map_entries > 1) { + /* + * There are devices that are still not logged + * in. Reinitialize to give them a chance. + */ + qla_reinitialize_link(vha); + return QLA_FUNCTION_FAILED; + } set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } @@ -5547,8 +5582,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) if (atomic_read(&fcport->state) == FCS_ONLINE) return; - qla2x00_set_fcport_state(fcport, FCS_ONLINE); - rport_ids.node_name = wwn_to_u64(fcport->node_name); rport_ids.port_name = wwn_to_u64(fcport->port_name); rport_ids.port_id = fcport->d_id.b.domain << 16 | @@ -5649,7 +5682,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_reg_remote_port(vha, fcport); break; case MODE_TARGET: - qla2x00_set_fcport_state(fcport, FCS_ONLINE); if (!vha->vha_tgt.qla_tgt->tgt_stop && !vha->vha_tgt.qla_tgt->tgt_stopped) qlt_fc_port_added(vha, fcport); @@ -5664,6 +5696,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) break; } + qla2x00_set_fcport_state(fcport, FCS_ONLINE); + if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) { if (fcport->id_changed) { fcport->id_changed = 0; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index c5c7d60ab252..7ea73ad845de 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1202,9 +1202,7 @@ skip_rio: if (!vha->vp_idx) { if (ha->flags.fawwpn_enabled && (ha->current_topology == ISP_CFG_F)) { - void *wwpn = ha->init_cb->port_name; - - memcpy(vha->port_name, wwpn, WWN_SIZE); + memcpy(vha->port_name, ha->port_name, WWN_SIZE); fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); ql_dbg(ql_dbg_init + ql_dbg_verbose, @@ -4056,16 +4054,12 @@ msix_register_fail: } /* Enable MSI-X vector for response queue update for queue 0 */ - if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { - if (ha->msixbase && ha->mqiobase && - (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || - ql2xmqsupport)) - ha->mqenable = 1; - } else - if (ha->mqiobase && - (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || - ql2xmqsupport)) - ha->mqenable = 1; + if (IS_MQUE_CAPABLE(ha) && + (ha->msixbase && ha->mqiobase && ha->max_qpairs)) + ha->mqenable = 1; + else + ha->mqenable = 0; + ql_dbg(ql_dbg_multiq, vha, 0xc005, "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index bbb57edc1f66..6ff720d8961d 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -233,6 +233,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) ql_dbg(ql_dbg_mbx, vha, 0x1112, "mbox[%d]<-0x%04x\n", cnt, *iptr); wrt_reg_word(optr, *iptr); + } else { + wrt_reg_word(optr, 0); } mboxes >>= 1; @@ -269,6 +271,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) atomic_inc(&ha->num_pend_mbx_stage3); if (!wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ)) { + ql_dbg(ql_dbg_mbx, vha, 0x117a, + "cmd=%x Timeout.\n", command); + spin_lock_irqsave(&ha->hardware_lock, flags); + clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + 
spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (chip_reset != ha->chip_reset) { spin_lock_irqsave(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; @@ -279,12 +287,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) rval = QLA_ABORTED; goto premature_exit; } - ql_dbg(ql_dbg_mbx, vha, 0x117a, - "cmd=%x Timeout.\n", command); - spin_lock_irqsave(&ha->hardware_lock, flags); - clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - } else if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) { spin_lock_irqsave(&ha->hardware_lock, flags); @@ -3015,7 +3017,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha) * Kernel context. */ int -qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) +qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map, + u8 *num_entries) { int rval; mbx_cmd_t mc; @@ -3055,6 +3058,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) if (pos_map) memcpy(pos_map, pmap, FCAL_MAP_SIZE); + if (num_entries) + *num_entries = pmap[0]; } dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index ba1b1c7549d3..d63ccdf6e988 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -35,11 +35,6 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) (fcport->nvme_flag & NVME_FLAG_REGISTERED)) return 0; - if (atomic_read(&fcport->state) == FCS_ONLINE) - return 0; - - qla2x00_set_fcport_state(fcport, FCS_ONLINE); - fcport->nvme_flag &= ~NVME_FLAG_RESETTING; memset(&req, 0, sizeof(struct nvme_fc_port_info)); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index cf9ae0ab489a..ecb30c2738b8 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -3773,6 +3773,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd) spin_lock_irqsave(&cmd->cmd_lock, flags); if (cmd->aborted) { + if (cmd->sg_mapped) + qlt_unmap_sg(vha, cmd); + spin_unlock_irqrestore(&cmd->cmd_lock, flags); /* * It's normal to see 2 calls in this path: @@ -6814,14 +6817,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha) if (ha->flags.msix_enabled) { if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { - if (IS_QLA2071(ha)) { - /* 4 ports Baker: Enable Interrupt Handshake */ - icb->msix_atio = 0; - icb->firmware_options_2 |= cpu_to_le32(BIT_26); - } else { - icb->msix_atio = cpu_to_le16(msix->entry); - icb->firmware_options_2 &= cpu_to_le32(~BIT_26); - } + icb->msix_atio = cpu_to_le16(msix->entry); + icb->firmware_options_2 &= cpu_to_le32(~BIT_26); ql_dbg(ql_dbg_init, vha, 0xf072, "Registering ICB vector 0x%x for atio que.\n", msix->entry); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 2c23b692e318..8d82d2a83059 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -259,6 +259,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = { .start_conn = qla4xxx_conn_start, .create_conn = qla4xxx_conn_create, .bind_conn = qla4xxx_conn_bind, + .unbind_conn = iscsi_conn_unbind, .stop_conn = iscsi_conn_stop, .destroy_conn = qla4xxx_conn_destroy, .set_param = iscsi_set_param, @@ -3237,6 +3238,7 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, conn = cls_conn->dd_data; qla_conn = conn->dd_data; qla_conn->qla_ep = ep->dd_data; + iscsi_put_endpoint(ep); return 0; } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 
6b00de6b6f0e..cc20621bb49d 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1878,6 +1878,13 @@ static int resp_readcap16(struct scsi_cmnd *scp, arr[14] |= 0x40; } + /* + * Since the scsi_debug READ CAPACITY implementation always reports the + * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices. + */ + if (devip->zmodel == BLK_ZONED_HM) + arr[12] |= 1 << 4; + arr[15] = sdebug_lowest_aligned & 0xff; if (have_dif_prot) { @@ -2746,6 +2753,24 @@ static void zbc_open_zone(struct sdebug_dev_info *devip, } } +static inline void zbc_set_zone_full(struct sdebug_dev_info *devip, + struct sdeb_zone_state *zsp) +{ + switch (zsp->z_cond) { + case ZC2_IMPLICIT_OPEN: + devip->nr_imp_open--; + break; + case ZC3_EXPLICIT_OPEN: + devip->nr_exp_open--; + break; + default: + WARN_ONCE(true, "Invalid zone %llu condition %x\n", + zsp->z_start, zsp->z_cond); + break; + } + zsp->z_cond = ZC5_FULL; +} + static void zbc_inc_wp(struct sdebug_dev_info *devip, unsigned long long lba, unsigned int num) { @@ -2758,7 +2783,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip, if (zsp->z_type == ZBC_ZONE_TYPE_SWR) { zsp->z_wp += num; if (zsp->z_wp >= zend) - zsp->z_cond = ZC5_FULL; + zbc_set_zone_full(devip, zsp); return; } @@ -2777,7 +2802,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip, n = num; } if (zsp->z_wp >= zend) - zsp->z_cond = ZC5_FULL; + zbc_set_zone_full(devip, zsp); num -= n; lba += n; @@ -7061,8 +7086,12 @@ static int sdebug_add_host_helper(int per_host_idx) dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts); error = device_register(&sdbg_host->dev); - if (error) + if (error) { + spin_lock(&sdebug_host_list_lock); + list_del(&sdbg_host->host_list); + spin_unlock(&sdebug_host_list_lock); goto clean; + } ++sdebug_num_hosts; return 0; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 42db9c52208e..6cc4d0792e3d 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -815,6 +815,14 @@ store_state_field(struct device *dev, struct device_attribute *attr, } mutex_lock(&sdev->state_mutex); + switch (sdev->sdev_state) { + case SDEV_RUNNING: + case SDEV_OFFLINE: + break; + default: + mutex_unlock(&sdev->state_mutex); + return -EINVAL; + } if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) { ret = 0; } else { diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index a5759d0e388a..ef7cd7520e7c 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -86,16 +86,10 @@ struct iscsi_internal { struct transport_container session_cont; }; -/* Worker to perform connection failure on unresponsive connections - * completely in kernel space. - */ -static void stop_conn_work_fn(struct work_struct *work); -static DECLARE_WORK(stop_conn_work, stop_conn_work_fn); - static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ static struct workqueue_struct *iscsi_eh_timer_workq; -static struct workqueue_struct *iscsi_destroy_workq; +static struct workqueue_struct *iscsi_conn_cleanup_workq; static DEFINE_IDA(iscsi_sess_ida); /* @@ -268,9 +262,20 @@ void iscsi_destroy_endpoint(struct iscsi_endpoint *ep) } EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint); +void iscsi_put_endpoint(struct iscsi_endpoint *ep) +{ + put_device(&ep->dev); +} +EXPORT_SYMBOL_GPL(iscsi_put_endpoint); + +/** + * iscsi_lookup_endpoint - get ep from handle + * @handle: endpoint handle + * + * Caller must do a iscsi_put_endpoint. 
+ */ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) { - struct iscsi_endpoint *ep; struct device *dev; dev = class_find_device(&iscsi_endpoint_class, NULL, &handle, @@ -278,13 +283,7 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) if (!dev) return NULL; - ep = iscsi_dev_to_endpoint(dev); - /* - * we can drop this now because the interface will prevent - * removals and lookups from racing. - */ - put_device(dev); - return ep; + return iscsi_dev_to_endpoint(dev); } EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint); @@ -1598,12 +1597,6 @@ static DECLARE_TRANSPORT_CLASS(iscsi_connection_class, static struct sock *nls; static DEFINE_MUTEX(rx_queue_mutex); -/* - * conn_mutex protects the {start,bind,stop,destroy}_conn from racing - * against the kernel stop_connection recovery mechanism - */ -static DEFINE_MUTEX(conn_mutex); - static LIST_HEAD(sesslist); static DEFINE_SPINLOCK(sesslock); static LIST_HEAD(connlist); @@ -2225,6 +2218,155 @@ void iscsi_remove_session(struct iscsi_cls_session *session) } EXPORT_SYMBOL_GPL(iscsi_remove_session); +static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag) +{ + ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n"); + + switch (flag) { + case STOP_CONN_RECOVER: + WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); + break; + case STOP_CONN_TERM: + WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); + break; + default: + iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n", + flag); + return; + } + + conn->transport->stop_conn(conn, flag); + ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n"); +} + +static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active) +{ + struct iscsi_cls_session *session = iscsi_conn_to_session(conn); + struct iscsi_endpoint *ep; + + ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n"); + WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); + + if (!conn->ep || !session->transport->ep_disconnect) + return; + + ep = conn->ep; + conn->ep = NULL; + + session->transport->unbind_conn(conn, is_active); + session->transport->ep_disconnect(ep); + ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n"); +} + +static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn, + struct iscsi_endpoint *ep, + bool is_active) +{ + /* Check if this was a conn error and the kernel took ownership */ + spin_lock_irq(&conn->lock); + if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { + spin_unlock_irq(&conn->lock); + iscsi_ep_disconnect(conn, is_active); + } else { + spin_unlock_irq(&conn->lock); + ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); + mutex_unlock(&conn->ep_mutex); + + flush_work(&conn->cleanup_work); + /* + * Userspace is now done with the EP so we can release the ref + * iscsi_cleanup_conn_work_fn took. + */ + iscsi_put_endpoint(ep); + mutex_lock(&conn->ep_mutex); + } +} + +static int iscsi_if_stop_conn(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + int flag = ev->u.stop_conn.flag; + struct iscsi_cls_conn *conn; + + conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid); + if (!conn) + return -EINVAL; + + ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n"); + /* + * If this is a termination we have to call stop_conn with that flag + * so the correct states get set. If we haven't run the work yet try to + * avoid the extra run. + */ + if (flag == STOP_CONN_TERM) { + cancel_work_sync(&conn->cleanup_work); + iscsi_stop_conn(conn, flag); + } else { + /* + * For offload, when iscsid is restarted it won't know about + * existing endpoints so it can't do a ep_disconnect. 
We clean + * it up here for userspace. + */ + mutex_lock(&conn->ep_mutex); + if (conn->ep) + iscsi_if_disconnect_bound_ep(conn, conn->ep, true); + mutex_unlock(&conn->ep_mutex); + + /* + * Figure out if it was the kernel or userspace initiating this. + */ + spin_lock_irq(&conn->lock); + if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { + spin_unlock_irq(&conn->lock); + iscsi_stop_conn(conn, flag); + } else { + spin_unlock_irq(&conn->lock); + ISCSI_DBG_TRANS_CONN(conn, + "flush kernel conn cleanup.\n"); + flush_work(&conn->cleanup_work); + } + /* + * Only clear for recovery to avoid extra cleanup runs during + * termination. + */ + spin_lock_irq(&conn->lock); + clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags); + spin_unlock_irq(&conn->lock); + } + ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n"); + return 0; +} + +static void iscsi_cleanup_conn_work_fn(struct work_struct *work) +{ + struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn, + cleanup_work); + struct iscsi_cls_session *session = iscsi_conn_to_session(conn); + + mutex_lock(&conn->ep_mutex); + /* + * Get a ref to the ep, so we don't release its ID until after + * userspace is done referencing it in iscsi_if_disconnect_bound_ep. + */ + if (conn->ep) + get_device(&conn->ep->dev); + iscsi_ep_disconnect(conn, false); + + if (system_state != SYSTEM_RUNNING) { + /* + * If the user has set up for the session to never timeout + * then hang like they wanted. For all other cases fail right + * away since userspace is not going to relogin. + */ + if (session->recovery_tmo > 0) + session->recovery_tmo = 0; + } + + iscsi_stop_conn(conn, STOP_CONN_RECOVER); + mutex_unlock(&conn->ep_mutex); + ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n"); +} + void iscsi_free_session(struct iscsi_cls_session *session) { ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n"); @@ -2263,11 +2405,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) conn->dd_data = &conn[1]; mutex_init(&conn->ep_mutex); + spin_lock_init(&conn->lock); INIT_LIST_HEAD(&conn->conn_list); - INIT_LIST_HEAD(&conn->conn_list_err); + INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn); conn->transport = transport; conn->cid = cid; - conn->state = ISCSI_CONN_DOWN; + WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); /* this is released in the dev's release function */ if (!get_device(&session->dev)) @@ -2321,7 +2464,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn) spin_lock_irqsave(&connlock, flags); list_del(&conn->conn_list); - list_del(&conn->conn_list_err); spin_unlock_irqrestore(&connlock, flags); transport_unregister_device(&conn->dev); @@ -2448,77 +2590,6 @@ int iscsi_offload_mesg(struct Scsi_Host *shost, } EXPORT_SYMBOL_GPL(iscsi_offload_mesg); -/* - * This can be called without the rx_queue_mutex, if invoked by the kernel - * stop work. But, in that case, it is guaranteed not to race with - * iscsi_destroy by conn_mutex. - */ -static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag) -{ - /* - * It is important that this path doesn't rely on - * rx_queue_mutex, otherwise, a thread doing allocation on a - * start_session/start_connection could sleep waiting on a - * writeback to a failed iscsi device, that cannot be recovered - * because the lock is held. If we don't hold it here, the - * kernel stop_conn_work_fn has a chance to stop the broken - * session and resolve the allocation. 
- * - * Still, the user invoked .stop_conn() needs to be serialized - * with stop_conn_work_fn by a private mutex. Not pretty, but - * it works. - */ - mutex_lock(&conn_mutex); - switch (flag) { - case STOP_CONN_RECOVER: - conn->state = ISCSI_CONN_FAILED; - break; - case STOP_CONN_TERM: - conn->state = ISCSI_CONN_DOWN; - break; - default: - iscsi_cls_conn_printk(KERN_ERR, conn, - "invalid stop flag %d\n", flag); - goto unlock; - } - - conn->transport->stop_conn(conn, flag); -unlock: - mutex_unlock(&conn_mutex); -} - -static void stop_conn_work_fn(struct work_struct *work) -{ - struct iscsi_cls_conn *conn, *tmp; - unsigned long flags; - LIST_HEAD(recovery_list); - - spin_lock_irqsave(&connlock, flags); - if (list_empty(&connlist_err)) { - spin_unlock_irqrestore(&connlock, flags); - return; - } - list_splice_init(&connlist_err, &recovery_list); - spin_unlock_irqrestore(&connlock, flags); - - list_for_each_entry_safe(conn, tmp, &recovery_list, conn_list_err) { - uint32_t sid = iscsi_conn_get_sid(conn); - struct iscsi_cls_session *session; - - session = iscsi_session_lookup(sid); - if (session) { - if (system_state != SYSTEM_RUNNING) { - session->recovery_tmo = 0; - iscsi_if_stop_conn(conn, STOP_CONN_TERM); - } else { - iscsi_if_stop_conn(conn, STOP_CONN_RECOVER); - } - } - - list_del_init(&conn->conn_list_err); - } -} - void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) { struct nlmsghdr *nlh; @@ -2527,11 +2598,31 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev)); unsigned long flags; + int state; - spin_lock_irqsave(&connlock, flags); - list_add(&conn->conn_list_err, &connlist_err); - spin_unlock_irqrestore(&connlock, flags); - queue_work(system_unbound_wq, &stop_conn_work); + spin_lock_irqsave(&conn->lock, flags); + /* + * Userspace will only do a stop call if we are at least bound. And, we + * only need to do the in kernel cleanup if in the UP state so cmds can + * be released to upper layers. If in other states just wait for + * userspace to avoid races that can leave the cleanup_work queued. 
+ */ + state = READ_ONCE(conn->state); + switch (state) { + case ISCSI_CONN_BOUND: + case ISCSI_CONN_UP: + if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, + &conn->flags)) { + queue_work(iscsi_conn_cleanup_workq, + &conn->cleanup_work); + } + break; + default: + ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n", + state); + break; + } + spin_unlock_irqrestore(&conn->lock, flags); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) @@ -2861,26 +2952,17 @@ static int iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct iscsi_cls_conn *conn; - unsigned long flags; conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid); if (!conn) return -EINVAL; - spin_lock_irqsave(&connlock, flags); - if (!list_empty(&conn->conn_list_err)) { - spin_unlock_irqrestore(&connlock, flags); - return -EAGAIN; - } - spin_unlock_irqrestore(&connlock, flags); - + ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n"); + flush_work(&conn->cleanup_work); ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n"); - mutex_lock(&conn_mutex); if (transport->destroy_conn) transport->destroy_conn(conn); - mutex_unlock(&conn_mutex); - return 0; } @@ -2890,7 +2972,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) char *data = (char*)ev + sizeof(*ev); struct iscsi_cls_conn *conn; struct iscsi_cls_session *session; - int err = 0, value = 0; + int err = 0, value = 0, state; if (ev->u.set_param.len > PAGE_SIZE) return -EINVAL; @@ -2907,8 +2989,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) session->recovery_tmo = value; break; default: - if ((conn->state == ISCSI_CONN_BOUND) || - (conn->state == ISCSI_CONN_UP)) { + state = READ_ONCE(conn->state); + if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) { err = transport->set_param(conn, ev->u.set_param.param, data, ev->u.set_param.len); } else { @@ -2968,15 +3050,22 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, ep = iscsi_lookup_endpoint(ep_handle); if (!ep) return -EINVAL; + conn = ep->conn; - if (conn) { - mutex_lock(&conn->ep_mutex); - conn->ep = NULL; - mutex_unlock(&conn->ep_mutex); - conn->state = ISCSI_CONN_FAILED; + if (!conn) { + /* + * conn was not even bound yet, so we can't get iscsi conn + * failures yet. 
+ */ + transport->ep_disconnect(ep); + goto put_ep; } - transport->ep_disconnect(ep); + mutex_lock(&conn->ep_mutex); + iscsi_if_disconnect_bound_ep(conn, ep, false); + mutex_unlock(&conn->ep_mutex); +put_ep: + iscsi_put_endpoint(ep); return 0; } @@ -3002,6 +3091,7 @@ iscsi_if_transport_ep(struct iscsi_transport *transport, ev->r.retcode = transport->ep_poll(ep, ev->u.ep_poll.timeout_ms); + iscsi_put_endpoint(ep); break; case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: rc = iscsi_if_ep_disconnect(transport, @@ -3632,18 +3722,123 @@ exit_host_stats: return err; } +static int iscsi_if_transport_conn(struct iscsi_transport *transport, + struct nlmsghdr *nlh) +{ + struct iscsi_uevent *ev = nlmsg_data(nlh); + struct iscsi_cls_session *session; + struct iscsi_cls_conn *conn = NULL; + struct iscsi_endpoint *ep; + uint32_t pdu_len; + int err = 0; + + switch (nlh->nlmsg_type) { + case ISCSI_UEVENT_CREATE_CONN: + return iscsi_if_create_conn(transport, ev); + case ISCSI_UEVENT_DESTROY_CONN: + return iscsi_if_destroy_conn(transport, ev); + case ISCSI_UEVENT_STOP_CONN: + return iscsi_if_stop_conn(transport, ev); + } + + /* + * The following cmds need to be run under the ep_mutex so in kernel + * conn cleanup (ep_disconnect + unbind and conn) is not done while + * these are running. They also must not run if we have just run a conn + * cleanup because they would set the state in a way that might allow + * IO or send IO themselves. + */ + switch (nlh->nlmsg_type) { + case ISCSI_UEVENT_START_CONN: + conn = iscsi_conn_lookup(ev->u.start_conn.sid, + ev->u.start_conn.cid); + break; + case ISCSI_UEVENT_BIND_CONN: + conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid); + break; + case ISCSI_UEVENT_SEND_PDU: + conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); + break; + } + + if (!conn) + return -EINVAL; + + mutex_lock(&conn->ep_mutex); + spin_lock_irq(&conn->lock); + if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { + spin_unlock_irq(&conn->lock); + mutex_unlock(&conn->ep_mutex); + ev->r.retcode = -ENOTCONN; + return 0; + } + spin_unlock_irq(&conn->lock); + + switch (nlh->nlmsg_type) { + case ISCSI_UEVENT_BIND_CONN: + session = iscsi_session_lookup(ev->u.b_conn.sid); + if (!session) { + err = -EINVAL; + break; + } + + ev->r.retcode = transport->bind_conn(session, conn, + ev->u.b_conn.transport_eph, + ev->u.b_conn.is_leading); + if (!ev->r.retcode) + WRITE_ONCE(conn->state, ISCSI_CONN_BOUND); + + if (ev->r.retcode || !transport->ep_connect) + break; + + ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); + if (ep) { + ep->conn = conn; + conn->ep = ep; + iscsi_put_endpoint(ep); + } else { + err = -ENOTCONN; + iscsi_cls_conn_printk(KERN_ERR, conn, + "Could not set ep conn binding\n"); + } + break; + case ISCSI_UEVENT_START_CONN: + ev->r.retcode = transport->start_conn(conn); + if (!ev->r.retcode) + WRITE_ONCE(conn->state, ISCSI_CONN_UP); + + break; + case ISCSI_UEVENT_SEND_PDU: + pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev); + + if ((ev->u.send_pdu.hdr_size > pdu_len) || + (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { + err = -EINVAL; + break; + } + + ev->r.retcode = transport->send_pdu(conn, + (struct iscsi_hdr *)((char *)ev + sizeof(*ev)), + (char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, + ev->u.send_pdu.data_size); + break; + default: + err = -ENOSYS; + } + + mutex_unlock(&conn->ep_mutex); + return err; +} static int iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; u32 portid; - u32 pdu_len; struct 
iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; struct iscsi_cls_session *session; - struct iscsi_cls_conn *conn; struct iscsi_endpoint *ep = NULL; if (!netlink_capable(skb, CAP_SYS_ADMIN)) @@ -3684,6 +3879,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) ev->u.c_bound_session.initial_cmdsn, ev->u.c_bound_session.cmds_max, ev->u.c_bound_session.queue_depth); + iscsi_put_endpoint(ep); break; case ISCSI_UEVENT_DESTROY_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); @@ -3708,7 +3904,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) list_del_init(&session->sess_list); spin_unlock_irqrestore(&sesslock, flags); - queue_work(iscsi_destroy_workq, &session->destroy_work); + queue_work(system_unbound_wq, &session->destroy_work); } break; case ISCSI_UEVENT_UNBIND_SESSION: @@ -3719,89 +3915,16 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) else err = -EINVAL; break; - case ISCSI_UEVENT_CREATE_CONN: - err = iscsi_if_create_conn(transport, ev); - break; - case ISCSI_UEVENT_DESTROY_CONN: - err = iscsi_if_destroy_conn(transport, ev); - break; - case ISCSI_UEVENT_BIND_CONN: - session = iscsi_session_lookup(ev->u.b_conn.sid); - conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid); - - if (conn && conn->ep) - iscsi_if_ep_disconnect(transport, conn->ep->id); - - if (!session || !conn) { - err = -EINVAL; - break; - } - - mutex_lock(&conn_mutex); - ev->r.retcode = transport->bind_conn(session, conn, - ev->u.b_conn.transport_eph, - ev->u.b_conn.is_leading); - if (!ev->r.retcode) - conn->state = ISCSI_CONN_BOUND; - mutex_unlock(&conn_mutex); - - if (ev->r.retcode || !transport->ep_connect) - break; - - ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); - if (ep) { - ep->conn = conn; - - mutex_lock(&conn->ep_mutex); - conn->ep = ep; - mutex_unlock(&conn->ep_mutex); - } else - iscsi_cls_conn_printk(KERN_ERR, conn, - "Could not set ep conn " - "binding\n"); - break; case ISCSI_UEVENT_SET_PARAM: err = iscsi_set_param(transport, ev); break; - case ISCSI_UEVENT_START_CONN: - conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid); - if (conn) { - mutex_lock(&conn_mutex); - ev->r.retcode = transport->start_conn(conn); - if (!ev->r.retcode) - conn->state = ISCSI_CONN_UP; - mutex_unlock(&conn_mutex); - } - else - err = -EINVAL; - break; + case ISCSI_UEVENT_CREATE_CONN: + case ISCSI_UEVENT_DESTROY_CONN: case ISCSI_UEVENT_STOP_CONN: - conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid); - if (conn) - iscsi_if_stop_conn(conn, ev->u.stop_conn.flag); - else - err = -EINVAL; - break; + case ISCSI_UEVENT_START_CONN: + case ISCSI_UEVENT_BIND_CONN: case ISCSI_UEVENT_SEND_PDU: - pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev); - - if ((ev->u.send_pdu.hdr_size > pdu_len) || - (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { - err = -EINVAL; - break; - } - - conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); - if (conn) { - mutex_lock(&conn_mutex); - ev->r.retcode = transport->send_pdu(conn, - (struct iscsi_hdr*)((char*)ev + sizeof(*ev)), - (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, - ev->u.send_pdu.data_size); - mutex_unlock(&conn_mutex); - } - else - err = -EINVAL; + err = iscsi_if_transport_conn(transport, nlh); break; case ISCSI_UEVENT_GET_STATS: err = iscsi_if_get_stats(transport, nlh); @@ -3991,10 +4114,11 @@ static ssize_t show_conn_state(struct device *dev, { struct 
iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); const char *state = "unknown"; + int conn_state = READ_ONCE(conn->state); - if (conn->state >= 0 && - conn->state < ARRAY_SIZE(connection_state_names)) - state = connection_state_names[conn->state]; + if (conn_state >= 0 && + conn_state < ARRAY_SIZE(connection_state_names)) + state = connection_state_names[conn_state]; return sysfs_emit(buf, "%s\n", state); } @@ -4649,6 +4773,7 @@ iscsi_register_transport(struct iscsi_transport *tt) int err; BUG_ON(!tt); + WARN_ON(tt->ep_disconnect && !tt->unbind_conn); priv = iscsi_if_transport_lookup(tt); if (priv) @@ -4803,10 +4928,10 @@ static __init int iscsi_transport_init(void) goto release_nls; } - iscsi_destroy_workq = alloc_workqueue("%s", - WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, - 1, "iscsi_destroy"); - if (!iscsi_destroy_workq) { + iscsi_conn_cleanup_workq = alloc_workqueue("%s", + WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0, + "iscsi_conn_cleanup"); + if (!iscsi_conn_cleanup_workq) { err = -ENOMEM; goto destroy_wq; } @@ -4836,7 +4961,7 @@ unregister_transport_class: static void __exit iscsi_transport_exit(void) { - destroy_workqueue(iscsi_destroy_workq); + destroy_workqueue(iscsi_conn_cleanup_workq); destroy_workqueue(iscsi_eh_timer_workq); netlink_kernel_release(nls); bus_unregister(&iscsi_flashnode_bus); diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 4a96fb05731d..c6256fdc24b1 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -716,12 +716,17 @@ int sas_phy_add(struct sas_phy *phy) int error; error = device_add(&phy->dev); - if (!error) { - transport_add_device(&phy->dev); - transport_configure_device(&phy->dev); - } + if (error) + return error; - return error; + error = transport_add_device(&phy->dev); + if (error) { + device_del(&phy->dev); + return error; + } + transport_configure_device(&phy->dev); + + return 0; } EXPORT_SYMBOL(sas_phy_add); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 00a355f56d8f..d18c5606938e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1074,6 +1074,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) struct bio *bio = rq->bio; u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); + unsigned int nr_bytes = blk_rq_bytes(rq); blk_status_t ret; if (sdkp->device->no_write_same) @@ -1110,7 +1111,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) */ rq->__data_len = sdp->sector_size; ret = scsi_alloc_sgtables(cmd); - rq->__data_len = blk_rq_bytes(rq); + rq->__data_len = nr_bytes; return ret; } @@ -3512,7 +3513,6 @@ static int sd_probe(struct device *dev) out_put: put_disk(gd); out_free: - sd_zbc_release_disk(sdkp); kfree(sdkp); out: scsi_autopm_put_device(sdp); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index bfa8d77322d7..e1c086ac8a60 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -190,7 +190,7 @@ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); static Sg_fd *sg_add_sfp(Sg_device * sdp); static void sg_remove_sfp(struct kref *); -static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); +static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy); static Sg_request *sg_add_request(Sg_fd * sfp); static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); static Sg_device *sg_get_dev(int dev); @@ -444,6 +444,7 @@ sg_read(struct file *filp, 
char __user *buf, size_t count, loff_t * ppos) Sg_fd *sfp; Sg_request *srp; int req_pack_id = -1; + bool busy; sg_io_hdr_t *hp; struct sg_header *old_hdr; int retval; @@ -466,20 +467,16 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) if (retval) return retval; - srp = sg_get_rq_mark(sfp, req_pack_id); + srp = sg_get_rq_mark(sfp, req_pack_id, &busy); if (!srp) { /* now wait on packet to arrive */ - if (atomic_read(&sdp->detaching)) - return -ENODEV; if (filp->f_flags & O_NONBLOCK) return -EAGAIN; retval = wait_event_interruptible(sfp->read_wait, - (atomic_read(&sdp->detaching) || - (srp = sg_get_rq_mark(sfp, req_pack_id)))); - if (atomic_read(&sdp->detaching)) - return -ENODEV; - if (retval) - /* -ERESTARTSYS as signal hit process */ - return retval; + ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) || + (!busy && atomic_read(&sdp->detaching)))); + if (!srp) + /* signal or detaching */ + return retval ? retval : -ENODEV; } if (srp->header.interface_id != '\0') return sg_new_read(sfp, buf, count, srp); @@ -938,9 +935,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp, if (result < 0) return result; result = wait_event_interruptible(sfp->read_wait, - (srp_done(sfp, srp) || atomic_read(&sdp->detaching))); - if (atomic_read(&sdp->detaching)) - return -ENODEV; + srp_done(sfp, srp)); write_lock_irq(&sfp->rq_list_lock); if (srp->done) { srp->done = 2; @@ -2093,19 +2088,28 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) } static Sg_request * -sg_get_rq_mark(Sg_fd * sfp, int pack_id) +sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy) { Sg_request *resp; unsigned long iflags; + *busy = false; write_lock_irqsave(&sfp->rq_list_lock, iflags); list_for_each_entry(resp, &sfp->rq_list, entry) { - /* look for requests that are ready + not SG_IO owned */ - if ((1 == resp->done) && (!resp->sg_io_owned) && + /* look for requests that are not SG_IO owned */ + if ((!resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { - resp->done = 2; /* guard against other readers */ - write_unlock_irqrestore(&sfp->rq_list_lock, iflags); - return resp; + switch (resp->done) { + case 0: /* request active */ + *busy = true; + break; + case 1: /* request done; response ready to return */ + resp->done = 2; /* guard against other readers */ + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return resp; + case 2: /* response already being returned */ + break; + } } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); @@ -2159,6 +2163,15 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp) res = 1; } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + + /* + * If the device is detaching, wakeup any readers in case we just + * removed the last response, which would leave nothing for them to + * return other than -ENODEV. 
+ */ + if (unlikely(atomic_read(&sfp->parentdp->detaching))) + wake_up_interruptible_all(&sfp->read_wait); + return res; } diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index de73ade70c24..fcff35e20a4a 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -4997,10 +4997,10 @@ static int pqi_raid_submit_scsi_cmd_with_io_request( } switch (scmd->sc_data_direction) { - case DMA_TO_DEVICE: + case DMA_FROM_DEVICE: request->data_direction = SOP_READ_FLAG; break; - case DMA_FROM_DEVICE: + case DMA_TO_DEVICE: request->data_direction = SOP_WRITE_FLAG; break; case DMA_NONE: diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index d4f10c0d813c..a3bce11ed4b4 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -668,16 +668,17 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) return 0; case PASSTHRU_CMD: if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { - struct st_drvver ver; + const struct st_drvver ver = { + .major = ST_VER_MAJOR, + .minor = ST_VER_MINOR, + .oem = ST_OEM, + .build = ST_BUILD_VER, + .signature[0] = PASSTHRU_SIGNATURE, + .console_id = host->max_id - 1, + .host_no = hba->host->host_no, + }; size_t cp_len = sizeof(ver); - ver.major = ST_VER_MAJOR; - ver.minor = ST_VER_MINOR; - ver.oem = ST_OEM; - ver.build = ST_BUILD_VER; - ver.signature[0] = PASSTHRU_SIGNATURE; - ver.console_id = host->max_id - 1; - ver.host_no = hba->host->host_no; cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len); cmd->result = sizeof(ver) == cp_len ? DID_OK << 16 | COMMAND_COMPLETE << 8 : diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 0ee0b80006e0..3fa8a0c94bdc 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -356,16 +356,21 @@ enum storvsc_request_type { }; /* - * SRB status codes and masks; a subset of the codes used here. + * SRB status codes and masks. In the 8-bit field, the two high order bits + * are flags, while the remaining 6 bits are an integer status code. The + * definitions here include only the subset of the integer status codes that + * are tested for in this driver. */ - #define SRB_STATUS_AUTOSENSE_VALID 0x80 #define SRB_STATUS_QUEUE_FROZEN 0x40 -#define SRB_STATUS_INVALID_LUN 0x20 -#define SRB_STATUS_SUCCESS 0x01 -#define SRB_STATUS_ABORTED 0x02 -#define SRB_STATUS_ERROR 0x04 -#define SRB_STATUS_DATA_OVERRUN 0x12 + +/* SRB status integer codes */ +#define SRB_STATUS_SUCCESS 0x01 +#define SRB_STATUS_ABORTED 0x02 +#define SRB_STATUS_ERROR 0x04 +#define SRB_STATUS_INVALID_REQUEST 0x06 +#define SRB_STATUS_DATA_OVERRUN 0x12 +#define SRB_STATUS_INVALID_LUN 0x20 #define SRB_STATUS(status) \ (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN)) @@ -995,38 +1000,25 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, void (*process_err_fn)(struct work_struct *work); struct hv_host_device *host_dev = shost_priv(host); - /* - * In some situations, Hyper-V sets multiple bits in the - * srb_status, such as ABORTED and ERROR. So process them - * individually, with the most specific bits first. - */ + switch (SRB_STATUS(vm_srb->srb_status)) { + case SRB_STATUS_ERROR: + case SRB_STATUS_ABORTED: + case SRB_STATUS_INVALID_REQUEST: + if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) { + /* Check for capacity change */ + if ((asc == 0x2a) && (ascq == 0x9)) { + process_err_fn = storvsc_device_scan; + /* Retry the I/O that triggered this. 
*/ + set_host_byte(scmnd, DID_REQUEUE); + goto do_work; + } - if (vm_srb->srb_status & SRB_STATUS_INVALID_LUN) { - set_host_byte(scmnd, DID_NO_CONNECT); - process_err_fn = storvsc_remove_lun; - goto do_work; - } - - if (vm_srb->srb_status & SRB_STATUS_ABORTED) { - if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID && - /* Capacity data has changed */ - (asc == 0x2a) && (ascq == 0x9)) { - process_err_fn = storvsc_device_scan; /* - * Retry the I/O that triggered this. + * Otherwise, let upper layer deal with the + * error when sense message is present */ - set_host_byte(scmnd, DID_REQUEUE); - goto do_work; - } - } - - if (vm_srb->srb_status & SRB_STATUS_ERROR) { - /* - * Let upper layer deal with error when - * sense message is present. - */ - if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) return; + } /* * If there is an error; offline the device since all @@ -1049,6 +1041,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, default: set_host_byte(scmnd, DID_ERROR); } + return; + + case SRB_STATUS_INVALID_LUN: + set_host_byte(scmnd, DID_NO_CONNECT); + process_err_fn = storvsc_remove_lun; + goto do_work; + } return; @@ -1997,7 +1996,7 @@ static int storvsc_probe(struct hv_device *device, */ host_dev->handle_error_wq = alloc_ordered_workqueue("storvsc_error_wq_%d", - WQ_MEM_RECLAIM, + 0, host->host_no); if (!host_dev->handle_error_wq) { ret = -ENOMEM; diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c index eafe0db98d54..122d650d0810 100644 --- a/drivers/scsi/ufs/ti-j721e-ufs.c +++ b/drivers/scsi/ufs/ti-j721e-ufs.c @@ -29,11 +29,9 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev) return PTR_ERR(regbase); pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - pm_runtime_put_noidle(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) goto disable_pm; - } /* Select MPHY refclk frequency */ clk = devm_clk_get(dev, NULL); diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index ca0ebb2c9b96..99eec4d06dde 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -501,7 +501,7 @@ static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba) struct ufs_mtk_host *host = ufshcd_get_variant(hba); host->reg_va09 = regulator_get(hba->dev, "va09"); - if (!host->reg_va09) + if (IS_ERR(host->reg_va09)) dev_info(hba->dev, "failed to get va09"); else host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL; diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 9fe3b93440f6..707e4a7a1838 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -637,12 +637,7 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) return err; } - err = ufs_qcom_ice_resume(host); - if (err) - return err; - - hba->is_sys_suspended = false; - return 0; + return ufs_qcom_ice_resume(host); } static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable) @@ -683,8 +678,11 @@ static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable) writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio); - /* ensure that ref_clk is enabled/disabled before we return */ - wmb(); + /* + * Make sure the write to ref_clk reaches the destination and + * not stored in a Write Buffer (WB). 
+ */ + readl(host->dev_ref_clk_ctrl_mmio); /* * If we call hibern8 exit after this, we need to make sure that diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index f13564a3e4d1..582ca374595c 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -107,9 +107,20 @@ out: return ret; } +static bool phandle_exists(const struct device_node *np, + const char *phandle_name, int index) +{ + struct device_node *parse_np = of_parse_phandle(np, phandle_name, index); + + if (parse_np) + of_node_put(parse_np); + + return parse_np != NULL; +} + #define MAX_PROP_SIZE 32 static int ufshcd_populate_vreg(struct device *dev, const char *name, - struct ufs_vreg **out_vreg) + struct ufs_vreg **out_vreg) { int ret = 0; char prop_name[MAX_PROP_SIZE]; @@ -122,7 +133,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name, } snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name); - if (!of_parse_phandle(np, prop_name, 0)) { + if (!phandle_exists(np, prop_name, 0)) { dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n", __func__, prop_name); goto out; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 55cf3301de66..1812edbb7d72 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -113,8 +113,13 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, if (!regs) return -ENOMEM; - for (pos = 0; pos < len; pos += 4) + for (pos = 0; pos < len; pos += 4) { + if (offset == 0 && + pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER && + pos <= REG_UIC_ERROR_CODE_DME) + continue; regs[pos / 4] = ufshcd_readl(hba, offset + pos); + } ufshcd_hex_dump(prefix, regs, len); kfree(regs); @@ -2088,12 +2093,13 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) lrbp->issue_time_stamp = ktime_get(); lrbp->compl_time_stamp = ktime_set(0, 0); - ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? 
true : false)); trace_android_vh_ufs_send_command(hba, lrbp); ufshcd_add_command_trace(hba, task_tag, "send"); ufshcd_clk_scaling_start_busy(hba); if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) ufshcd_start_monitor(hba, lrbp); + if (hba->vops && hba->vops->setup_xfer_req) + hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd); if (ufshcd_has_utrlcnr(hba)) { set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, @@ -4380,7 +4386,7 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba) QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res); if (!flag_res) break; - usleep_range(5000, 10000); + usleep_range(500, 1000); } while (ktime_before(ktime_get(), timeout)); if (err) { @@ -7645,7 +7651,7 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) peer_pa_tactivate_us = peer_pa_tactivate * gran_to_us_table[peer_granularity - 1]; - if (pa_tactivate_us > peer_pa_tactivate_us) { + if (pa_tactivate_us >= peer_pa_tactivate_us) { u32 new_peer_pa_tactivate; new_peer_pa_tactivate = pa_tactivate_us / diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 53678a8b80e5..6b80ac49908c 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -1304,18 +1304,6 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, return -ENOTSUPP; } -static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag, - bool is_scsi_cmd) -{ - if (hba->vops && hba->vops->setup_xfer_req) { - unsigned long flags; - - spin_lock_irqsave(hba->host->host_lock, flags); - hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd); - spin_unlock_irqrestore(hba->host->host_lock, flags); - } -} - static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba, int tag, u8 tm_function) { diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 7c63d262283f..9ac6f5d2d54b 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -134,11 +134,7 @@ static inline u32 ufshci_version(u32 major, u32 minor) #define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK) -#define UFSHCD_ERROR_MASK (UIC_ERROR |\ - DEVICE_FATAL_ERROR |\ - CONTROLLER_FATAL_ERROR |\ - SYSTEM_BUS_FATAL_ERROR |\ - CRYPTO_ENGINE_FATAL_ERROR) +#define UFSHCD_ERROR_MASK (UIC_ERROR | INT_FATAL_ERRORS) #define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\ CONTROLLER_FATAL_ERROR |\ diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index 75966d3f326e..d87c12324c03 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -333,8 +333,8 @@ struct PVSCSIRingReqDesc { u8 tag; u8 bus; u8 target; - u8 vcpuHint; - u8 unused[59]; + u16 vcpuHint; + u8 unused[58]; } __packed; /* diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c index 27b9e2baab1a..7acf9193a9e8 100644 --- a/drivers/scsi/zorro7xx.c +++ b/drivers/scsi/zorro7xx.c @@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z) scsi_remove_host(host); NCR_700_release(host); + if (host->base > 0x01000000) + iounmap(hostdata->base); kfree(hostdata); free_irq(host->irq, host); zorro_release_device(z); diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c index f8c08fb9891d..e0ffef6e9386 100644 --- a/drivers/siox/siox-core.c +++ b/drivers/siox/siox-core.c @@ -835,6 +835,8 @@ static struct siox_device *siox_device_add(struct siox_master *smaster, err_device_register: /* don't care to make the buffer smaller again */ + put_device(&sdevice->dev); + sdevice = NULL; err_buf_alloc: siox_master_unlock(smaster); diff --git a/drivers/slimbus/qcom-ctrl.c 
b/drivers/slimbus/qcom-ctrl.c index f04b961b96cd..ec58091fc948 100644 --- a/drivers/slimbus/qcom-ctrl.c +++ b/drivers/slimbus/qcom-ctrl.c @@ -510,9 +510,9 @@ static int qcom_slim_probe(struct platform_device *pdev) } ctrl->irq = platform_get_irq(pdev, 0); - if (!ctrl->irq) { + if (ctrl->irq < 0) { dev_err(&pdev->dev, "no slimbus IRQ\n"); - return -ENODEV; + return ctrl->irq; } sctrl = &ctrl->ctrl; diff --git a/drivers/slimbus/stream.c b/drivers/slimbus/stream.c index 75f87b3d8b95..73a2aa362957 100644 --- a/drivers/slimbus/stream.c +++ b/drivers/slimbus/stream.c @@ -67,10 +67,10 @@ static const int slim_presence_rate_table[] = { 384000, 768000, 0, /* Reserved */ - 110250, - 220500, - 441000, - 882000, + 11025, + 22050, + 44100, + 88200, 176400, 352800, 705600, diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c index 78f0f1aeca57..92125dd65f33 100644 --- a/drivers/soc/amlogic/meson-mx-socinfo.c +++ b/drivers/soc/amlogic/meson-mx-socinfo.c @@ -126,6 +126,7 @@ static int __init meson_mx_socinfo_init(void) np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids); if (np) { analog_top_regmap = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(analog_top_regmap)) return PTR_ERR(analog_top_regmap); diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c index 59bd195fa9c9..2eeea5e1b3b7 100644 --- a/drivers/soc/amlogic/meson-secure-pwrc.c +++ b/drivers/soc/amlogic/meson-secure-pwrc.c @@ -139,8 +139,10 @@ static int meson_secure_pwrc_probe(struct platform_device *pdev) } pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL); - if (!pwrc) + if (!pwrc) { + of_node_put(sm_np); return -ENOMEM; + } pwrc->fw = meson_sm_get(sm_np); of_node_put(sm_np); diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c index b1062334e608..722fd54e537c 100644 --- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c +++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c @@ -681,13 +681,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev) const struct of_device_id *of_id = NULL; struct device_node *dn; void __iomem *base; - int ret, i; + int ret, i, s; /* AON ctrl registers */ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL); if (IS_ERR(base)) { pr_err("error mapping AON_CTRL\n"); - return PTR_ERR(base); + ret = PTR_ERR(base); + goto aon_err; } ctrl.aon_ctrl_base = base; @@ -697,8 +698,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev) /* Assume standard offset */ ctrl.aon_sram = ctrl.aon_ctrl_base + AON_CTRL_SYSTEM_DATA_RAM_OFS; + s = 0; } else { ctrl.aon_sram = base; + s = 1; } writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC); @@ -708,7 +711,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev) (const void **)&ddr_phy_data); if (IS_ERR(base)) { pr_err("error mapping DDR PHY\n"); - return PTR_ERR(base); + ret = PTR_ERR(base); + goto ddr_phy_err; } ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot; ctrl.pll_status_offset = ddr_phy_data->pll_status_offset; @@ -728,17 +732,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev) for_each_matching_node(dn, ddr_shimphy_dt_ids) { i = ctrl.num_memc; if (i >= MAX_NUM_MEMC) { + of_node_put(dn); pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC); break; } base = of_io_request_and_map(dn, 0, dn->full_name); if (IS_ERR(base)) { + of_node_put(dn); if (!ctrl.support_warm_boot) break; pr_err("error mapping DDR SHIMPHY %d\n", i); - return PTR_ERR(base); + ret = PTR_ERR(base); + goto ddr_shimphy_err; } 
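/*
 * Editorial note (not part of the patch): from this point the brcmstb
 * pm-arm.c hunks convert the probe's bare "return ret" statements into
 * gotos, so that every mapping taken so far (AON ctrl, DDR PHY, the
 * per-MEMC SHIMPHY and sequencer bases, boot SRAM) is unmapped in
 * reverse order on failure. The matching label ladder (s3_params_err,
 * brcmstb_memc_err, ddr_shimphy_err, ddr_phy_err, aon_err) appears at
 * the end of the function below.
 */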
ctrl.memcs[i].ddr_shimphy_base = base; ctrl.num_memc++; @@ -749,14 +756,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev) for_each_matching_node(dn, brcmstb_memc_of_match) { base = of_iomap(dn, 0); if (!base) { + of_node_put(dn); pr_err("error mapping DDR Sequencer %d\n", i); - return -ENOMEM; + ret = -ENOMEM; + goto brcmstb_memc_err; } of_id = of_match_node(brcmstb_memc_of_match, dn); if (!of_id) { iounmap(base); - return -EINVAL; + of_node_put(dn); + ret = -EINVAL; + goto brcmstb_memc_err; } ddr_seq_data = of_id->data; @@ -776,20 +787,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev) dn = of_find_matching_node(NULL, sram_dt_ids); if (!dn) { pr_err("SRAM not found\n"); - return -EINVAL; + ret = -EINVAL; + goto brcmstb_memc_err; } ret = brcmstb_init_sram(dn); + of_node_put(dn); if (ret) { pr_err("error setting up SRAM for PM\n"); - return ret; + goto brcmstb_memc_err; } ctrl.pdev = pdev; ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL); - if (!ctrl.s3_params) - return -ENOMEM; + if (!ctrl.s3_params) { + ret = -ENOMEM; + goto s3_params_err; + } ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params, sizeof(*ctrl.s3_params), DMA_TO_DEVICE); @@ -809,7 +824,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev) out: kfree(ctrl.s3_params); +s3_params_err: + iounmap(ctrl.boot_sram); +brcmstb_memc_err: + for (i--; i >= 0; i--) + iounmap(ctrl.memcs[i].ddr_ctrl); +ddr_shimphy_err: + for (i = 0; i < ctrl.num_memc; i++) + iounmap(ctrl.memcs[i].ddr_shimphy_base); + iounmap(ctrl.memcs[0].ddr_phy_base); +ddr_phy_err: + iounmap(ctrl.aon_ctrl_base); + if (s) + iounmap(ctrl.aon_sram); +aon_err: pr_warn("PM: initialization failed with code %d\n", ret); return ret; diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig index 4df32bc4c7a6..c5d46152d468 100644 --- a/drivers/soc/fsl/Kconfig +++ b/drivers/soc/fsl/Kconfig @@ -24,6 +24,7 @@ config FSL_MC_DPIO tristate "QorIQ DPAA2 DPIO driver" depends on FSL_MC_BUS select SOC_BUS + select FSL_GUTS help Driver for the DPAA2 DPIO object. 
A DPIO provides queue and buffer management facilities for software to interact with diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c index 091e94c04f30..6b0c433954bf 100644 --- a/drivers/soc/fsl/guts.c +++ b/drivers/soc/fsl/guts.c @@ -141,7 +141,7 @@ static int fsl_guts_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct resource *res; const struct fsl_soc_die_attr *soc_die; - const char *machine; + const char *machine = NULL; u32 svr; /* Initialize guts */ diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c index 6065aaab6740..8482a4892b83 100644 --- a/drivers/soc/ixp4xx/ixp4xx-npe.c +++ b/drivers/soc/ixp4xx/ixp4xx-npe.c @@ -735,7 +735,7 @@ static const struct of_device_id ixp4xx_npe_of_match[] = { static struct platform_driver ixp4xx_npe_driver = { .driver = { .name = "ixp4xx-npe", - .of_match_table = of_match_ptr(ixp4xx_npe_of_match), + .of_match_table = ixp4xx_npe_of_match, }, .probe = ixp4xx_npe_probe, .remove = ixp4xx_npe_remove, diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 79b568f82a1c..499718e131d7 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -129,6 +129,7 @@ config QCOM_RPMHPD config QCOM_RPMPD tristate "Qualcomm RPM Power domain driver" + depends on PM depends on QCOM_SMD_RPM help QCOM RPM Power domain driver to support power-domains with diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index 70fbe70c6213..2e06f48d683d 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -496,6 +496,7 @@ static const struct of_device_id qcom_llcc_of_match[] = { { .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg }, { } }; +MODULE_DEVICE_TABLE(of, qcom_llcc_of_match); static struct platform_driver qcom_llcc_driver = { .driver = { diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c index 85f82e195ef8..1dfdd0b9ba24 100644 --- a/drivers/soc/qcom/ocmem.c +++ b/drivers/soc/qcom/ocmem.c @@ -194,14 +194,17 @@ struct ocmem *of_get_ocmem(struct device *dev) devnode = of_parse_phandle(dev->of_node, "sram", 0); if (!devnode || !devnode->parent) { dev_err(dev, "Cannot look up sram phandle\n"); + of_node_put(devnode); return ERR_PTR(-ENODEV); } pdev = of_find_device_by_node(devnode->parent); if (!pdev) { dev_err(dev, "Cannot find device node %s\n", devnode->name); + of_node_put(devnode); return ERR_PTR(-EPROBE_DEFER); } + of_node_put(devnode); ocmem = platform_get_drvdata(pdev); if (!ocmem) { diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index 941499b11758..401a0be3675a 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -493,8 +493,10 @@ static int qmp_cooling_devices_register(struct qmp *qmp) continue; ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++], child); - if (ret) + if (ret) { + of_node_put(child); goto unroll; + } } if (!count) diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c index d2b558438deb..41e929407196 100644 --- a/drivers/soc/qcom/smem_state.c +++ b/drivers/soc/qcom/smem_state.c @@ -136,6 +136,7 @@ static void qcom_smem_state_release(struct kref *ref) struct qcom_smem_state *state = container_of(ref, struct qcom_smem_state, refcount); list_del(&state->list); + of_node_put(state->of_node); kfree(state); } @@ -169,7 +170,7 @@ struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, kref_init(&state->refcount); - state->of_node = of_node; + state->of_node = of_node_get(of_node); state->ops = *ops; 
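/*
 * Editorial note (not part of the patch): the of_node_get() added just
 * above pairs with the of_node_put() this series adds to
 * qcom_smem_state_release(), so the device_node stays pinned for as
 * long as a registered smem state still references it.
 */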
state->priv = priv; diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index a9709aae54ab..fb76c8bc3c64 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -420,6 +420,7 @@ static int smp2p_parse_ipc(struct qcom_smp2p *smp2p) } smp2p->ipc_regmap = syscon_node_to_regmap(syscon); + of_node_put(syscon); if (IS_ERR(smp2p->ipc_regmap)) return PTR_ERR(smp2p->ipc_regmap); diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c index c428d0f78816..acba67dfbc85 100644 --- a/drivers/soc/qcom/smsm.c +++ b/drivers/soc/qcom/smsm.c @@ -359,6 +359,7 @@ static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id) return 0; host->ipc_regmap = syscon_node_to_regmap(syscon); + of_node_put(syscon); if (IS_ERR(host->ipc_regmap)) return PTR_ERR(host->ipc_regmap); @@ -510,7 +511,7 @@ static int qcom_smsm_probe(struct platform_device *pdev) for (id = 0; id < smsm->num_hosts; id++) { ret = smsm_parse_ipc(smsm, id); if (ret < 0) - return ret; + goto out_put; } /* Acquire the main SMSM state vector */ @@ -518,13 +519,14 @@ static int qcom_smsm_probe(struct platform_device *pdev) smsm->num_entries * sizeof(u32)); if (ret < 0 && ret != -EEXIST) { dev_err(&pdev->dev, "unable to allocate shared state entry\n"); - return ret; + goto out_put; } states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL); if (IS_ERR(states)) { dev_err(&pdev->dev, "Unable to acquire shared state entry\n"); - return PTR_ERR(states); + ret = PTR_ERR(states); + goto out_put; } /* Acquire the list of interrupt mask vectors */ @@ -532,13 +534,14 @@ static int qcom_smsm_probe(struct platform_device *pdev) ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size); if (ret < 0 && ret != -EEXIST) { dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n"); - return ret; + goto out_put; } intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL); if (IS_ERR(intr_mask)) { dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n"); - return PTR_ERR(intr_mask); + ret = PTR_ERR(intr_mask); + goto out_put; } /* Setup the reference to the local state bits */ @@ -549,7 +552,8 @@ static int qcom_smsm_probe(struct platform_device *pdev) smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm); if (IS_ERR(smsm->state)) { dev_err(smsm->dev, "failed to register qcom_smem_state\n"); - return PTR_ERR(smsm->state); + ret = PTR_ERR(smsm->state); + goto out_put; } /* Register handlers for remote processor entries of interest. 
*/ @@ -579,16 +583,19 @@ static int qcom_smsm_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, smsm); + of_node_put(local_node); return 0; unwind_interfaces: + of_node_put(node); for (id = 0; id < smsm->num_entries; id++) if (smsm->entries[id].domain) irq_domain_remove(smsm->entries[id].domain); qcom_smem_state_unregister(smsm->state); - +out_put: + of_node_put(local_node); return ret; } diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c index d464ffa1be33..d0a5434715b8 100644 --- a/drivers/soc/renesas/r8a779a0-sysc.c +++ b/drivers/soc/renesas/r8a779a0-sysc.c @@ -83,11 +83,11 @@ static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = { { "a2cv6", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR }, { "a2cn2", R8A779A0_PD_A2CN2, R8A779A0_PD_A3IR }, { "a2imp23", R8A779A0_PD_A2IMP23, R8A779A0_PD_A3IR }, - { "a2dp1", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR }, - { "a2cv2", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR }, - { "a2cv3", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR }, - { "a2cv5", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR }, - { "a2cv7", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR }, + { "a2dp1", R8A779A0_PD_A2DP1, R8A779A0_PD_A3IR }, + { "a2cv2", R8A779A0_PD_A2CV2, R8A779A0_PD_A3IR }, + { "a2cv3", R8A779A0_PD_A2CV3, R8A779A0_PD_A3IR }, + { "a2cv5", R8A779A0_PD_A2CV5, R8A779A0_PD_A3IR }, + { "a2cv7", R8A779A0_PD_A2CV7, R8A779A0_PD_A3IR }, { "a2cn1", R8A779A0_PD_A2CN1, R8A779A0_PD_A3IR }, { "a1cnn0", R8A779A0_PD_A1CNN0, R8A779A0_PD_A2CN0 }, { "a1cnn2", R8A779A0_PD_A1CNN2, R8A779A0_PD_A2CN2 }, diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c index 7b1f24d279b4..890158d0da7f 100644 --- a/drivers/soc/rockchip/grf.c +++ b/drivers/soc/rockchip/grf.c @@ -322,12 +322,14 @@ static int __init rockchip_grf_init(void) return 0; if (!match || !match->data) { pr_err("%s: missing grf data\n", __func__); + of_node_put(np); return -EINVAL; } grf_info = match->data; grf = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(grf)) { pr_err("%s: could not get grf syscon\n", __func__); return PTR_ERR(grf); diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c index d4c7bd59429e..443e38e9f30a 100644 --- a/drivers/soc/sunxi/sunxi_sram.c +++ b/drivers/soc/sunxi/sunxi_sram.c @@ -78,8 +78,8 @@ static struct sunxi_sram_desc sun4i_a10_sram_d = { static struct sunxi_sram_desc sun50i_a64_sram_c = { .data = SUNXI_SRAM_DATA("C", 0x4, 24, 1, - SUNXI_SRAM_MAP(0, 1, "cpu"), - SUNXI_SRAM_MAP(1, 0, "de2")), + SUNXI_SRAM_MAP(1, 0, "cpu"), + SUNXI_SRAM_MAP(0, 1, "de2")), }; static const struct of_device_id sunxi_sram_dt_ids[] = { @@ -254,6 +254,7 @@ int sunxi_sram_claim(struct device *dev) writel(val | ((device << sram_data->offset) & mask), base + sram_data->reg); + sram_desc->claimed = true; spin_unlock(&sram_lock); return 0; @@ -318,12 +319,11 @@ static struct regmap_config sunxi_sram_emac_clock_regmap = { .writeable_reg = sunxi_sram_regmap_accessible_reg, }; -static int sunxi_sram_probe(struct platform_device *pdev) +static int __init sunxi_sram_probe(struct platform_device *pdev) { - struct resource *res; - struct dentry *d; struct regmap *emac_clock; const struct sunxi_sramc_variant *variant; + struct device *dev = &pdev->dev; sram_dev = &pdev->dev; @@ -331,18 +331,10 @@ static int sunxi_sram_probe(struct platform_device *pdev) if (!variant) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); - 
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); - - d = debugfs_create_file("sram", S_IRUGO, NULL, NULL, - &sunxi_sram_fops); - if (!d) - return -ENOMEM; - if (variant->has_emac_clock) { emac_clock = devm_regmap_init_mmio(&pdev->dev, base, &sunxi_sram_emac_clock_regmap); @@ -351,6 +343,10 @@ static int sunxi_sram_probe(struct platform_device *pdev) return PTR_ERR(emac_clock); } + of_platform_populate(dev->of_node, NULL, NULL, dev); + + debugfs_create_file("sram", 0444, NULL, NULL, &sunxi_sram_fops); + return 0; } @@ -396,9 +392,8 @@ static struct platform_driver sunxi_sram_driver = { .name = "sunxi-sram", .of_match_table = sunxi_sram_dt_match, }, - .probe = sunxi_sram_probe, }; -module_platform_driver(sunxi_sram_driver); +builtin_platform_driver_probe(sunxi_sram_driver, sunxi_sram_probe); MODULE_AUTHOR("Maxime Ripard "); MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver"); diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index 976dee036470..676807c5a215 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -136,7 +136,6 @@ config SOC_TEGRA_FUSE def_bool y depends on ARCH_TEGRA select SOC_BUS - select TEGRA20_APB_DMA if ARCH_TEGRA_2x_SOC config SOC_TEGRA_FLOWCTRL bool diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c index 8afb3f45d263..a33ec7eaf23d 100644 --- a/drivers/soc/ti/ti_sci_pm_domains.c +++ b/drivers/soc/ti/ti_sci_pm_domains.c @@ -183,6 +183,8 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev) devm_kcalloc(dev, max_id + 1, sizeof(*pd_provider->data.domains), GFP_KERNEL); + if (!pd_provider->data.domains) + return -ENOMEM; pd_provider->data.num_domains = max_id + 1; pd_provider->data.xlate = ti_sci_pd_xlate; diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c index 575b9bad99d5..2e8986cccdd4 100644 --- a/drivers/soundwire/bus_type.c +++ b/drivers/soundwire/bus_type.c @@ -184,12 +184,8 @@ int __sdw_register_driver(struct sdw_driver *drv, struct module *owner) drv->driver.owner = owner; drv->driver.probe = sdw_drv_probe; - - if (drv->remove) - drv->driver.remove = sdw_drv_remove; - - if (drv->shutdown) - drv->driver.shutdown = sdw_drv_shutdown; + drv->driver.remove = sdw_drv_remove; + drv->driver.shutdown = sdw_drv_shutdown; return driver_register(&drv->driver); } diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index c6d421a4b91b..a3247692ddc0 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -501,9 +501,12 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns, return SDW_CMD_IGNORED; } - /* fill response */ - for (i = 0; i < count; i++) - msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA, cdns->response_buf[i]); + if (msg->flags == SDW_MSG_FLAG_READ) { + /* fill response */ + for (i = 0; i < count; i++) + msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA, + cdns->response_buf[i]); + } return SDW_CMD_OK; } diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 824d9f900aca..942d2fe13218 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -1470,7 +1470,6 @@ int intel_master_startup(struct platform_device *pdev) ret = intel_register_dai(sdw); if (ret) { dev_err(dev, "DAI registration failed: %d\n", ret); - snd_soc_unregister_component(dev); goto err_interrupt; } diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 1e63fd4821f9..8aa89d93db11 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c 
@@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op) static bool atmel_qspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { + if (!spi_mem_default_supports_op(mem, op)) + return false; + if (atmel_qspi_find_mode(op) < 0) return false; diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c index 7f629544060d..a027cfd49df8 100644 --- a/drivers/spi/spi-amd.c +++ b/drivers/spi/spi-amd.c @@ -28,6 +28,7 @@ #define AMD_SPI_RX_COUNT_REG 0x4B #define AMD_SPI_STATUS_REG 0x4C +#define AMD_SPI_FIFO_SIZE 70 #define AMD_SPI_MEM_SIZE 200 /* M_CMD OP codes for SPI */ @@ -245,6 +246,11 @@ static int amd_spi_master_transfer(struct spi_master *master, return 0; } +static size_t amd_spi_max_transfer_size(struct spi_device *spi) +{ + return AMD_SPI_FIFO_SIZE; +} + static int amd_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -278,6 +284,8 @@ static int amd_spi_probe(struct platform_device *pdev) master->flags = SPI_MASTER_HALF_DUPLEX; master->setup = amd_spi_master_setup; master->transfer_one_message = amd_spi_master_transfer; + master->max_transfer_size = amd_spi_max_transfer_size; + master->max_message_size = amd_spi_max_transfer_size; /* Register the controller with SPI framework */ err = devm_spi_register_master(dev, master); diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 4a80f043b7b1..766b00350e39 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -1032,7 +1032,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem, addr = op->addr.val; len = op->data.nbytes; - if (bcm_qspi_bspi_ver_three(qspi) == true) { + if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) { /* * The address coming into this function is a raw flash offset. 
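[Editor's sketch, not part of the patch] The atmel-quadspi fix at the top of this hunk illustrates a general spi-mem rule: a controller's supports_op() should first defer to spi_mem_default_supports_op(), which validates buswidths and SPI-mode compatibility, before applying controller-specific limits. A minimal sketch, with a hypothetical FIFO limit:

	#include <linux/spi/spi-mem.h>

	#define FOO_MAX_DATA	64	/* hypothetical controller FIFO limit */

	static bool foo_supports_op(struct spi_mem *mem,
				    const struct spi_mem_op *op)
	{
		/* generic buswidth/mode validation must come first */
		if (!spi_mem_default_supports_op(mem, op))
			return false;

		/* then the controller-specific constraint */
		return op->data.nbytes <= FOO_MAX_DATA;
	}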
* But for BSPI <= V3, we need to convert it to a remapped BSPI @@ -1051,7 +1051,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem, len < 4) mspi_read = true; - if (mspi_read) + if (!has_bspi(qspi) || mspi_read) return bcm_qspi_mspi_exec_mem_op(spi, op); ret = bcm_qspi_bspi_set_mode(qspi, op, 0); diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 33c32e931767..bb9d8386ba3b 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -1174,10 +1174,14 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr, struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); /* if an error occurred and we have an active dma, then terminate */ - dmaengine_terminate_sync(ctlr->dma_tx); - bs->tx_dma_active = false; - dmaengine_terminate_sync(ctlr->dma_rx); - bs->rx_dma_active = false; + if (ctlr->dma_tx) { + dmaengine_terminate_sync(ctlr->dma_tx); + bs->tx_dma_active = false; + } + if (ctlr->dma_rx) { + dmaengine_terminate_sync(ctlr->dma_rx); + bs->rx_dma_active = false; + } bcm2835_spi_undo_prologue(bs); /* and reset */ diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c index bc9d5eab3c58..8f6a1af14456 100644 --- a/drivers/spi/spi-dw-bt1.c +++ b/drivers/spi/spi-dw-bt1.c @@ -293,8 +293,10 @@ static int dw_spi_bt1_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); ret = dw_spi_add_host(&pdev->dev, dws); - if (ret) + if (ret) { + pm_runtime_disable(&pdev->dev); goto err_disable_clk; + } platform_set_drvdata(pdev, dwsbt1); diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c index a09831c62192..32ac8f9068e8 100644 --- a/drivers/spi/spi-dw-dma.c +++ b/drivers/spi/spi-dw-dma.c @@ -127,12 +127,15 @@ static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws) dw_spi_dma_sg_burst_init(dws); + pci_dev_put(dma_dev); + return 0; free_rxchan: dma_release_channel(dws->rxchan); dws->rxchan = NULL; err_exit: + pci_dev_put(dma_dev); return -EBUSY; } diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index 9851551ebbe0..46ae46a944c5 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -876,6 +876,10 @@ static int fsl_qspi_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI-memory"); + if (!res) { + ret = -EINVAL; + goto err_put_ctrl; + } q->memmap_phy = res->start; /* Since there are 4 cs, map size required is 4 times ahb_buf_size */ q->ahb_addr = devm_ioremap(dev, q->memmap_phy, diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index 5f05d519fbbd..71376b6df89d 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -731,7 +731,7 @@ static int img_spfi_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret) { + if (ret < 0) { pm_runtime_put_noidle(dev); return ret; } diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 0e3bc0b0a526..74b3b6ca15ef 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -434,8 +434,7 @@ static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx, unsigned int pre, post; unsigned int fin = spi_imx->spi_clk; - if (unlikely(fspi > fin)) - return 0; + fspi = min(fspi, fin); post = fls(fin) - fls(fspi); if (fin > fspi << post) diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index 0bc7daa7afc8..6974a1c947aa 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -156,6 +156,7 @@ struct meson_spicc_device { void __iomem *base; struct clk *core; struct clk *pclk; 
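[Editor's sketch, not part of the patch] The img-spfi one-liner above is a classic pm_runtime_get_sync() pitfall: the call returns 1 when the device was already active, so "if (ret)" treats success as failure. Only negative values are errors, and the usage count must still be dropped on that path:

	#include <linux/pm_runtime.h>

	static int foo_runtime_start(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);

		/* 1 means "was already active": success, not an error */
		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* keep the count balanced */
			return ret;
		}
		return 0;
	}

Newer kernels provide pm_runtime_resume_and_get(), which folds the put_noidle() into the helper and avoids this class of bug entirely.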
+ struct clk_divider pow2_div; struct clk *clk; struct spi_message *message; struct spi_transfer *xfer; @@ -168,6 +169,8 @@ struct meson_spicc_device { unsigned long xfer_remain; }; +#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div) + static void meson_spicc_oen_enable(struct meson_spicc_device *spicc) { u32 conf; @@ -421,7 +424,7 @@ static int meson_spicc_prepare_message(struct spi_master *master, { struct meson_spicc_device *spicc = spi_master_get_devdata(master); struct spi_device *spi = message->spi; - u32 conf = 0; + u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK; /* Store current message */ spicc->message = message; @@ -458,8 +461,6 @@ static int meson_spicc_prepare_message(struct spi_master *master, /* Select CS */ conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select); - /* Default Clock rate core/4 */ - /* Default 8bit word */ conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1); @@ -476,12 +477,16 @@ static int meson_spicc_prepare_message(struct spi_master *master, static int meson_spicc_unprepare_transfer(struct spi_master *master) { struct meson_spicc_device *spicc = spi_master_get_devdata(master); + u32 conf = readl_relaxed(spicc->base + SPICC_CONREG) & SPICC_DATARATE_MASK; /* Disable all IRQs */ writel(0, spicc->base + SPICC_INTREG); device_reset_optional(&spicc->pdev->dev); + /* Set default configuration, keeping datarate field */ + writel_relaxed(conf, spicc->base + SPICC_CONREG); + return 0; } @@ -518,14 +523,60 @@ static void meson_spicc_cleanup(struct spi_device *spi) * Clk path for G12A series: * pclk -> pow2 fixed div -> pow2 div -> mux -> out * pclk -> enh fixed div -> enh div -> mux -> out + * + * The pow2 divider is tied to the controller HW state, and the + * divider is only valid when the controller is initialized. + * + * A set of clock ops is added to make sure we don't read/set this + * clock rate while the controller is in an unknown state. 
*/ -static int meson_spicc_clk_init(struct meson_spicc_device *spicc) +static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_divider *divider = to_clk_divider(hw); + struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider); + + if (!spicc->master->cur_msg) + return 0; + + return clk_divider_ops.recalc_rate(hw, parent_rate); +} + +static int meson_spicc_pow2_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_divider *divider = to_clk_divider(hw); + struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider); + + if (!spicc->master->cur_msg) + return -EINVAL; + + return clk_divider_ops.determine_rate(hw, req); +} + +static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_divider *divider = to_clk_divider(hw); + struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider); + + if (!spicc->master->cur_msg) + return -EINVAL; + + return clk_divider_ops.set_rate(hw, rate, parent_rate); +} + +const struct clk_ops meson_spicc_pow2_clk_ops = { + .recalc_rate = meson_spicc_pow2_recalc_rate, + .determine_rate = meson_spicc_pow2_determine_rate, + .set_rate = meson_spicc_pow2_set_rate, +}; + +static int meson_spicc_pow2_clk_init(struct meson_spicc_device *spicc) { struct device *dev = &spicc->pdev->dev; - struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div; - struct clk_divider *pow2_div, *enh_div; - struct clk_mux *mux; + struct clk_fixed_factor *pow2_fixed_div; struct clk_init_data init; struct clk *clk; struct clk_parent_data parent_data[2]; @@ -560,31 +611,45 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc) if (WARN_ON(IS_ERR(clk))) return PTR_ERR(clk); - pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL); - if (!pow2_div) - return -ENOMEM; - snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev)); init.name = name; - init.ops = &clk_divider_ops; - init.flags = CLK_SET_RATE_PARENT; + init.ops = &meson_spicc_pow2_clk_ops; + /* + * Set NOCACHE here to make sure we read the actual HW value + * since we reset the HW after each transfer. 
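[Editor's sketch, not part of the patch] The three wrapper callbacks defined above share one pattern: recover the driver state from the clk_hw via container_of(), gate on controller state, then delegate to the stock clk_divider_ops. A stripped-down sketch of that delegation, using a hypothetical hw_ready flag in place of the cur_msg check:

	#include <linux/clk-provider.h>

	struct foo_spi {
		struct clk_divider div;
		bool hw_ready;	/* hypothetical "controller initialized" flag */
	};

	static unsigned long foo_div_recalc_rate(struct clk_hw *hw,
						 unsigned long parent_rate)
	{
		struct clk_divider *div = to_clk_divider(hw);
		struct foo_spi *foo = container_of(div, struct foo_spi, div);

		if (!foo->hw_ready)	/* divider bits are meaningless now */
			return 0;

		return clk_divider_ops.recalc_rate(hw, parent_rate);
	}

Embedding the clk_divider in the device struct (rather than allocating it separately, as the removed code did) is what makes the container_of() walk from divider to driver state possible.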
+ */ + init.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE; parent_data[0].hw = &pow2_fixed_div->hw; init.num_parents = 1; - pow2_div->shift = 16, - pow2_div->width = 3, - pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO, - pow2_div->reg = spicc->base + SPICC_CONREG; - pow2_div->hw.init = &init; + spicc->pow2_div.shift = 16, + spicc->pow2_div.width = 3, + spicc->pow2_div.flags = CLK_DIVIDER_POWER_OF_TWO, + spicc->pow2_div.reg = spicc->base + SPICC_CONREG; + spicc->pow2_div.hw.init = &init; - clk = devm_clk_register(dev, &pow2_div->hw); - if (WARN_ON(IS_ERR(clk))) - return PTR_ERR(clk); + spicc->clk = devm_clk_register(dev, &spicc->pow2_div.hw); + if (WARN_ON(IS_ERR(spicc->clk))) + return PTR_ERR(spicc->clk); - if (!spicc->data->has_enhance_clk_div) { - spicc->clk = clk; - return 0; - } + return 0; +} + +static int meson_spicc_enh_clk_init(struct meson_spicc_device *spicc) +{ + struct device *dev = &spicc->pdev->dev; + struct clk_fixed_factor *enh_fixed_div; + struct clk_divider *enh_div; + struct clk_mux *mux; + struct clk_init_data init; + struct clk *clk; + struct clk_parent_data parent_data[2]; + char name[64]; + + memset(&init, 0, sizeof(init)); + memset(&parent_data, 0, sizeof(parent_data)); + + init.parent_data = parent_data; /* algorithm for enh div: rate = freq / 2 / (N + 1) */ @@ -637,7 +702,7 @@ static int meson_spicc_clk_init(struct meson_spicc_device *spicc) snprintf(name, sizeof(name), "%s#sel", dev_name(dev)); init.name = name; init.ops = &clk_mux_ops; - parent_data[0].hw = &pow2_div->hw; + parent_data[0].hw = &spicc->pow2_div.hw; parent_data[1].hw = &enh_div->hw; init.num_parents = 2; init.flags = CLK_SET_RATE_PARENT; @@ -754,12 +819,20 @@ static int meson_spicc_probe(struct platform_device *pdev) meson_spicc_oen_enable(spicc); - ret = meson_spicc_clk_init(spicc); + ret = meson_spicc_pow2_clk_init(spicc); if (ret) { - dev_err(&pdev->dev, "clock registration failed\n"); + dev_err(&pdev->dev, "pow2 clock registration failed\n"); goto out_clk; } + if (spicc->data->has_enhance_clk_div) { + ret = meson_spicc_enh_clk_init(spicc); + if (ret) { + dev_err(&pdev->dev, "clock registration failed\n"); + goto out_clk; + } + } + ret = devm_spi_register_master(&pdev->dev, master); if (ret) { dev_err(&pdev->dev, "spi master registration failed\n"); diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c index b4b9b7309b5e..351b0ef52bbc 100644 --- a/drivers/spi/spi-mt7621.c +++ b/drivers/spi/spi-mt7621.c @@ -340,11 +340,9 @@ static int mt7621_spi_probe(struct platform_device *pdev) return PTR_ERR(base); clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(clk)) { - dev_err(&pdev->dev, "unable to get SYS clock, err=%d\n", - status); - return PTR_ERR(clk); - } + if (IS_ERR(clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(clk), + "unable to get SYS clock\n"); status = clk_prepare_enable(clk); if (status) diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c index 288f6c2bbd57..106e3cacba4c 100644 --- a/drivers/spi/spi-mtk-nor.c +++ b/drivers/spi/spi-mtk-nor.c @@ -895,7 +895,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev) static int __maybe_unused mtk_nor_resume(struct device *dev) { - return pm_runtime_force_resume(dev); + struct spi_controller *ctlr = dev_get_drvdata(dev); + struct mtk_nor *sp = spi_controller_get_devdata(ctlr); + int ret; + + ret = pm_runtime_force_resume(dev); + if (ret) + return ret; + + mtk_nor_init(sp); + + return 0; } static const struct dev_pm_ops mtk_nor_pm_ops = { diff --git a/drivers/spi/spi-omap-100k.c 
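[Editor's sketch, not part of the patch] The mt7621 change above is the dev_err_probe() idiom: it folds the error message and the error return into one call and, crucially, stays silent for -EPROBE_DEFER while still recording the deferral reason for debugging. Sketch:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, NULL);

		if (IS_ERR(clk))
			/* prints real errors; -EPROBE_DEFER is only recorded */
			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
					     "failed to get clock\n");
		return 0;
	}

The removed code also printed "err=%d" with the wrong variable (status, not the clk error), a secondary bug the helper eliminates for free.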
b/drivers/spi/spi-omap-100k.c index 0d0cd061d356..7c992d1f4abd 100644 --- a/drivers/spi/spi-omap-100k.c +++ b/drivers/spi/spi-omap-100k.c @@ -414,6 +414,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev) return status; err_fck: + pm_runtime_disable(&pdev->dev); clk_disable_unprepare(spi100k->fck); err_ick: clk_disable_unprepare(spi100k->ick); diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index d39dec6d1c91..f3877eeb3da6 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -1199,8 +1199,10 @@ static int spi_qup_pm_resume_runtime(struct device *device) return ret; ret = clk_prepare_enable(controller->cclk); - if (ret) + if (ret) { + clk_disable_unprepare(controller->iclk); return ret; + } /* Disable clocks auto gaiting */ config = readl_relaxed(controller->base + QUP_CONFIG); @@ -1246,14 +1248,25 @@ static int spi_qup_resume(struct device *device) return ret; ret = clk_prepare_enable(controller->cclk); - if (ret) + if (ret) { + clk_disable_unprepare(controller->iclk); return ret; + } ret = spi_qup_set_state(controller, QUP_STATE_RESET); if (ret) - return ret; + goto disable_clk; - return spi_master_resume(master); + ret = spi_master_resume(master); + if (ret) + goto disable_clk; + + return 0; + +disable_clk: + clk_disable_unprepare(controller->cclk); + clk_disable_unprepare(controller->iclk); + return ret; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index e39fd38f5180..4600e3c9e49e 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -612,6 +612,10 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, rspi->dma_callbacked, HZ); if (ret > 0 && rspi->dma_callbacked) { ret = 0; + if (tx) + dmaengine_synchronize(rspi->ctlr->dma_tx); + if (rx) + dmaengine_synchronize(rspi->ctlr->dma_rx); } else { if (!ret) { dev_err(&rspi->ctlr->dev, "DMA timeout\n"); @@ -1107,14 +1111,11 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev, } memset(&cfg, 0, sizeof(cfg)); + cfg.dst_addr = port_addr + RSPI_SPDR; + cfg.src_addr = port_addr + RSPI_SPDR; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; cfg.direction = dir; - if (dir == DMA_MEM_TO_DEV) { - cfg.dst_addr = port_addr; - cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; - } else { - cfg.src_addr = port_addr; - cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; - } ret = dmaengine_slave_config(chan, &cfg); if (ret) { @@ -1145,12 +1146,12 @@ static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr, } ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id, - res->start + RSPI_SPDR); + res->start); if (!ctlr->dma_tx) return -ENODEV; ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id, - res->start + RSPI_SPDR); + res->start); if (!ctlr->dma_rx) { dma_release_channel(ctlr->dma_tx); ctlr->dma_tx = NULL; diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index dfa7c91e13aa..d435df1b715b 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -84,6 +84,7 @@ #define S3C64XX_SPI_ST_TX_FIFORDY (1<<0) #define S3C64XX_SPI_PACKET_CNT_EN (1<<16) +#define S3C64XX_SPI_PACKET_CNT_MASK GENMASK(15, 0) #define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4) #define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3) @@ -660,6 +661,13 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master, return 0; } +static size_t s3c64xx_spi_max_transfer_size(struct spi_device *spi) +{ + struct spi_controller *ctlr = spi->controller; 
+ + return ctlr->can_dma ? S3C64XX_SPI_PACKET_CNT_MASK : SIZE_MAX; +} + static int s3c64xx_spi_transfer_one(struct spi_master *master, struct spi_device *spi, struct spi_transfer *xfer) @@ -1135,6 +1143,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer; master->prepare_message = s3c64xx_spi_prepare_message; master->transfer_one = s3c64xx_spi_transfer_one; + master->max_transfer_size = s3c64xx_spi_max_transfer_size; master->num_chipselect = sci->num_cs; master->dma_alignment = 8; master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 4f24f6392212..9c58dcd7b324 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -295,7 +295,8 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi, if (!op->data.nbytes) goto wait_nobusy; - if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) + if ((readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) || + qspi->fmode == CCR_FMODE_APM) goto out; reinit_completion(&qspi->data_completion); diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index a6dfc8fef20c..9ec37cf10c01 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -443,7 +443,7 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, u32 div, mbrdiv; /* Ensure spi->clk_rate is even */ - div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz); + div = DIV_ROUND_CLOSEST(spi->clk_rate & ~0x1, speed_hz); /* * SPI framework set xfer->speed_hz to master->max_speed_hz if @@ -941,6 +941,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL * 10, 1); + ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE); if (__ratelimit(&rs)) dev_dbg_ratelimited(spi->dev, "Communication suspended\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c index ea706d9629cb..47cbe73137c2 100644 --- a/drivers/spi/spi-synquacer.c +++ b/drivers/spi/spi-synquacer.c @@ -783,6 +783,7 @@ static int __maybe_unused synquacer_spi_resume(struct device *dev) ret = synquacer_spi_enable(master); if (ret) { + clk_disable_unprepare(sspi->clk); dev_err(dev, "failed to enable spi (%d)\n", ret); return ret; } diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index e06aafe169e0..081da1fd3fd7 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -448,6 +448,7 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst, enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; struct dma_async_tx_descriptor *tx; int ret; + unsigned long time_left; tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags); if (!tx) { @@ -467,9 +468,9 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst, } dma_async_issue_pending(chan); - ret = wait_for_completion_timeout(&qspi->transfer_complete, + time_left = wait_for_completion_timeout(&qspi->transfer_complete, msecs_to_jiffies(len)); - if (ret <= 0) { + if (time_left == 0) { dmaengine_terminate_sync(chan); dev_err(qspi->dev, "DMA wait_for_completion_timeout\n"); return -ETIMEDOUT; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 2ff27f332104..b1a638d91b2b 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -946,6 +946,8 @@ void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, if (sgt->orig_nents) { dma_unmap_sg(dev, 
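[Editor's sketch, not part of the patch] The ti-qspi fix above addresses a recurring type bug: wait_for_completion_timeout() returns an unsigned long (0 on timeout, remaining jiffies otherwise), so storing it in an int and testing "<= 0" is wrong on both counts. The correct shape:

	#include <linux/completion.h>
	#include <linux/jiffies.h>

	static int foo_wait(struct completion *done)
	{
		unsigned long time_left;

		time_left = wait_for_completion_timeout(done,
							msecs_to_jiffies(100));
		if (!time_left)	/* 0 == timed out; else jiffies remaining */
			return -ETIMEDOUT;
		return 0;
	}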
sgt->sgl, sgt->orig_nents, dir); sg_free_table(sgt); + sgt->orig_nents = 0; + sgt->nents = 0; } } diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index bbbd311eda03..e6de2aeece8d 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -887,7 +887,8 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb) * version 5, there is more than one APID mapped to each PPID. * The owner field for each of these mappings specifies the EE which is * allowed to write to the APID. The owner of the last (highest) APID - * for a given PPID will receive interrupts from the PPID. + * which has the IRQ owner bit set for a given PPID will receive + * interrupts from the PPID. */ for (i = 0; ; i++, apidd++) { offset = pmic_arb->ver_ops->apid_map_offset(i); @@ -910,16 +911,16 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb) apid = pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID; prev_apidd = &pmic_arb->apid_data[apid]; - if (valid && is_irq_ee && - prev_apidd->write_ee == pmic_arb->ee) { + if (!valid || apidd->write_ee == pmic_arb->ee) { + /* First PPID mapping or one for this EE */ + pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID; + } else if (valid && is_irq_ee && + prev_apidd->write_ee == pmic_arb->ee) { /* * Duplicate PPID mapping after the one for this EE; * override the irq owner */ prev_apidd->irq_ee = apidd->irq_ee; - } else if (!valid || is_irq_ee) { - /* First PPID mapping or duplicate for another EE */ - pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID; } apidd->ppid = ppid; diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index 7769eadfaf61..ccc65cfc519f 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c @@ -685,7 +685,7 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev) if (!devpriv->usb_rx_buf) return -ENOMEM; - size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE); + size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_tx_buf) return -ENOMEM; diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c index 549cb7d51af8..2a20a1767d77 100644 --- a/drivers/staging/fieldbus/anybuss/host.c +++ b/drivers/staging/fieldbus/anybuss/host.c @@ -1384,7 +1384,7 @@ anybuss_host_common_probe(struct device *dev, goto err_device; return cd; err_device: - device_unregister(&cd->client->dev); + put_device(&cd->client->dev); err_kthread: kthread_stop(cd->qthread); err_reset: diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c index 42ce6c88ea75..4ed29f852c23 100644 --- a/drivers/staging/greybus/audio_codec.c +++ b/drivers/staging/greybus/audio_codec.c @@ -621,8 +621,8 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream) break; } if (!data) { - dev_err(dai->dev, "%s:%s DATA connection missing\n", - dai->name, module->name); + dev_err(dai->dev, "%s DATA connection missing\n", + dai->name); mutex_unlock(&codec->lock); return -ENODEV; } diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c index a9576f92efaa..08443f4aa045 100644 --- a/drivers/staging/greybus/audio_helper.c +++ b/drivers/staging/greybus/audio_helper.c @@ -3,7 +3,6 @@ * Greybus Audio Sound SoC helper APIs */ -#include #include #include #include @@ -116,10 +115,6 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context 
*dapm, { int i; struct snd_soc_dapm_widget *w, *next_w; -#ifdef CONFIG_DEBUG_FS - struct dentry *parent = dapm->debugfs_dapm; - struct dentry *debugfs_w = NULL; -#endif mutex_lock(&dapm->card->dapm_mutex); for (i = 0; i < num; i++) { @@ -139,12 +134,6 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm, continue; } widget++; -#ifdef CONFIG_DEBUG_FS - if (!parent) - debugfs_w = debugfs_lookup(w->name, parent); - debugfs_remove(debugfs_w); - debugfs_w = NULL; -#endif gbaudio_dapm_free_widget(w); } mutex_unlock(&dapm->card->dapm_mutex); diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c index 90d50a693ce5..20c19e08968e 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c @@ -899,9 +899,9 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, int err; unsigned long irqflags; struct ia_css_frame *frame = NULL; - struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp; - struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp; - struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp; + struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp, *s3a_iter; + struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp, *dis_iter; + struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp, *md_iter; enum atomisp_metadata_type md_type; struct atomisp_device *isp = asd->isp; struct v4l2_control ctrl; @@ -940,60 +940,75 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, switch (buf_type) { case IA_CSS_BUFFER_TYPE_3A_STATISTICS: - list_for_each_entry_safe(s3a_buf, _s3a_buf_tmp, + list_for_each_entry_safe(s3a_iter, _s3a_buf_tmp, &asd->s3a_stats_in_css, list) { - if (s3a_buf->s3a_data == + if (s3a_iter->s3a_data == buffer.css_buffer.data.stats_3a) { - list_del_init(&s3a_buf->list); - list_add_tail(&s3a_buf->list, + list_del_init(&s3a_iter->list); + list_add_tail(&s3a_iter->list, &asd->s3a_stats_ready); + s3a_buf = s3a_iter; break; } } asd->s3a_bufs_in_css[css_pipe_id]--; atomisp_3a_stats_ready_event(asd, buffer.css_buffer.exp_id); - dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n", - __func__, s3a_buf->s3a_data->exp_id); + if (s3a_buf) + dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n", + __func__, s3a_buf->s3a_data->exp_id); + else + dev_dbg(isp->dev, "%s: s3a stat is ready with no exp_id found\n", + __func__); break; case IA_CSS_BUFFER_TYPE_METADATA: if (error) break; md_type = atomisp_get_metadata_type(asd, css_pipe_id); - list_for_each_entry_safe(md_buf, _md_buf_tmp, + list_for_each_entry_safe(md_iter, _md_buf_tmp, &asd->metadata_in_css[md_type], list) { - if (md_buf->metadata == + if (md_iter->metadata == buffer.css_buffer.data.metadata) { - list_del_init(&md_buf->list); - list_add_tail(&md_buf->list, + list_del_init(&md_iter->list); + list_add_tail(&md_iter->list, &asd->metadata_ready[md_type]); + md_buf = md_iter; break; } } asd->metadata_bufs_in_css[stream_id][css_pipe_id]--; atomisp_metadata_ready_event(asd, md_type); - dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n", - __func__, md_buf->metadata->exp_id); + if (md_buf) + dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n", + __func__, md_buf->metadata->exp_id); + else + dev_dbg(isp->dev, "%s: metadata is ready with no exp_id found\n", + __func__); break; case IA_CSS_BUFFER_TYPE_DIS_STATISTICS: - list_for_each_entry_safe(dis_buf, _dis_buf_tmp, + list_for_each_entry_safe(dis_iter, _dis_buf_tmp, &asd->dis_stats_in_css, list) { - if (dis_buf->dis_data == + if 
(dis_iter->dis_data == buffer.css_buffer.data.stats_dvs) { spin_lock_irqsave(&asd->dis_stats_lock, irqflags); - list_del_init(&dis_buf->list); - list_add(&dis_buf->list, &asd->dis_stats); + list_del_init(&dis_iter->list); + list_add(&dis_iter->list, &asd->dis_stats); asd->params.dis_proj_data_valid = true; spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags); + dis_buf = dis_iter; break; } } asd->dis_bufs_in_css--; - dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n", - __func__, dis_buf->dis_data->exp_id); + if (dis_buf) + dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n", + __func__, dis_buf->dis_data->exp_id); + else + dev_dbg(isp->dev, "%s: dis stat is ready with no exp_id found\n", + __func__); break; case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME: case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME: diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c index 5c2ca61add8e..b65f2f3ef357 100644 --- a/drivers/staging/media/hantro/hantro_v4l2.c +++ b/drivers/staging/media/hantro/hantro_v4l2.c @@ -644,8 +644,12 @@ static int hantro_buf_prepare(struct vb2_buffer *vb) * (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets * it to buffer length). */ - if (V4L2_TYPE_IS_CAPTURE(vq->type)) - vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage); + if (V4L2_TYPE_IS_CAPTURE(vq->type)) { + if (ctx->is_encoder) + vb2_set_plane_payload(vb, 0, 0); + else + vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage); + } return 0; } diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c index 5ccb3846c879..7a818ca15b37 100644 --- a/drivers/staging/media/meson/vdec/vdec.c +++ b/drivers/staging/media/meson/vdec/vdec.c @@ -1109,6 +1109,7 @@ static int vdec_probe(struct platform_device *pdev) err_vdev_release: video_device_release(vdev); + v4l2_device_unregister(&core->v4l2_dev); return ret; } @@ -1117,6 +1118,7 @@ static int vdec_remove(struct platform_device *pdev) struct amvdec_core *core = platform_get_drvdata(pdev); video_unregister_device(core->vdev_dec); + v4l2_device_unregister(&core->v4l2_dev); return 0; } diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.c b/drivers/staging/media/meson/vdec/vdec_hevc.c index 9530e580e57a..afced435c907 100644 --- a/drivers/staging/media/meson/vdec/vdec_hevc.c +++ b/drivers/staging/media/meson/vdec/vdec_hevc.c @@ -167,8 +167,12 @@ static int vdec_hevc_start(struct amvdec_session *sess) clk_set_rate(core->vdec_hevc_clk, 666666666); ret = clk_prepare_enable(core->vdec_hevc_clk); - if (ret) + if (ret) { + if (core->platform->revision == VDEC_REVISION_G12A || + core->platform->revision == VDEC_REVISION_SM1) + clk_disable_unprepare(core->vdec_hevcf_clk); return ret; + } if (core->platform->revision == VDEC_REVISION_SM1) regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, diff --git a/drivers/staging/media/rkisp1/rkisp1-capture.c b/drivers/staging/media/rkisp1/rkisp1-capture.c index 0c934ca5adaa..8936f5a81680 100644 --- a/drivers/staging/media/rkisp1/rkisp1-capture.c +++ b/drivers/staging/media/rkisp1/rkisp1-capture.c @@ -1258,11 +1258,12 @@ static int rkisp1_capture_link_validate(struct media_link *link) struct rkisp1_capture *cap = video_get_drvdata(vdev); const struct rkisp1_capture_fmt_cfg *fmt = rkisp1_find_fmt_cfg(cap, cap->pix.fmt.pixelformat); - struct v4l2_subdev_format sd_fmt; + struct v4l2_subdev_format sd_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = link->source->index, + }; int ret; - sd_fmt.which = 
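[Editor's sketch, not part of the patch] All three atomisp hunks above apply the same rule: the cursor of list_for_each_entry_safe() does not become NULL after an unmatched walk, so it must never be dereferenced outside the loop. The safe pattern copies the cursor into a separate "found" pointer:

	#include <linux/list.h>
	#include <linux/printk.h>

	struct foo_buf {
		struct list_head list;
		int id;
	};

	static void foo_mark_ready(struct list_head *busy,
				   struct list_head *ready, int id)
	{
		struct foo_buf *found = NULL, *iter, *tmp;

		list_for_each_entry_safe(iter, tmp, busy, list) {
			if (iter->id != id)
				continue;
			list_move_tail(&iter->list, ready);
			found = iter;	/* only 'found' is valid after the loop */
			break;
		}

		if (found)
			pr_debug("buffer %d is ready\n", found->id);
		else
			pr_debug("no buffer matched id %d\n", id);
	}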
V4L2_SUBDEV_FORMAT_ACTIVE; - sd_fmt.pad = link->source->index; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt); if (ret) return ret; diff --git a/drivers/staging/media/rkisp1/rkisp1-resizer.c b/drivers/staging/media/rkisp1/rkisp1-resizer.c index 4dcc342ac2b2..76f17dd7670f 100644 --- a/drivers/staging/media/rkisp1/rkisp1-resizer.c +++ b/drivers/staging/media/rkisp1/rkisp1-resizer.c @@ -500,6 +500,10 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd, sink_fmt->height = RKISP1_DEFAULT_HEIGHT; sink_fmt->field = V4L2_FIELD_NONE; sink_fmt->code = RKISP1_DEF_FMT; + sink_fmt->colorspace = V4L2_COLORSPACE_SRGB; + sink_fmt->xfer_func = V4L2_XFER_FUNC_SRGB; + sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE; sink_crop = v4l2_subdev_get_try_crop(sd, cfg, RKISP1_RSZ_PAD_SINK); sink_crop->width = RKISP1_DEFAULT_WIDTH; diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c index 5487f6d0bcb6..ddccd97a359f 100644 --- a/drivers/staging/media/rkvdec/rkvdec-h264.c +++ b/drivers/staging/media/rkvdec/rkvdec-h264.c @@ -112,6 +112,7 @@ struct rkvdec_h264_run { const struct v4l2_ctrl_h264_sps *sps; const struct v4l2_ctrl_h264_pps *pps; const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix; + int ref_buf_idx[V4L2_H264_NUM_DPB_ENTRIES]; }; struct rkvdec_h264_ctx { @@ -661,8 +662,8 @@ static void assemble_hw_pps(struct rkvdec_ctx *ctx, WRITE_PPS(0xff, PROFILE_IDC); WRITE_PPS(1, CONSTRAINT_SET3_FLAG); WRITE_PPS(sps->chroma_format_idc, CHROMA_FORMAT_IDC); - WRITE_PPS(sps->bit_depth_luma_minus8 + 8, BIT_DEPTH_LUMA); - WRITE_PPS(sps->bit_depth_chroma_minus8 + 8, BIT_DEPTH_CHROMA); + WRITE_PPS(sps->bit_depth_luma_minus8, BIT_DEPTH_LUMA); + WRITE_PPS(sps->bit_depth_chroma_minus8, BIT_DEPTH_CHROMA); WRITE_PPS(0, QPPRIME_Y_ZERO_TRANSFORM_BYPASS_FLAG); WRITE_PPS(sps->log2_max_frame_num_minus4, LOG2_MAX_FRAME_NUM_MINUS4); WRITE_PPS(sps->max_num_ref_frames, MAX_NUM_REF_FRAMES); @@ -725,6 +726,26 @@ static void assemble_hw_pps(struct rkvdec_ctx *ctx, } } +static void lookup_ref_buf_idx(struct rkvdec_ctx *ctx, + struct rkvdec_h264_run *run) +{ + const struct v4l2_ctrl_h264_decode_params *dec_params = run->decode_params; + u32 i; + + for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) { + struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx; + const struct v4l2_h264_dpb_entry *dpb = run->decode_params->dpb; + struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q; + int buf_idx = -1; + + if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE) + buf_idx = vb2_find_timestamp(cap_q, + dpb[i].reference_ts, 0); + + run->ref_buf_idx[i] = buf_idx; + } +} + static void assemble_hw_rps(struct rkvdec_ctx *ctx, struct rkvdec_h264_run *run) { @@ -762,7 +783,7 @@ static void assemble_hw_rps(struct rkvdec_ctx *ctx, for (j = 0; j < RKVDEC_NUM_REFLIST; j++) { for (i = 0; i < h264_ctx->reflists.num_valid; i++) { - u8 dpb_valid = 0; + bool dpb_valid = run->ref_buf_idx[i] >= 0; u8 idx = 0; switch (j) { @@ -779,8 +800,6 @@ static void assemble_hw_rps(struct rkvdec_ctx *ctx, if (idx >= ARRAY_SIZE(dec_params->dpb)) continue; - dpb_valid = !!(dpb[idx].flags & - V4L2_H264_DPB_ENTRY_FLAG_ACTIVE); set_ps_field(hw_rps, DPB_INFO(i, j), idx | dpb_valid << 4); @@ -859,13 +878,8 @@ get_ref_buf(struct rkvdec_ctx *ctx, struct rkvdec_h264_run *run, unsigned int dpb_idx) { struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx; - const struct v4l2_h264_dpb_entry *dpb = run->decode_params->dpb; struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q; - int buf_idx = -1; - - if 
(dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE) - buf_idx = vb2_find_timestamp(cap_q, - dpb[dpb_idx].reference_ts, 0); + int buf_idx = run->ref_buf_idx[dpb_idx]; /* * If a DPB entry is unused or invalid, address of current destination @@ -1102,6 +1116,7 @@ static int rkvdec_h264_run(struct rkvdec_ctx *ctx) assemble_hw_scaling_list(ctx, &run); assemble_hw_pps(ctx, &run); + lookup_ref_buf_idx(ctx, &run); assemble_hw_rps(ctx, &run); config_registers(ctx, &run); @@ -1109,8 +1124,8 @@ static int rkvdec_h264_run(struct rkvdec_ctx *ctx) schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000)); - writel(0xffffffff, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN); - writel(0xffffffff, rkvdec->regs + RKVDEC_REG_H264_ERR_E); + writel(0, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN); + writel(0, rkvdec->regs + RKVDEC_REG_H264_ERR_E); writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND); writel(1, rkvdec->regs + RKVDEC_REG_PREF_CHR_CACHE_COMMAND); diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c index a7788e7a9542..e384ea8d7280 100644 --- a/drivers/staging/media/rkvdec/rkvdec.c +++ b/drivers/staging/media/rkvdec/rkvdec.c @@ -1000,7 +1000,6 @@ static const char * const rkvdec_clk_names[] = { static int rkvdec_probe(struct platform_device *pdev) { struct rkvdec_dev *rkvdec; - struct resource *res; unsigned int i; int ret, irq; @@ -1032,8 +1031,7 @@ static int rkvdec_probe(struct platform_device *pdev) */ clk_set_rate(rkvdec->clocks[0].clk, 500 * 1000 * 1000); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rkvdec->regs = devm_ioremap_resource(&pdev->dev, res); + rkvdec->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rkvdec->regs)) return PTR_ERR(rkvdec->regs); diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c index 1dd833757c4e..28de90edf4cc 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus.c @@ -399,6 +399,8 @@ static int cedrus_probe(struct platform_device *pdev) if (!dev) return -ENOMEM; + platform_set_drvdata(pdev, dev); + dev->vfd = cedrus_video_device; dev->dev = &pdev->dev; dev->pdev = pdev; @@ -469,8 +471,6 @@ static int cedrus_probe(struct platform_device *pdev) goto err_m2m_mc; } - platform_set_drvdata(pdev, dev); - return 0; err_m2m_mc: diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c index 368439cf5e17..20c01a56f284 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c @@ -147,6 +147,9 @@ static void cedrus_h265_frame_info_write_dpb(struct cedrus_ctx *ctx, dpb[i].pic_order_cnt[1] }; + if (buffer_index < 0) + continue; + cedrus_h265_frame_info_write_single(ctx, i, dpb[i].field_pic, pic_order_cnt, buffer_index); diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c index 09b0b8a16e99..2e971cbe2d7a 100644 --- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c +++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c @@ -267,6 +267,8 @@ static int rt2880_pinmux_pins(struct rt2880_priv *p) p->func[i]->pin_count, sizeof(int), GFP_KERNEL); + if (!p->func[i]->pins) + return -ENOMEM; for (j = 0; j < p->func[i]->pin_count; j++) p->func[i]->pins[j] = p->func[i]->pin_first + j; diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c index e8e72f79ca00..aeb6f015fdda 100644 --- 
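[Editor's sketch, not part of the patch] The rt2880 pinmux fix just above adds the missing allocation check: devm_kcalloc() failures must be handled like any kmalloc() failure, even though the lifetime is device-managed. Sketch:

	#include <linux/device.h>

	struct foo_func {
		int pin_count;
		int *pins;
	};

	static int foo_alloc_pins(struct device *dev, struct foo_func *f)
	{
		f->pins = devm_kcalloc(dev, f->pin_count, sizeof(*f->pins),
				       GFP_KERNEL);
		if (!f->pins)
			return -ENOMEM;	/* devm: nothing to unwind later */
		return 0;
	}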
a/drivers/staging/rtl8192e/rtllib_softmac.c +++ b/drivers/staging/rtl8192e/rtllib_softmac.c @@ -651,9 +651,9 @@ static void rtllib_beacons_stop(struct rtllib_device *ieee) spin_lock_irqsave(&ieee->beacon_lock, flags); ieee->beacon_txing = 0; - del_timer_sync(&ieee->beacon_timer); spin_unlock_irqrestore(&ieee->beacon_lock, flags); + del_timer_sync(&ieee->beacon_timer); } diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c index 690b664df8fa..56a447651644 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c @@ -528,9 +528,9 @@ static void ieee80211_beacons_stop(struct ieee80211_device *ieee) spin_lock_irqsave(&ieee->beacon_lock, flags); ieee->beacon_txing = 0; - del_timer_sync(&ieee->beacon_timer); spin_unlock_irqrestore(&ieee->beacon_lock, flags); + del_timer_sync(&ieee->beacon_timer); } void ieee80211_stop_send_beacons(struct ieee80211_device *ieee) diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h index ec33fb9122e9..57badc1e91e3 100644 --- a/drivers/staging/rtl8192u/r8192U.h +++ b/drivers/staging/rtl8192u/r8192U.h @@ -1013,7 +1013,7 @@ typedef struct r8192_priv { bool bis_any_nonbepkts; bool bcurrent_turbo_EDCA; bool bis_cur_rdlstate; - struct timer_list fsync_timer; + struct delayed_work fsync_work; bool bfsync_processing; /* 500ms Fsync timer is active or not */ u32 rate_record; u32 rateCountDiffRecord; diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c index bac402b40121..6aa424a31569 100644 --- a/drivers/staging/rtl8192u/r8192U_dm.c +++ b/drivers/staging/rtl8192u/r8192U_dm.c @@ -2578,19 +2578,20 @@ static void dm_init_fsync(struct net_device *dev) priv->ieee80211->fsync_seconddiff_ratethreshold = 200; priv->ieee80211->fsync_state = Default_Fsync; priv->framesyncMonitor = 1; /* current default 0xc38 monitor on */ - timer_setup(&priv->fsync_timer, dm_fsync_timer_callback, 0); + INIT_DELAYED_WORK(&priv->fsync_work, dm_fsync_work_callback); } static void dm_deInit_fsync(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); - del_timer_sync(&priv->fsync_timer); + cancel_delayed_work_sync(&priv->fsync_work); } -void dm_fsync_timer_callback(struct timer_list *t) +void dm_fsync_work_callback(struct work_struct *work) { - struct r8192_priv *priv = from_timer(priv, t, fsync_timer); + struct r8192_priv *priv = + container_of(work, struct r8192_priv, fsync_work.work); struct net_device *dev = priv->ieee80211->dev; u32 rate_index, rate_count = 0, rate_count_diff = 0; bool bSwitchFromCountDiff = false; @@ -2657,17 +2658,16 @@ void dm_fsync_timer_callback(struct timer_list *t) } } if (bDoubleTimeInterval) { - if (timer_pending(&priv->fsync_timer)) - del_timer_sync(&priv->fsync_timer); - priv->fsync_timer.expires = jiffies + - msecs_to_jiffies(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval); - add_timer(&priv->fsync_timer); + cancel_delayed_work_sync(&priv->fsync_work); + schedule_delayed_work(&priv->fsync_work, + msecs_to_jiffies(priv + ->ieee80211->fsync_time_interval * + priv->ieee80211->fsync_multiple_timeinterval)); } else { - if (timer_pending(&priv->fsync_timer)) - del_timer_sync(&priv->fsync_timer); - priv->fsync_timer.expires = jiffies + - msecs_to_jiffies(priv->ieee80211->fsync_time_interval); - add_timer(&priv->fsync_timer); + cancel_delayed_work_sync(&priv->fsync_work); + schedule_delayed_work(&priv->fsync_work, + 
msecs_to_jiffies(priv + ->ieee80211->fsync_time_interval)); } } else { /* Let Register return to default value; */ @@ -2695,7 +2695,7 @@ static void dm_EndSWFsync(struct net_device *dev) struct r8192_priv *priv = ieee80211_priv(dev); RT_TRACE(COMP_HALDM, "%s\n", __func__); - del_timer_sync(&(priv->fsync_timer)); + cancel_delayed_work_sync(&priv->fsync_work); /* Let Register return to default value; */ if (priv->bswitch_fsync) { @@ -2736,11 +2736,9 @@ static void dm_StartSWFsync(struct net_device *dev) if (priv->ieee80211->fsync_rate_bitmap & rateBitmap) priv->rate_record += priv->stats.received_rate_histogram[1][rateIndex]; } - if (timer_pending(&priv->fsync_timer)) - del_timer_sync(&priv->fsync_timer); - priv->fsync_timer.expires = jiffies + - msecs_to_jiffies(priv->ieee80211->fsync_time_interval); - add_timer(&priv->fsync_timer); + cancel_delayed_work_sync(&priv->fsync_work); + schedule_delayed_work(&priv->fsync_work, + msecs_to_jiffies(priv->ieee80211->fsync_time_interval)); write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd); } diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h index 0b2a1c688597..2159018b4e38 100644 --- a/drivers/staging/rtl8192u/r8192U_dm.h +++ b/drivers/staging/rtl8192u/r8192U_dm.h @@ -166,7 +166,7 @@ void dm_force_tx_fw_info(struct net_device *dev, void dm_init_edca_turbo(struct net_device *dev); void dm_rf_operation_test_callback(unsigned long data); void dm_rf_pathcheck_workitemcallback(struct work_struct *work); -void dm_fsync_timer_callback(struct timer_list *t); +void dm_fsync_work_callback(struct work_struct *work); void dm_cck_txpower_adjust(struct net_device *dev, bool binch14); void dm_shadow_init(struct net_device *dev); void dm_initialize_txpower_tracking(struct net_device *dev); diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c index 2214aca09730..daa3180dfde3 100644 --- a/drivers/staging/rtl8712/os_intfs.c +++ b/drivers/staging/rtl8712/os_intfs.c @@ -332,7 +332,6 @@ void r8712_free_drv_sw(struct _adapter *padapter) r8712_free_evt_priv(&padapter->evtpriv); r8712_DeInitSwLeds(padapter); r8712_free_mlme_priv(&padapter->mlmepriv); - r8712_free_io_queue(padapter); _free_xmit_priv(&padapter->xmitpriv); _r8712_free_sta_priv(&padapter->stapriv); _r8712_free_recv_priv(&padapter->recvpriv); diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c index ff3cb09c57a6..30e965c410ff 100644 --- a/drivers/staging/rtl8712/rtl8712_cmd.c +++ b/drivers/staging/rtl8712/rtl8712_cmd.c @@ -117,34 +117,6 @@ static void r871x_internal_cmd_hdl(struct _adapter *padapter, u8 *pbuf) kfree(pdrvcmd->pbuf); } -static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf) -{ - void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd); - struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; - - /* invoke cmd->callback function */ - pcmd_callback = cmd_callback[pcmd->cmdcode].callback; - if (!pcmd_callback) - r8712_free_cmd_obj(pcmd); - else - pcmd_callback(padapter, pcmd); - return H2C_SUCCESS; -} - -static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf) -{ - void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd); - struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; - - /* invoke cmd->callback function */ - pcmd_callback = cmd_callback[pcmd->cmdcode].callback; - if (!pcmd_callback) - r8712_free_cmd_obj(pcmd); - else - pcmd_callback(padapter, pcmd); - return H2C_SUCCESS; -} - static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf) { struct cmd_obj *pcmd = 
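[Editor's sketch, not part of the patch] The r8192U changes spread across the hunks above convert a timer that did blocking work into a delayed_work: work items run in process context, where sleeping (and cancel_delayed_work_sync()) is legal, unlike timer callbacks. The conversion skeleton, hypothetical names:

	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	struct foo_priv {
		struct delayed_work poll_work;
	};

	static void foo_poll_work(struct work_struct *work)
	{
		struct foo_priv *priv = container_of(work, struct foo_priv,
						     poll_work.work);

		foo_poll_hw(priv);	/* hypothetical; may sleep here */

		/* self-rearm: the delayed_work equivalent of mod_timer() */
		schedule_delayed_work(&priv->poll_work, msecs_to_jiffies(500));
	}

	static void foo_start(struct foo_priv *priv)
	{
		INIT_DELAYED_WORK(&priv->poll_work, foo_poll_work);
		schedule_delayed_work(&priv->poll_work, 0);
	}

	static void foo_stop(struct foo_priv *priv)
	{
		/* waits for the handler and defeats self-rearming */
		cancel_delayed_work_sync(&priv->poll_work);
	}

Note how container_of(work, ..., poll_work.work) replaces from_timer(): the callback receives the embedded work_struct inside the delayed_work.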
(struct cmd_obj *)pbuf; @@ -213,14 +185,6 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter, pcmd_r = NULL; switch (pcmd->cmdcode) { - case GEN_CMD_CODE(_Read_MACREG): - read_macreg_hdl(padapter, (u8 *)pcmd); - pcmd_r = pcmd; - break; - case GEN_CMD_CODE(_Write_MACREG): - write_macreg_hdl(padapter, (u8 *)pcmd); - pcmd_r = pcmd; - break; case GEN_CMD_CODE(_Read_BBREG): read_bbreg_hdl(padapter, (u8 *)pcmd); break; diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c index fed96d4251bf..68d66c3ce2c8 100644 --- a/drivers/staging/rtl8712/usb_intf.c +++ b/drivers/staging/rtl8712/usb_intf.c @@ -266,6 +266,7 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter) static void r8712_usb_dvobj_deinit(struct _adapter *padapter) { + r8712_free_io_queue(padapter); } void rtl871x_intf_stop(struct _adapter *padapter) @@ -303,9 +304,6 @@ void r871x_dev_unload(struct _adapter *padapter) rtl8712_hal_deinit(padapter); } - /*s6.*/ - if (padapter->dvobj_deinit) - padapter->dvobj_deinit(padapter); padapter->bup = false; } } @@ -541,13 +539,13 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf, } else { AutoloadFail = false; } - if (((mac[0] == 0xff) && (mac[1] == 0xff) && + if ((!AutoloadFail) || + ((mac[0] == 0xff) && (mac[1] == 0xff) && (mac[2] == 0xff) && (mac[3] == 0xff) && (mac[4] == 0xff) && (mac[5] == 0xff)) || ((mac[0] == 0x00) && (mac[1] == 0x00) && (mac[2] == 0x00) && (mac[3] == 0x00) && - (mac[4] == 0x00) && (mac[5] == 0x00)) || - (!AutoloadFail)) { + (mac[4] == 0x00) && (mac[5] == 0x00))) { mac[0] = 0x00; mac[1] = 0xe0; mac[2] = 0x4c; @@ -610,6 +608,8 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf) /* Stop driver mlme relation timer */ r8712_stop_drv_timers(padapter); r871x_dev_unload(padapter); + if (padapter->dvobj_deinit) + padapter->dvobj_deinit(padapter); r8712_free_drv_sw(padapter); free_netdev(pnetdev); diff --git a/drivers/staging/rtl8712/usb_ops.c b/drivers/staging/rtl8712/usb_ops.c index e64845e6adf3..af9966d03979 100644 --- a/drivers/staging/rtl8712/usb_ops.c +++ b/drivers/staging/rtl8712/usb_ops.c @@ -29,7 +29,8 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr) u16 wvalue; u16 index; u16 len; - __le32 data; + int status; + __le32 data = 0; struct intf_priv *intfpriv = intfhdl->pintfpriv; request = 0x05; @@ -37,8 +38,10 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr) index = 0; wvalue = (u16)(addr & 0x0000ffff); len = 1; - r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len, - requesttype); + status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, + &data, len, requesttype); + if (status < 0) + return 0; return (u8)(le32_to_cpu(data) & 0x0ff); } @@ -49,7 +52,8 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr) u16 wvalue; u16 index; u16 len; - __le32 data; + int status; + __le32 data = 0; struct intf_priv *intfpriv = intfhdl->pintfpriv; request = 0x05; @@ -57,8 +61,10 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr) index = 0; wvalue = (u16)(addr & 0x0000ffff); len = 2; - r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len, - requesttype); + status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, + &data, len, requesttype); + if (status < 0) + return 0; return (u16)(le32_to_cpu(data) & 0xffff); } @@ -69,7 +75,8 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr) u16 wvalue; u16 index; u16 len; - __le32 data; + int status; + __le32 data = 0; struct intf_priv *intfpriv = intfhdl->pintfpriv; request = 
0x05; @@ -77,8 +84,10 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr) index = 0; wvalue = (u16)(addr & 0x0000ffff); len = 4; - r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len, - requesttype); + status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, + &data, len, requesttype); + if (status < 0) + return 0; return le32_to_cpu(data); } diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c index 2abe205e3453..cee05385f872 100644 --- a/drivers/staging/rtl8723bs/core/rtw_cmd.c +++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c @@ -165,8 +165,6 @@ No irqsave is necessary. int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) { - int res = 0; - init_completion(&pcmdpriv->cmd_queue_comp); init_completion(&pcmdpriv->terminate_cmdthread_comp); @@ -178,18 +176,16 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ); - if (!pcmdpriv->cmd_allocated_buf) { - res = -ENOMEM; - goto exit; - } + if (!pcmdpriv->cmd_allocated_buf) + return -ENOMEM; pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((SIZE_PTR)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1)); pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4); if (!pcmdpriv->rsp_allocated_buf) { - res = -ENOMEM; - goto exit; + kfree(pcmdpriv->cmd_allocated_buf); + return -ENOMEM; } pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((SIZE_PTR)(pcmdpriv->rsp_allocated_buf) & 3); @@ -199,8 +195,8 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) pcmdpriv->rsp_cnt = 0; mutex_init(&pcmdpriv->sctx_mutex); -exit: - return res; + + return 0; } static void c2h_wk_callback(_workitem * work); diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c index 902ac8169948..083ff72976cf 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c @@ -1351,9 +1351,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, sec_len = *(pos++); len -= 1; - if (sec_len > 0 && sec_len <= len) { + if (sec_len > 0 && + sec_len <= len && + sec_len <= 32) { ssid[ssid_index].SsidLength = sec_len; - memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength); + memcpy(ssid[ssid_index].Ssid, pos, sec_len); /* DBG_871X("%s COMBO_SCAN with specific ssid:%s, %d\n", __func__ */ /* , ssid[ssid_index].Ssid, ssid[ssid_index].SsidLength); */ ssid_index++; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 38b10fd5d992..95b91fe45cb3 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -2280,6 +2280,9 @@ void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header) struct vchiq_service *service = find_service_by_handle(handle); int pos; + if (!service) + return; + while (service->msg_queue_write == service->msg_queue_read + VCHIQ_MAX_SLOTS) { if (wait_for_completion_interruptible(&service->msg_queue_pop)) @@ -2299,6 +2302,9 @@ struct vchiq_header *vchiq_msg_hold(unsigned int handle) struct vchiq_header *header; int pos; + if (!service) + return NULL; + if (service->msg_queue_write == service->msg_queue_read) return NULL; diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 09ab6d6f2429..343f0de03154 100644 --- 
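[Editor's sketch, not part of the patch] The rtw_wx_set_scan() fix above bounds a user-controlled length against both the remaining source buffer and the destination array before memcpy(); validating against the source alone was not enough because the SSID field is a fixed 32 bytes. The general shape:

	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct foo_ssid {
		u8 ssid[32];
		u8 len;
	};

	static int foo_copy_ssid(struct foo_ssid *dst, const u8 *src,
				 size_t remaining, u8 sec_len)
	{
		/* clamp against the source AND the fixed-size destination */
		if (!sec_len || sec_len > remaining ||
		    sec_len > sizeof(dst->ssid))
			return -EINVAL;

		dst->len = sec_len;
		memcpy(dst->ssid, src, sec_len);
		return 0;
	}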
a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -564,7 +564,7 @@ err_free_rd: kfree(desc->rd_info); err_free_desc: - while (--i) { + while (i--) { desc = &priv->aRD0Ring[i]; device_free_rx_buf(priv, desc); kfree(desc->rd_info); @@ -610,7 +610,7 @@ err_free_rd: kfree(desc->rd_info); err_free_desc: - while (--i) { + while (i--) { desc = &priv->aRD1Ring[i]; device_free_rx_buf(priv, desc); kfree(desc->rd_info); @@ -675,7 +675,7 @@ static int device_init_td0_ring(struct vnt_private *priv) return 0; err_free_desc: - while (--i) { + while (i--) { desc = &priv->apTD0Rings[i]; kfree(desc->td_info); } @@ -715,7 +715,7 @@ static int device_init_td1_ring(struct vnt_private *priv) return 0; err_free_desc: - while (--i) { + while (i--) { desc = &priv->apTD1Rings[i]; kfree(desc->td_info); } diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c index e7bc1988124a..d5dacd5583c6 100644 --- a/drivers/staging/wfx/main.c +++ b/drivers/staging/wfx/main.c @@ -309,7 +309,8 @@ struct wfx_dev *wfx_init_common(struct device *dev, wdev->pdata.gpio_wakeup = devm_gpiod_get_optional(dev, "wakeup", GPIOD_OUT_LOW); if (IS_ERR(wdev->pdata.gpio_wakeup)) - return NULL; + goto err; + if (wdev->pdata.gpio_wakeup) gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup"); @@ -328,6 +329,10 @@ struct wfx_dev *wfx_init_common(struct device *dev, return NULL; return wdev; + +err: + ieee80211_free_hw(hw); + return NULL; } int wfx_probe(struct wfx_dev *wdev) diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 16d5a4e117a2..5ae5d94c5b93 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -394,6 +394,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host ret = device_register(&tl_hba->dev); if (ret) { pr_err("device_register() failed for tl_hba->dev: %d\n", ret); + put_device(&tl_hba->dev); return -ENODEV; } @@ -1072,7 +1073,7 @@ check_len: */ ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt); if (ret) - goto out; + return ERR_PTR(ret); sh = tl_hba->sh; tcm_loop_hba_no_cnt++; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 109f019d2148..1eded5c4ebda 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -831,7 +831,6 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, attrib->unmap_granularity = q->limits.discard_granularity / block_size; attrib->unmap_granularity_alignment = q->limits.discard_alignment / block_size; - attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors); return true; } EXPORT_SYMBOL(target_configure_unmap_from_queue); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index c6950f157b99..c283e45ac300 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1676,6 +1676,7 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi) mutex_lock(&udev->cmdr_lock); page = tcmu_get_block_page(udev, dbi); if (likely(page)) { + get_page(page); mutex_unlock(&udev->cmdr_lock); return page; } @@ -1714,6 +1715,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) /* For the vmalloc()ed cmd area pages */ addr = (void *)(unsigned long)info->mem[mi].addr + offset; page = vmalloc_to_page(addr); + get_page(page); } else { uint32_t dbi; @@ -1724,7 +1726,6 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } - 
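[Editor's sketch, not part of the patch] The tcm_loop hunk above (and the optee one that follows) enforce the same reference-counting rule: once device_register() has been called, the embedded kobject owns the structure, so a failure must be released with put_device(), never kfree(). Sketch:

	#include <linux/device.h>

	struct foo_dev {
		struct device dev;
	};

	static int foo_add_device(struct foo_dev *foo)
	{
		int ret = device_register(&foo->dev);

		if (ret) {
			/* kobject is live now: put_device(), never kfree() */
			put_device(&foo->dev);
			return ret;
		}
		return 0;
	}

The final put_device() drops the refcount to zero and runs the device's release() callback, which is where the kfree() of the containing structure belongs.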
get_page(page); vmf->page = page; return 0; } diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c index 031806468af4..60ffc54da003 100644 --- a/drivers/tee/optee/device.c +++ b/drivers/tee/optee/device.c @@ -80,7 +80,7 @@ static int optee_register_device(const uuid_t *device_uuid) rc = device_register(&optee_device->dev); if (rc) { pr_err("device registration failed, err: %d\n", rc); - kfree(optee_device); + put_device(&optee_device->dev); } return rc; diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index e07f997cf8dd..9cc4a7b63b0d 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -334,6 +334,9 @@ tee_ioctl_shm_register(struct tee_context *ctx, if (data.flags) return -EINVAL; + if (!access_ok((void __user *)(unsigned long)data.addr, data.length)) + return -EFAULT; + shm = tee_shm_register(ctx, data.addr, data.length, TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED); if (IS_ERR(shm)) diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 499fccba3d74..6fb4400333fb 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include "tee_private.h" diff --git a/drivers/thermal/broadcom/bcm2711_thermal.c b/drivers/thermal/broadcom/bcm2711_thermal.c index 67c2a737bc9d..7b536c8a59dc 100644 --- a/drivers/thermal/broadcom/bcm2711_thermal.c +++ b/drivers/thermal/broadcom/bcm2711_thermal.c @@ -38,7 +38,6 @@ static int bcm2711_get_temp(void *data, int *temp) int offset = thermal_zone_get_offset(priv->thermal); u32 val; int ret; - long t; ret = regmap_read(priv->regmap, AVS_RO_TEMP_STATUS, &val); if (ret) @@ -50,9 +49,7 @@ static int bcm2711_get_temp(void *data, int *temp) val &= AVS_RO_TEMP_STATUS_DATA_MSK; /* Convert a HW code to a temperature reading (millidegree celsius) */ - t = slope * val + offset; - - *temp = t < 0 ? 
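[Editor's sketch, not part of the patch] The tee_ioctl_shm_register() hunk above validates the user-supplied range with access_ok() before handing it to tee_shm_register(); an early -EFAULT on a kernel pointer or wrapped range beats discovering it deep in the page-pinning path. Sketch, with a hypothetical foo_register_range() standing in for the real work:

	#include <linux/uaccess.h>

	struct foo_ctx;		/* opaque driver context, assumed from context */

	static long foo_ioctl_register(struct foo_ctx *ctx, u64 uaddr, u64 ulen)
	{
		/* fail fast on bad userspace ranges before any real work */
		if (!access_ok((void __user *)(unsigned long)uaddr, ulen))
			return -EFAULT;

		return foo_register_range(ctx, uaddr, ulen);
	}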
0 : t; + *temp = slope * val + offset; return 0; } diff --git a/drivers/thermal/broadcom/sr-thermal.c b/drivers/thermal/broadcom/sr-thermal.c index 475ce2900771..85ab9edd580c 100644 --- a/drivers/thermal/broadcom/sr-thermal.c +++ b/drivers/thermal/broadcom/sr-thermal.c @@ -60,6 +60,9 @@ static int sr_thermal_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENOENT; + sr_thermal->regs = (void __iomem *)devm_memremap(&pdev->dev, res->start, resource_size(res), MEMREMAP_WB); diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c index 8d76dbfde6a9..331a241eb0ef 100644 --- a/drivers/thermal/imx_sc_thermal.c +++ b/drivers/thermal/imx_sc_thermal.c @@ -94,8 +94,8 @@ static int imx_sc_thermal_probe(struct platform_device *pdev) sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL); if (!sensor) { of_node_put(child); - of_node_put(sensor_np); - return -ENOMEM; + ret = -ENOMEM; + goto put_node; } ret = thermal_zone_of_get_sensor_id(child, @@ -124,7 +124,9 @@ static int imx_sc_thermal_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n"); } +put_node: of_node_put(sensor_np); + of_node_put(np); return ret; } diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 72a26867c209..28913867cd4b 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -67,7 +67,7 @@ static int evaluate_odvp(struct int3400_thermal_priv *priv); struct odvp_attr { int odvp; struct int3400_thermal_priv *priv; - struct kobj_attribute attr; + struct device_attribute attr; }; static ssize_t data_vault_read(struct file *file, struct kobject *kobj, @@ -269,7 +269,7 @@ static int int3400_thermal_run_osc(acpi_handle handle, return result; } -static ssize_t odvp_show(struct kobject *kobj, struct kobj_attribute *attr, +static ssize_t odvp_show(struct device *dev, struct device_attribute *attr, char *buf) { struct odvp_attr *odvp_attr; diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index b0eb5ece9243..fb04470d7d4b 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -531,9 +531,7 @@ static int start_power_clamp(void) get_online_cpus(); /* prefer BSP */ - control_cpu = 0; - if (!cpu_online(control_cpu)) - control_cpu = smp_processor_id(); + control_cpu = cpumask_first(cpu_online_mask); clamping = true; schedule_delayed_work(&poll_pkg_cstate_work, 0); diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c index 4ffa2e2c0145..9b8ba429a304 100644 --- a/drivers/thermal/qcom/tsens-v0_1.c +++ b/drivers/thermal/qcom/tsens-v0_1.c @@ -522,7 +522,7 @@ static const struct tsens_ops ops_8939 = { struct tsens_plat_data data_8939 = { .num_sensors = 10, .ops = &ops_8939, - .hw_ids = (unsigned int []){ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10 }, + .hw_ids = (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, 10 }, .feat = &tsens_v0_1_feat, .fields = tsens_v0_1_regfields, diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index f52708f310e0..01308e83764d 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "thermal_core.h" @@ -886,19 +887,39 @@ static struct attribute *cooling_device_stats_attrs[] = { NULL }; +static 
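The bcm2711_thermal hunk above stops clamping negative results, so the conversion reports sub-zero temperatures in millidegrees Celsius as the thermal core expects. A standalone sketch of the arithmetic; SLOPE and OFFSET are invented calibration values, whereas the real driver obtains them via thermal_zone_get_slope() and thermal_zone_get_offset().

#include <stdio.h>

/* Invented calibration constants for illustration only. */
#define SLOPE	-487
#define OFFSET	410040

int main(void)
{
	unsigned int codes[] = { 700, 842, 900 };

	for (unsigned int i = 0; i < 3; i++) {
		/* Millidegrees Celsius; negative values now pass through
		 * instead of being clamped to 0. */
		int temp = SLOPE * (int)codes[i] + OFFSET;

		printf("code %u -> %d mC\n", codes[i], temp);
	}
	return 0;
}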
umode_t cooling_device_stats_is_visible(struct kobject *kobj, + struct attribute *attr, int attrno) +{ + struct thermal_cooling_device *cdev = to_cooling_device( + kobj_to_dev(kobj)); + + if (!cdev->stats) + return 0; + + return attr->mode; +} + static const struct attribute_group cooling_device_stats_attr_group = { .attrs = cooling_device_stats_attrs, - .name = "stats" + .name = "stats", + .is_visible = cooling_device_stats_is_visible, }; static void cooling_device_stats_setup(struct thermal_cooling_device *cdev) { + const struct attribute_group *stats_attr_group = NULL; struct cooling_dev_stats *stats; unsigned long states; int var; + bool disable_cdev_stats = false; + + trace_android_vh_disable_thermal_cooling_stats(cdev, + &disable_cdev_stats); + if (disable_cdev_stats) + return; if (cdev->ops->get_max_state(cdev, &states)) - return; + goto out; states++; /* Total number of states is highest state + 1 */ @@ -908,7 +929,7 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev) stats = kzalloc(var, GFP_KERNEL); if (!stats) - return; + goto out; stats->time_in_state = (ktime_t *)(stats + 1); stats->trans_table = (unsigned int *)(stats->time_in_state + states); @@ -918,9 +939,12 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev) spin_lock_init(&stats->lock); + stats_attr_group = &cooling_device_stats_attr_group; + +out: /* Fill the empty slot left in cooling_device_attr_groups */ var = ARRAY_SIZE(cooling_device_attr_groups) - 2; - cooling_device_attr_groups[var] = &cooling_device_stats_attr_group; + cooling_device_attr_groups[var] = stats_attr_group; } static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev) diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index 9894b8f63064..772acb190f50 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c @@ -396,7 +396,7 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg) static int tb_async_error(const struct ctl_pkg *pkg) { - const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg; + const struct cfg_error_pkg *error = pkg->buffer; if (pkg->frame.eof != TB_CFG_PKG_ERROR) return false; diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 82c46b200c34..b2fb3397310e 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -2300,6 +2300,18 @@ struct tb *icm_probe(struct tb_nhi *nhi) icm->rtd3_veto = icm_icl_rtd3_veto; tb->cm_ops = &icm_icl_ops; break; + + case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI: + case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI: + icm->is_supported = icm_tgl_is_supported; + icm->get_mode = icm_ar_get_mode; + icm->driver_ready = icm_tr_driver_ready; + icm->device_connected = icm_tr_device_connected; + icm->device_disconnected = icm_tr_device_disconnected; + icm->xdomain_connected = icm_tr_xdomain_connected; + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; + tb->cm_ops = &icm_tr_ops; + break; } if (!icm->is_supported || !icm->is_supported(tb)) { diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h index 4e0861d75072..7ad6d3f0583b 100644 --- a/drivers/thunderbolt/nhi.h +++ b/drivers/thunderbolt/nhi.h @@ -55,6 +55,8 @@ extern const struct tb_nhi_ops icl_nhi_ops; * need for the PCI quirk anymore as we will use ICM also on Apple * hardware. 
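The thermal_sysfs change ending above adds an .is_visible callback so every stats attribute is hidden whenever no stats structure was allocated, instead of relying solely on patching the group array at setup time. The standalone model below imitates that callback shape; toy_attr and toy_group are invented stand-ins for struct attribute and struct attribute_group.

#include <stdio.h>

typedef unsigned short umode_t;

struct toy_attr {
	const char *name;
	umode_t mode;
};

struct toy_group {
	struct toy_attr *attrs;	/* terminated by a NULL name */
	/* returns 0 to hide the attribute, else the mode to expose */
	umode_t (*is_visible)(const struct toy_attr *attr, int has_stats);
};

static struct toy_attr stats_attrs[] = {
	{ "total_trans", 0444 },
	{ "time_in_state_ms", 0444 },
	{ "reset", 0200 },
	{ NULL, 0 },
};

static umode_t stats_visible(const struct toy_attr *attr, int has_stats)
{
	return has_stats ? attr->mode : 0;
}

static void show_group(const struct toy_group *g, int has_stats)
{
	for (struct toy_attr *a = g->attrs; a->name; a++) {
		umode_t mode = g->is_visible ?
			       g->is_visible(a, has_stats) : a->mode;

		if (mode)
			printf("  %04o %s\n", (unsigned int)mode, a->name);
	}
}

int main(void)
{
	struct toy_group g = { stats_attrs, stats_visible };

	printf("with stats:\n");
	show_group(&g, 1);
	printf("without stats (all hidden):\n");
	show_group(&g, 0);
	return 0;
}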
*/ +#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134 +#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137 #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI 0x15bf diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index c4b157c29af7..e881b72833dc 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -2046,6 +2046,7 @@ int tb_switch_configure(struct tb_switch *sw) * additional capabilities. */ sw->config.cmuv = USB4_VERSION_1_0; + sw->config.plug_events_delay = 0xa; /* Enumerate the switch */ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, @@ -2412,6 +2413,26 @@ void tb_switch_unconfigure_link(struct tb_switch *sw) tb_lc_unconfigure_port(down); } +static int tb_switch_port_hotplug_enable(struct tb_switch *sw) +{ + struct tb_port *port; + + if (tb_switch_is_icm(sw)) + return 0; + + tb_switch_for_each_port(sw, port) { + int res; + + if (!port->cap_usb4) + continue; + + res = usb4_port_hotplug_enable(port); + if (res) + return res; + } + return 0; +} + /** * tb_switch_add() - Add a switch to the domain * @sw: Switch to add @@ -2479,6 +2500,10 @@ int tb_switch_add(struct tb_switch *sw) return ret; } + ret = tb_switch_port_hotplug_enable(sw); + if (ret) + return ret; + ret = device_add(&sw->dev); if (ret) { dev_err(&sw->dev, "failed to add device: %d\n", ret); diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 8ea360b0ff77..266f3bf8ff5c 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -979,6 +979,7 @@ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, const struct tb_port *port); int usb4_port_unlock(struct tb_port *port); +int usb4_port_hotplug_enable(struct tb_port *port); int usb4_port_configure(struct tb_port *port); void usb4_port_unconfigure(struct tb_port *port); int usb4_port_configure_xdomain(struct tb_port *port); diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index e7d9529822fa..26868e2f9d0b 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -285,6 +285,7 @@ struct tb_regs_port_header { #define ADP_CS_5 0x05 #define ADP_CS_5_LCA_MASK GENMASK(28, 22) #define ADP_CS_5_LCA_SHIFT 22 +#define ADP_CS_5_DHP BIT(31) /* TMU adapter registers */ #define TMU_ADP_CS_3 0x03 diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index c05ec6fad77f..0b3a77ade04d 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -854,6 +854,26 @@ int usb4_port_unlock(struct tb_port *port) return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1); } +/** + * usb4_port_hotplug_enable() - Enables hotplug for a port + * @port: USB4 port to operate on + * + * Enables hot plug events on a given port. This is only intended + * to be used on lane, DP-IN, and DP-OUT adapters. 
+ */ +int usb4_port_hotplug_enable(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1); + if (ret) + return ret; + + val &= ~ADP_CS_5_DHP; + return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1); +} + static int usb4_port_set_configured(struct tb_port *port, bool configured) { int ret; diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c index 097266342e5e..4bcaab250676 100644 --- a/drivers/tty/cyclades.c +++ b/drivers/tty/cyclades.c @@ -556,7 +556,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip, } info->idle_stats.recv_idle = jiffies; } - tty_schedule_flip(port); + tty_flip_buffer_push(port); /* end of service */ cyy_writeb(info, CyRIR, save_xir & 0x3f); @@ -996,7 +996,7 @@ static void cyz_handle_rx(struct cyclades_port *info) mod_timer(&info->rx_full_timer, jiffies + 1); #endif info->idle_stats.recv_idle = jiffies; - tty_schedule_flip(&info->port); + tty_flip_buffer_push(&info->port); /* Update rx_get */ cy_writel(&buf_ctrl->rx_get, new_rx_get); @@ -1172,7 +1172,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo) if (delta_count) wake_up_interruptible(&info->port.delta_msr_wait); if (special_count) - tty_schedule_flip(&info->port); + tty_flip_buffer_push(&info->port); } } diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c index c8c5cdfc5e19..d6e82eb61fc2 100644 --- a/drivers/tty/goldfish.c +++ b/drivers/tty/goldfish.c @@ -151,7 +151,7 @@ static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id) address = (unsigned long)(void *)buf; goldfish_tty_rw(qtty, address, count, 0); - tty_schedule_flip(&qtty->port); + tty_flip_buffer_push(&qtty->port); return IRQ_HANDLED; } @@ -407,6 +407,7 @@ static int goldfish_tty_probe(struct platform_device *pdev) err_tty_register_device_failed: free_irq(irq, qtty); err_dec_line_count: + tty_port_destroy(&qtty->port); goldfish_tty_current_line_count--; if (goldfish_tty_current_line_count == 0) goldfish_tty_delete_driver(); @@ -427,7 +428,8 @@ static int goldfish_tty_remove(struct platform_device *pdev) tty_unregister_device(goldfish_tty_driver, qtty->console.index); iounmap(qtty->base); qtty->base = NULL; - free_irq(qtty->irq, pdev); + free_irq(qtty->irq, qtty); + tty_port_destroy(&qtty->port); goldfish_tty_current_line_count--; if (goldfish_tty_current_line_count == 0) goldfish_tty_delete_driver(); diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c index f9f14104bd2c..d6528f3bb9b9 100644 --- a/drivers/tty/moxa.c +++ b/drivers/tty/moxa.c @@ -1385,7 +1385,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle, if (inited && !tty_throttled(tty) && MoxaPortRxQueue(p) > 0) { /* RX */ MoxaPortReadData(p); - tty_schedule_flip(&p->port); + tty_flip_buffer_push(&p->port); } } else { clear_bit(EMPTYWAIT, &p->statusflags); @@ -1410,7 +1410,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle, if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */ tty_insert_flip_char(&p->port, 0, TTY_BREAK); - tty_schedule_flip(&p->port); + tty_flip_buffer_push(&p->port); } if (intr & IntrLine) diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 05562b3cca45..f5063499f9cf 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -72,6 +72,8 @@ module_param(debug, int, 0600); */ #define MAX_MRU 1500 #define MAX_MTU 1500 +/* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */ +#define PROT_OVERHEAD 7 #define GSM_NET_TX_TIMEOUT (HZ*10) /** @@ -230,6 +232,7 @@ struct gsm_mux { int initiator; /* Did we initiate connection */ 
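gsm_read_ea_val() above walks a buffer seven payload bits at a time until it sees a byte with the EA (extension) bit set, which terminates multi-byte values in 3GPP TS 27.010 framing. A standalone decoder mirroring that logic, with an invented input buffer:

#include <stdio.h>

#define EA 0x01	/* extension bit: a set bit terminates the sequence */

/* Shift in 7 payload bits; a set EA bit marks the last byte. */
static int read_ea(unsigned int *val, unsigned char c)
{
	*val <<= 7;
	*val |= c >> 1;
	return c & EA;
}

/* Mirrors gsm_read_ea_val(): returns how many bytes were consumed. */
static unsigned int read_ea_val(unsigned int *val,
				const unsigned char *data, int dlen)
{
	unsigned int len = 0;

	for (; dlen > 0; dlen--) {
		len++;
		if (read_ea(val, *data++))
			break;
	}
	return len;
}

int main(void)
{
	/* 0x82 has EA clear (continue), 0x31 has EA set (stop), so the
	 * trailing 0xff is never consumed. */
	unsigned char buf[] = { 0x82, 0x31, 0xff };
	unsigned int v = 0;
	unsigned int used = read_ea_val(&v, buf, sizeof(buf));

	printf("consumed %u bytes, value 0x%x\n", used, v);
	return 0;
}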
bool dead; /* Has the mux been shut down */ struct gsm_dlci *dlci[NUM_DLCI]; + int old_c_iflag; /* termios c_iflag value before attach */ bool constipated; /* Asked by remote to shut up */ spinlock_t tx_lock; @@ -414,6 +417,27 @@ static int gsm_read_ea(unsigned int *val, u8 c) return c & EA; } +/** + * gsm_read_ea_val - read a value until EA + * @val: variable holding value + * @data: buffer of data + * @dlen: length of data + * + * Processes an EA value. Updates the passed variable and + * returns the processed data length. + */ +static unsigned int gsm_read_ea_val(unsigned int *val, const u8 *data, int dlen) +{ + unsigned int len = 0; + + for (; dlen > 0; dlen--) { + len++; + if (gsm_read_ea(val, *data++)) + break; + } + return len; +} + /** * gsm_encode_modem - encode modem data bits * @dlci: DLCI to encode from @@ -650,6 +674,37 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, return m; } +/** + * gsm_is_flow_ctrl_msg - checks if flow control message + * @msg: message to check + * + * Returns true if the given message is a flow control command of the + * control channel. False is returned in any other case. + */ +static bool gsm_is_flow_ctrl_msg(struct gsm_msg *msg) +{ + unsigned int cmd; + + if (msg->addr > 0) + return false; + + switch (msg->ctrl & ~PF) { + case UI: + case UIH: + cmd = 0; + if (gsm_read_ea_val(&cmd, msg->data + 2, msg->len - 2) < 1) + break; + switch (cmd & ~PF) { + case CMD_FCOFF: + case CMD_FCON: + return true; + } + break; + } + + return false; +} + /** * gsm_data_kick - poke the queue * @gsm: GSM Mux @@ -668,7 +723,7 @@ static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci) int len; list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) { - if (gsm->constipated && msg->addr) + if (gsm->constipated && !gsm_is_flow_ctrl_msg(msg)) continue; if (gsm->encoding != 0) { gsm->txframe[0] = GSM1_SOF; @@ -792,41 +847,51 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg; u8 *dp; - int len, total_size, size; - int h = dlci->adaption - 1; + int h, len, size; - total_size = 0; - while (1) { - len = kfifo_len(&dlci->fifo); - if (len == 0) - return total_size; + /* for modem bits without break data */ + h = ((dlci->adaption == 1) ? 0 : 1); - /* MTU/MRU count only the data bits */ - if (len > gsm->mtu) - len = gsm->mtu; + len = kfifo_len(&dlci->fifo); + if (len == 0) + return 0; - size = len + h; + /* MTU/MRU count only the data bits but watch adaption mode */ + if ((len + h) > gsm->mtu) + len = gsm->mtu - h; - msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); - /* FIXME: need a timer or something to kick this so it can't - get stuck with no work outstanding and no buffer free */ - if (msg == NULL) - return -ENOMEM; - dp = msg->data; - switch (dlci->adaption) { - case 1: /* Unstructured */ - break; - case 2: /* Unstructed with modem bits. - Always one byte as we never send inline break data */ - *dp++ = gsm_encode_modem(dlci); - break; - } - WARN_ON(kfifo_out_locked(&dlci->fifo, dp , len, &dlci->lock) != len); - __gsm_data_queue(dlci, msg); - total_size += size; + size = len + h; + + msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); + /* FIXME: need a timer or something to kick this so it can't + * get stuck with no work outstanding and no buffer free + */ + if (!msg) + return -ENOMEM; + dp = msg->data; + switch (dlci->adaption) { + case 1: /* Unstructured */ + break; + case 2: /* Unstructured with modem bits. 
+ * Always one byte as we never send inline break data + */ + *dp++ = (gsm_encode_modem(dlci) << 1) | EA; + break; + default: + pr_err("%s: unsupported adaption %d\n", __func__, + dlci->adaption); + break; } + + WARN_ON(len != kfifo_out_locked(&dlci->fifo, dp, len, + &dlci->lock)); + + /* Notify upper layer about available send space. */ + tty_port_tty_wakeup(&dlci->port); + + __gsm_data_queue(dlci, msg); /* Bytes of data we used up */ - return total_size; + return size; } /** @@ -1295,11 +1360,12 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command, static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl) { - struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1, gsm->ftype); + struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 2, gsm->ftype); if (msg == NULL) return; - msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */ - memcpy(msg->data + 1, ctrl->data, ctrl->len); + msg->data[0] = (ctrl->cmd << 1) | CR | EA; /* command */ + msg->data[1] = (ctrl->len << 1) | EA; + memcpy(msg->data + 2, ctrl->data, ctrl->len); gsm_data_queue(gsm->dlci[0], msg); } @@ -1322,8 +1388,7 @@ static void gsm_control_retransmit(struct timer_list *t) spin_lock_irqsave(&gsm->control_lock, flags); ctrl = gsm->pending_cmd; if (ctrl) { - gsm->cretries--; - if (gsm->cretries == 0) { + if (gsm->cretries == 0 || !gsm->dlci[0] || gsm->dlci[0]->dead) { gsm->pending_cmd = NULL; ctrl->error = -ETIMEDOUT; ctrl->done = 1; @@ -1331,6 +1396,7 @@ static void gsm_control_retransmit(struct timer_list *t) wake_up(&gsm->event); return; } + gsm->cretries--; gsm_control_transmit(gsm, ctrl); mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); } @@ -1353,7 +1419,7 @@ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm, unsigned int command, u8 *data, int clen) { struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control), - GFP_KERNEL); + GFP_ATOMIC); unsigned long flags; if (ctrl == NULL) return NULL; @@ -1371,7 +1437,7 @@ retry: /* If DLCI0 is in ADM mode skip retries, it won't respond */ if (gsm->dlci[0]->mode == DLCI_MODE_ADM) - gsm->cretries = 1; + gsm->cretries = 0; else gsm->cretries = gsm->n2; @@ -1419,13 +1485,19 @@ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control) static void gsm_dlci_close(struct gsm_dlci *dlci) { + unsigned long flags; + del_timer(&dlci->t1); if (debug & 8) pr_debug("DLCI %d goes closed.\n", dlci->addr); dlci->state = DLCI_CLOSED; + /* Prevent us from sending data before the link is up again */ + dlci->constipated = true; if (dlci->addr != 0) { tty_port_tty_hangup(&dlci->port, false); + spin_lock_irqsave(&dlci->lock, flags); kfifo_reset(&dlci->fifo); + spin_unlock_irqrestore(&dlci->lock, flags); /* Ensure that gsmtty_open() can return. 
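The gsm_control_transmit() hunk above fixes the encoding of control-channel commands: the type octet now carries the C/R bit and a proper length octet precedes the payload, both terminated with EA. A standalone sketch of that layout; the command code and payload bytes below are invented for illustration, not a real CMD_* value.

#include <stdio.h>
#include <string.h>

#define EA 0x01
#define CR 0x02

/* Build a control-channel command the way gsm_control_transmit() now
 * does: type octet (with C/R set), length octet, then the payload. */
static int encode_ctrl(unsigned char *out, unsigned char cmd,
		       const unsigned char *data, unsigned char len)
{
	out[0] = (cmd << 1) | CR | EA;
	out[1] = (len << 1) | EA;
	memcpy(out + 2, data, len);
	return len + 2;
}

int main(void)
{
	unsigned char frame[16];
	const unsigned char payload[] = { 0x07, 0x0d };	/* made up */
	int n = encode_ctrl(frame, 0x1c /* hypothetical command */,
			    payload, sizeof(payload));

	for (int i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}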
*/ tty_port_set_initialized(&dlci->port, 0); wake_up_interruptible(&dlci->port.open_wait); @@ -1450,6 +1522,7 @@ static void gsm_dlci_open(struct gsm_dlci *dlci) del_timer(&dlci->t1); /* This will let a tty open continue */ dlci->state = DLCI_OPEN; + dlci->constipated = false; if (debug & 8) pr_debug("DLCI %d goes open.\n", dlci->addr); wake_up(&dlci->gsm->event); @@ -1477,8 +1550,8 @@ static void gsm_dlci_t1(struct timer_list *t) switch (dlci->state) { case DLCI_OPENING: - dlci->retries--; if (dlci->retries) { + dlci->retries--; gsm_command(dlci->gsm, dlci->addr, SABM|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else if (!dlci->addr && gsm->control == (DM | PF)) { @@ -1493,8 +1566,8 @@ static void gsm_dlci_t1(struct timer_list *t) break; case DLCI_CLOSING: - dlci->retries--; if (dlci->retries) { + dlci->retries--; gsm_command(dlci->gsm, dlci->addr, DISC|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else @@ -1527,6 +1600,25 @@ static void gsm_dlci_begin_open(struct gsm_dlci *dlci) mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } +/** + * gsm_dlci_set_opening - change state to opening + * @dlci: DLCI to open + * + * Change internal state to wait for DLCI open from initiator side. + * We set off timers and responses upon reception of an SABM. + */ +static void gsm_dlci_set_opening(struct gsm_dlci *dlci) +{ + switch (dlci->state) { + case DLCI_CLOSED: + case DLCI_CLOSING: + dlci->state = DLCI_OPENING; + break; + default: + break; + } +} + /** * gsm_dlci_begin_close - start channel open procedure * @dlci: DLCI to open @@ -1665,10 +1757,13 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr) dlci->addr = addr; dlci->adaption = gsm->adaption; dlci->state = DLCI_CLOSED; - if (addr) + if (addr) { dlci->data = gsm_dlci_data; - else + /* Prevent us from sending data before the link is up */ + dlci->constipated = true; + } else { dlci->data = gsm_dlci_command; + } gsm->dlci[addr] = dlci; return dlci; } @@ -1810,7 +1905,6 @@ static void gsm_queue(struct gsm_mux *gsm) gsm_response(gsm, address, UA); gsm_dlci_close(dlci); break; - case UA: case UA|PF: if (cr == 0 || dlci == NULL) break; @@ -1844,7 +1938,7 @@ static void gsm_queue(struct gsm_mux *gsm) goto invalid; #endif if (dlci == NULL || dlci->state != DLCI_OPEN) { - gsm_command(gsm, address, DM|PF); + gsm_response(gsm, address, DM|PF); return; } dlci->data(dlci, gsm->buf, gsm->len); @@ -1954,6 +2048,16 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c) static void gsm1_receive(struct gsm_mux *gsm, unsigned char c) { + /* handle XON/XOFF */ + if ((c & ISO_IEC_646_MASK) == XON) { + gsm->constipated = true; + return; + } else if ((c & ISO_IEC_646_MASK) == XOFF) { + gsm->constipated = false; + /* Kick the link in case it is idling */ + gsm_data_kick(gsm, NULL); + return; + } if (c == GSM1_SOF) { /* EOF is only valid in frame if we have got to the data state and received at least one byte (the FCS) */ @@ -1968,7 +2072,8 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c) } /* Any partial frame was a runt so go back to start */ if (gsm->state != GSM_START) { - gsm->malformed++; + if (gsm->state != GSM_SEARCH) + gsm->malformed++; gsm->state = GSM_START; } /* A SOF in GSM_START means we are still reading idling or @@ -2040,74 +2145,43 @@ static void gsm_error(struct gsm_mux *gsm, gsm->io_error++; } -static int gsm_disconnect(struct gsm_mux *gsm) -{ - struct gsm_dlci *dlci = gsm->dlci[0]; - struct gsm_control *gc; - - if (!dlci) - return 0; - - /* In theory disconnecting DLCI 0 
is sufficient but for some - modems this is apparently not the case. */ - gc = gsm_control_send(gsm, CMD_CLD, NULL, 0); - if (gc) - gsm_control_wait(gsm, gc); - - del_timer_sync(&gsm->t2_timer); - /* Now we are sure T2 has stopped */ - - gsm_dlci_begin_close(dlci); - wait_event_interruptible(gsm->event, - dlci->state == DLCI_CLOSED); - - if (signal_pending(current)) - return -EINTR; - - return 0; -} - /** * gsm_cleanup_mux - generic GSM protocol cleanup * @gsm: our mux + * @disc: disconnect link? * * Clean up the bits of the mux which are the same for all framing * protocols. Remove the mux from the mux table, stop all the timers * and then shut down each device hanging up the channels as we go. */ -static void gsm_cleanup_mux(struct gsm_mux *gsm) +static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) { int i; struct gsm_dlci *dlci = gsm->dlci[0]; struct gsm_msg *txq, *ntxq; gsm->dead = true; - - spin_lock(&gsm_mux_lock); - for (i = 0; i < MAX_MUX; i++) { - if (gsm_mux[i] == gsm) { - gsm_mux[i] = NULL; - break; - } - } - spin_unlock(&gsm_mux_lock); - /* open failed before registering => nothing to do */ - if (i == MAX_MUX) - return; - - del_timer_sync(&gsm->t2_timer); - /* Now we are sure T2 has stopped */ - if (dlci) - dlci->dead = true; - - /* Free up any link layer users */ mutex_lock(&gsm->mutex); - for (i = 0; i < NUM_DLCI; i++) + + if (dlci) { + if (disc && dlci->state != DLCI_CLOSED) { + gsm_dlci_begin_close(dlci); + wait_event(gsm->event, dlci->state == DLCI_CLOSED); + } + dlci->dead = true; + } + + /* Finish outstanding timers, making sure they are done */ + del_timer_sync(&gsm->t2_timer); + + /* Free up any link layer users and finally the control channel */ + for (i = NUM_DLCI - 1; i >= 0; i--) if (gsm->dlci[i]) gsm_dlci_release(gsm->dlci[i]); mutex_unlock(&gsm->mutex); /* Now wipe the queues */ + tty_ldisc_flush(gsm->tty); list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list) kfree(txq); INIT_LIST_HEAD(&gsm->tx_list); @@ -2125,30 +2199,12 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm) static int gsm_activate_mux(struct gsm_mux *gsm) { struct gsm_dlci *dlci; - int i = 0; - - timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0); - init_waitqueue_head(&gsm->event); - spin_lock_init(&gsm->control_lock); - spin_lock_init(&gsm->tx_lock); if (gsm->encoding == 0) gsm->receive = gsm0_receive; else gsm->receive = gsm1_receive; - spin_lock(&gsm_mux_lock); - for (i = 0; i < MAX_MUX; i++) { - if (gsm_mux[i] == NULL) { - gsm->num = i; - gsm_mux[i] = gsm; - break; - } - } - spin_unlock(&gsm_mux_lock); - if (i == MAX_MUX) - return -EBUSY; - dlci = gsm_dlci_alloc(gsm, 0); if (dlci == NULL) return -ENOMEM; @@ -2164,6 +2220,15 @@ static int gsm_activate_mux(struct gsm_mux *gsm) */ static void gsm_free_mux(struct gsm_mux *gsm) { + int i; + + for (i = 0; i < MAX_MUX; i++) { + if (gsm == gsm_mux[i]) { + gsm_mux[i] = NULL; + break; + } + } + mutex_destroy(&gsm->mutex); kfree(gsm->txframe); kfree(gsm->buf); kfree(gsm); @@ -2183,12 +2248,20 @@ static void gsm_free_muxr(struct kref *ref) static inline void mux_get(struct gsm_mux *gsm) { + unsigned long flags; + + spin_lock_irqsave(&gsm_mux_lock, flags); kref_get(&gsm->ref); + spin_unlock_irqrestore(&gsm_mux_lock, flags); } static inline void mux_put(struct gsm_mux *gsm) { + unsigned long flags; + + spin_lock_irqsave(&gsm_mux_lock, flags); kref_put(&gsm->ref, gsm_free_muxr); + spin_unlock_irqrestore(&gsm_mux_lock, flags); } static inline unsigned int mux_num_to_base(struct gsm_mux *gsm) @@ -2209,6 +2282,7 @@ static inline unsigned int 
mux_line_to_num(unsigned int line) static struct gsm_mux *gsm_alloc_mux(void) { + int i; struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL); if (gsm == NULL) return NULL; @@ -2217,7 +2291,7 @@ static struct gsm_mux *gsm_alloc_mux(void) kfree(gsm); return NULL; } - gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL); + gsm->txframe = kmalloc(2 * (MAX_MTU + PROT_OVERHEAD - 1), GFP_KERNEL); if (gsm->txframe == NULL) { kfree(gsm->buf); kfree(gsm); @@ -2227,6 +2301,10 @@ static struct gsm_mux *gsm_alloc_mux(void) mutex_init(&gsm->mutex); kref_init(&gsm->ref); INIT_LIST_HEAD(&gsm->tx_list); + timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0); + init_waitqueue_head(&gsm->event); + spin_lock_init(&gsm->control_lock); + spin_lock_init(&gsm->tx_lock); gsm->t1 = T1; gsm->t2 = T2; @@ -2238,6 +2316,26 @@ static struct gsm_mux *gsm_alloc_mux(void) gsm->mtu = 64; gsm->dead = true; /* Avoid early tty opens */ + /* Store the instance to the mux array or abort if no space is + * available. + */ + spin_lock(&gsm_mux_lock); + for (i = 0; i < MAX_MUX; i++) { + if (!gsm_mux[i]) { + gsm_mux[i] = gsm; + gsm->num = i; + break; + } + } + spin_unlock(&gsm_mux_lock); + if (i == MAX_MUX) { + mutex_destroy(&gsm->mutex); + kfree(gsm->txframe); + kfree(gsm->buf); + kfree(gsm); + return NULL; + } + return gsm; } @@ -2264,6 +2362,7 @@ static void gsm_copy_config_values(struct gsm_mux *gsm, static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) { + int ret = 0; int need_close = 0; int need_restart = 0; @@ -2273,7 +2372,7 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) /* Check the MRU/MTU range looks sane */ if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8) return -EINVAL; - if (c->n2 < 3) + if (c->n2 > 255) return -EINVAL; if (c->encapsulation > 1) /* Basic, advanced, no I */ return -EINVAL; @@ -2304,19 +2403,11 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) /* * Close down what is needed, restart and initiate the new - * configuration + * configuration. On the first time there is no DLCI[0] + * and closing or cleaning up is not necessary. */ - - if (need_close || need_restart) { - int ret; - - ret = gsm_disconnect(gsm); - - if (ret) - return ret; - } - if (need_restart) - gsm_cleanup_mux(gsm); + if (need_close || need_restart) + gsm_cleanup_mux(gsm, true); gsm->initiator = c->initiator; gsm->mru = c->mru; @@ -2339,10 +2430,13 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) * FIXME: We need to separate activation/deactivation from adding * and removing from the mux array */ - if (need_restart) - gsm_activate_mux(gsm); - if (gsm->initiator && need_close) - gsm_dlci_begin_open(gsm->dlci[0]); + if (gsm->dead) { + ret = gsm_activate_mux(gsm); + if (ret) + return ret; + if (gsm->initiator) + gsm_dlci_begin_open(gsm->dlci[0]); + } return 0; } @@ -2385,6 +2479,9 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) int ret, i; gsm->tty = tty_kref_get(tty); + /* Turn off tty XON/XOFF handling to handle it explicitly. */ + gsm->old_c_iflag = tty->termios.c_iflag; + tty->termios.c_iflag &= (IXON | IXOFF); ret = gsm_activate_mux(gsm); if (ret != 0) tty_kref_put(gsm->tty); @@ -2425,7 +2522,8 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) WARN_ON(tty != gsm->tty); for (i = 1; i < NUM_DLCI; i++) tty_unregister_device(gsm_tty_driver, base + i); - gsm_cleanup_mux(gsm); + /* Restore tty XON/XOFF handling. 
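The txframe allocation above grows from 2 * MAX_MRU + 2 to 2 * (MAX_MTU + PROT_OVERHEAD - 1): the buffer holds an encoded transmit frame, and in the advanced option mode every octet between the two flag bytes may be escaped into two octets. A quick standalone check, under that worst-case-escaping assumption, that the new size matches:

#include <stdio.h>

#define MAX_MTU		1500
/* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */
#define PROT_OVERHEAD	7

int main(void)
{
	/* Worst case: the two flag bytes stay single octets, while the
	 * payload plus the 5 non-flag overhead octets may all double. */
	int worst = 2 + 2 * (MAX_MTU + PROT_OVERHEAD - 2);
	int alloc = 2 * (MAX_MTU + PROT_OVERHEAD - 1);

	printf("worst-case frame: %d bytes, buffer: %d bytes\n",
	       worst, alloc);
	return 0;
}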
*/ + gsm->tty->termios.c_iflag = gsm->old_c_iflag; tty_kref_put(gsm->tty); gsm->tty = NULL; } @@ -2493,6 +2591,12 @@ static void gsmld_close(struct tty_struct *tty) { struct gsm_mux *gsm = tty->disc_data; + /* The ldisc locks and closes the port before calling our close. This + * means we have no way to do a proper disconnect. We will not bother + * to do one. + */ + gsm_cleanup_mux(gsm, false); + gsmld_detach_gsm(tty, gsm); gsmld_flush_buffer(tty); @@ -2531,7 +2635,7 @@ static int gsmld_open(struct tty_struct *tty) ret = gsmld_attach_gsm(tty, gsm); if (ret != 0) { - gsm_cleanup_mux(gsm); + gsm_cleanup_mux(gsm, false); mux_put(gsm); } return ret; @@ -2600,11 +2704,24 @@ static ssize_t gsmld_read(struct tty_struct *tty, struct file *file, static ssize_t gsmld_write(struct tty_struct *tty, struct file *file, const unsigned char *buf, size_t nr) { - int space = tty_write_room(tty); + struct gsm_mux *gsm = tty->disc_data; + unsigned long flags; + int space; + int ret; + + if (!gsm) + return -ENODEV; + + ret = -ENOBUFS; + spin_lock_irqsave(&gsm->tx_lock, flags); + space = tty_write_room(tty); if (space >= nr) - return tty->ops->write(tty, buf, nr); - set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); - return -ENOBUFS; + ret = tty->ops->write(tty, buf, nr); + else + set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + spin_unlock_irqrestore(&gsm->tx_lock, flags); + + return ret; } /** @@ -2629,12 +2746,15 @@ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file, poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); + + if (gsm->dead) + mask |= EPOLLHUP; if (tty_hung_up_p(file)) mask |= EPOLLHUP; + if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) + mask |= EPOLLHUP; if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0) mask |= EPOLLOUT | EPOLLWRNORM; - if (gsm->dead) - mask |= EPOLLHUP; return mask; } @@ -2888,19 +3008,17 @@ static struct tty_ldisc_ops tty_ldisc_packet = { static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk) { - u8 modembits[5]; + u8 modembits[3]; struct gsm_control *ctrl; int len = 2; - if (brk) + modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */ + modembits[1] = (gsm_encode_modem(dlci) << 1) | EA; + if (brk) { + modembits[2] = (brk << 4) | 2 | EA; /* Length, Break, EA */ len++; - - modembits[0] = len << 1 | EA; /* Data bytes */ - modembits[1] = dlci->addr << 2 | 3; /* DLCI, EA, 1 */ - modembits[2] = gsm_encode_modem(dlci) << 1 | EA; - if (brk) - modembits[3] = brk << 4 | 2 | EA; /* Valid, EA */ - ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1); + } + ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len); if (ctrl == NULL) return -ENOMEM; return gsm_control_wait(dlci->gsm, ctrl); @@ -3008,6 +3126,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp) { struct gsm_dlci *dlci = tty->driver_data; struct tty_port *port = &dlci->port; + struct gsm_mux *gsm = dlci->gsm; port->count++; tty_port_tty_set(port, tty); @@ -3017,7 +3136,10 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp) a DM straight back. 
This is ok as that will have caused a hangup */ tty_port_set_initialized(port, 1); /* Start sending off SABM messages */ - gsm_dlci_begin_open(dlci); + if (gsm->initiator) + gsm_dlci_begin_open(dlci); + else + gsm_dlci_set_opening(dlci); /* And wait for virtual carrier */ return tty_port_block_til_ready(port, tty, filp); } @@ -3085,13 +3207,17 @@ static int gsmtty_chars_in_buffer(struct tty_struct *tty) static void gsmtty_flush_buffer(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; + unsigned long flags; + if (dlci->state == DLCI_CLOSED) return; /* Caution needed: If we implement reliable transport classes then the data being transmitted can't simply be junked once it has first hit the stack. Until then we can just blow it away */ + spin_lock_irqsave(&dlci->lock, flags); kfifo_reset(&dlci->fifo); + spin_unlock_irqrestore(&dlci->lock, flags); /* Need to unhook this DLCI from the transmit queue logic */ } diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 58190135efb7..12dde01e576b 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -2073,6 +2073,35 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty, return ldata->read_tail != canon_head; } +/* + * If we finished a read at the exact location of an + * EOF (special EOL character that's a __DISABLED_CHAR) + * in the stream, silently eat the EOF. + */ +static void canon_skip_eof(struct tty_struct *tty) +{ + struct n_tty_data *ldata = tty->disc_data; + size_t tail, canon_head; + + canon_head = smp_load_acquire(&ldata->canon_head); + tail = ldata->read_tail; + + // No data? + if (tail == canon_head) + return; + + // See if the tail position is EOF in the circular buffer + tail &= (N_TTY_BUF_SIZE - 1); + if (!test_bit(tail, ldata->read_flags)) + return; + if (read_buf(ldata, tail) != __DISABLED_CHAR) + return; + + // Clear the EOL bit, skip the EOF char. + clear_bit(tail, ldata->read_flags); + smp_store_release(&ldata->read_tail, ldata->read_tail + 1); +} + /** * job_control - check job control * @tty: tty @@ -2142,7 +2171,14 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, */ if (*cookie) { if (ldata->icanon && !L_EXTPROC(tty)) { - if (canon_copy_from_read_buf(tty, &kb, &nr)) + /* + * If we have filled the user buffer, see + * if we should skip an EOF character before + * releasing the lock and returning done. 
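canon_skip_eof() above peeks at the tail of the line-discipline ring buffer, a power-of-two array indexed by masking, and consumes a pending EOF marker (an EOL-flagged __DISABLED_CHAR) so it is not misread as an empty line. A standalone model of the same bookkeeping; the buffer size and the simulated input are invented.

#include <stdio.h>
#include <string.h>

#define BUF_SIZE	16	/* power of two, like N_TTY_BUF_SIZE */
#define DISABLED_CHAR	'\0'

static char read_buf[BUF_SIZE];
static unsigned char eol_flag[BUF_SIZE];
static size_t read_tail, canon_head;

/* Mirrors canon_skip_eof(): if the next unread character is an EOF
 * marker (EOL-flagged DISABLED_CHAR), silently consume it. */
static void skip_eof(void)
{
	size_t tail = read_tail;

	if (tail == canon_head)
		return;			/* no data */

	tail &= (BUF_SIZE - 1);		/* index into the circular buffer */
	if (!eol_flag[tail])
		return;
	if (read_buf[tail] != DISABLED_CHAR)
		return;

	eol_flag[tail] = 0;		/* clear the EOL bit ... */
	read_tail++;			/* ... and step past the EOF */
}

int main(void)
{
	/* Simulate "abc" followed by an EOF (^D) at position 3. */
	memcpy(read_buf, "abc", 3);
	read_buf[3] = DISABLED_CHAR;
	eol_flag[3] = 1;
	canon_head = 4;
	read_tail = 3;			/* reader already consumed "abc" */

	skip_eof();
	printf("tail now %zu (EOF consumed: %s)\n", read_tail,
	       read_tail == 4 ? "yes" : "no");
	return 0;
}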
+ */ + if (!nr) + canon_skip_eof(tty); + else if (canon_copy_from_read_buf(tty, &kb, &nr)) return kb - kbuf; } else { if (copy_from_read_buf(tty, &kb, &nr)) diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 23368cec7ee8..16498f5fba64 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -111,21 +111,11 @@ static void pty_unthrottle(struct tty_struct *tty) static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) { struct tty_struct *to = tty->link; - unsigned long flags; - if (tty->stopped) + if (tty->stopped || !c) return 0; - if (c > 0) { - spin_lock_irqsave(&to->port->lock, flags); - /* Stuff the data into the input queue of the other end */ - c = tty_insert_flip_string(to->port, buf, c); - spin_unlock_irqrestore(&to->port->lock, flags); - /* And shovel */ - if (c) - tty_flip_buffer_push(to->port); - } - return c; + return tty_insert_flip_string_and_push_buffer(to->port, buf, c); } /** diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index 137658bb086f..2e001e258128 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h @@ -122,6 +122,28 @@ static inline void serial_out(struct uart_8250_port *up, int offset, int value) up->port.serial_out(&up->port, offset, value); } +/* + * For the 16C950 + */ +static void serial_icr_write(struct uart_8250_port *up, int offset, int value) +{ + serial_out(up, UART_SCR, offset); + serial_out(up, UART_ICR, value); +} + +static unsigned int __maybe_unused serial_icr_read(struct uart_8250_port *up, + int offset) +{ + unsigned int value; + + serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD); + serial_out(up, UART_SCR, offset); + value = serial_in(up, UART_ICR); + serial_icr_write(up, UART_ACR, up->acr); + + return value; +} + void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p); static inline int serial_dl_read(struct uart_8250_port *up) diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index fc81bbc7697e..00f6dc7e9477 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -331,9 +332,9 @@ static int univ8250_setup_irq(struct uart_8250_port *up) * hardware interrupt, we use a timer-based system. The original * driver used to do this with IRQ0. 
*/ - if (!port->irq) { + if (!port->irq) mod_timer(&up->timer, jiffies + uart_poll_timeout(port)); - } else + else retval = serial_link_irq_chain(up); return retval; @@ -572,6 +573,9 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev) up->port.dev = dev; + if (uart_console_enabled(&up->port)) + pm_runtime_get_sync(up->port.dev); + serial8250_apply_quirks(up); uart_add_one_port(drv, &up->port); } @@ -764,6 +768,7 @@ void serial8250_suspend_port(int line) if (!console_suspend_enabled && uart_console(port) && port->type != PORT_8250) { unsigned char canary = 0xa5; + serial_out(up, UART_SCR, canary); if (serial_in(up, UART_SCR) == canary) up->canary = canary; diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 96abbe2628ca..82a4f6dab59a 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -134,12 +134,15 @@ static void dw8250_check_lcr(struct uart_port *p, int value) /* Returns once the transmitter is empty or we run out of retries */ static void dw8250_tx_wait_empty(struct uart_port *p) { + struct uart_8250_port *up = up_to_u8250p(p); unsigned int tries = 20000; unsigned int delay_threshold = tries - 1000; unsigned int lsr; while (tries--) { lsr = readb (p->membase + (UART_LSR << p->regshift)); + up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + if (lsr & UART_LSR_TEMT) break; diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c index 251f0018ae8c..dba5950b8d0e 100644 --- a/drivers/tty/serial/8250/8250_fintek.c +++ b/drivers/tty/serial/8250/8250_fintek.c @@ -200,12 +200,12 @@ static int fintek_8250_rs485_config(struct uart_port *port, if (!pdata) return -EINVAL; - /* Hardware do not support same RTS level on send and receive */ - if (!(rs485->flags & SER_RS485_RTS_ON_SEND) == - !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) - return -EINVAL; if (rs485->flags & SER_RS485_ENABLED) { + /* Hardware do not support same RTS level on send and receive */ + if (!(rs485->flags & SER_RS485_RTS_ON_SEND) == + !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) + return -EINVAL; memset(rs485->padding, 0, sizeof(rs485->padding)); config |= RS485_URA; } else { diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c index dfb730b7ea2a..1349c161c192 100644 --- a/drivers/tty/serial/8250/8250_lpss.c +++ b/drivers/tty/serial/8250/8250_lpss.c @@ -268,8 +268,13 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port struct dw_dma_slave *rx_param, *tx_param; struct device *dev = port->port.dev; - if (!lpss->dma_param.dma_dev) + if (!lpss->dma_param.dma_dev) { + dma = port->dma; + if (dma) + goto out_configuration_only; + return 0; + } rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL); if (!rx_param) @@ -280,16 +285,18 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port return -ENOMEM; *rx_param = lpss->dma_param; - dma->rxconf.src_maxburst = lpss->dma_maxburst; - *tx_param = lpss->dma_param; - dma->txconf.dst_maxburst = lpss->dma_maxburst; dma->fn = lpss8250_dma_filter; dma->rx_param = rx_param; dma->tx_param = tx_param; port->dma = dma; + +out_configuration_only: + dma->rxconf.src_maxburst = lpss->dma_maxburst; + dma->txconf.dst_maxburst = lpss->dma_maxburst; + return 0; } diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index fb65dc601b23..de48a58460f4 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -37,6 +37,7 @@ 
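The 8250_fintek change above moves the RTS-level sanity check under the SER_RS485_ENABLED test, so disabling RS-485 no longer fails just because both level flags happen to be equal; the hardware constraint (RTS must differ between "on send" and "after send") only applies while the mode is being enabled. A standalone truth-table sketch of the reordered validation; the flag values follow the uapi serial definitions, and rs485_config_ok is an invented helper.

#include <stdio.h>

#define SER_RS485_ENABLED	 (1 << 0)
#define SER_RS485_RTS_ON_SEND	 (1 << 1)
#define SER_RS485_RTS_AFTER_SEND (1 << 2)

/* Mirrors the reordered check: only reject equal RTS levels when
 * RS-485 mode is actually being enabled. */
static int rs485_config_ok(unsigned long flags)
{
	if (flags & SER_RS485_ENABLED) {
		if (!(flags & SER_RS485_RTS_ON_SEND) ==
		    !(flags & SER_RS485_RTS_AFTER_SEND))
			return 0;	/* -EINVAL in the driver */
	}
	return 1;
}

int main(void)
{
	unsigned long cases[] = {
		0,					/* disable: ok now */
		SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
							/* disable: ok now */
		SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,	/* ok */
		SER_RS485_ENABLED,			/* both low: reject */
	};

	for (unsigned int i = 0; i < 4; i++)
		printf("flags %#lx -> %s\n", cases[i],
		       rs485_config_ok(cases[i]) ? "accepted" : "rejected");
	return 0;
}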
#define MTK_UART_IER_RTSI 0x40 /* Enable RTS Modem status interrupt */ #define MTK_UART_IER_CTSI 0x80 /* Enable CTS Modem status interrupt */ +#define MTK_UART_EFR 38 /* I/O: Extended Features Register */ #define MTK_UART_EFR_EN 0x10 /* Enable enhancement feature */ #define MTK_UART_EFR_RTS 0x40 /* Enable hardware rx flow control */ #define MTK_UART_EFR_CTS 0x80 /* Enable hardware tx flow control */ @@ -53,6 +54,9 @@ #define MTK_UART_TX_TRIGGER 1 #define MTK_UART_RX_TRIGGER MTK_UART_RX_SIZE +#define MTK_UART_XON1 40 /* I/O: Xon character 1 */ +#define MTK_UART_XOFF1 42 /* I/O: Xoff character 1 */ + #ifdef CONFIG_SERIAL_8250_DMA enum dma_rx_status { DMA_RX_START = 0, @@ -169,7 +173,7 @@ static void mtk8250_dma_enable(struct uart_8250_port *up) MTK_UART_DMA_EN_RX | MTK_UART_DMA_EN_TX); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); - serial_out(up, UART_EFR, UART_EFR_ECB); + serial_out(up, MTK_UART_EFR, UART_EFR_ECB); serial_out(up, UART_LCR, lcr); if (dmaengine_slave_config(dma->rxchan, &dma->rxconf) != 0) @@ -232,7 +236,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode) int lcr = serial_in(up, UART_LCR); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); - serial_out(up, UART_EFR, UART_EFR_ECB); + serial_out(up, MTK_UART_EFR, UART_EFR_ECB); serial_out(up, UART_LCR, lcr); lcr = serial_in(up, UART_LCR); @@ -241,7 +245,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode) serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR); serial_out(up, MTK_UART_ESCAPE_EN, 0x00); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); - serial_out(up, UART_EFR, serial_in(up, UART_EFR) & + serial_out(up, MTK_UART_EFR, serial_in(up, MTK_UART_EFR) & (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))); serial_out(up, UART_LCR, lcr); mtk8250_disable_intrs(up, MTK_UART_IER_XOFFI | @@ -255,8 +259,8 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode) serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /*enable hw flow control*/ - serial_out(up, UART_EFR, MTK_UART_EFR_HW_FC | - (serial_in(up, UART_EFR) & + serial_out(up, MTK_UART_EFR, MTK_UART_EFR_HW_FC | + (serial_in(up, MTK_UART_EFR) & (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)))); serial_out(up, UART_LCR, lcr); @@ -270,12 +274,12 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode) serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /*enable sw flow control */ - serial_out(up, UART_EFR, MTK_UART_EFR_XON1_XOFF1 | - (serial_in(up, UART_EFR) & + serial_out(up, MTK_UART_EFR, MTK_UART_EFR_XON1_XOFF1 | + (serial_in(up, MTK_UART_EFR) & (~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)))); - serial_out(up, UART_XON1, START_CHAR(port->state->port.tty)); - serial_out(up, UART_XOFF1, STOP_CHAR(port->state->port.tty)); + serial_out(up, MTK_UART_XON1, START_CHAR(port->state->port.tty)); + serial_out(up, MTK_UART_XOFF1, STOP_CHAR(port->state->port.tty)); serial_out(up, UART_LCR, lcr); mtk8250_disable_intrs(up, MTK_UART_IER_CTSI|MTK_UART_IER_RTSI); mtk8250_enable_intrs(up, MTK_UART_IER_XOFFI); diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 537bee8d2258..483fff3a95c9 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -157,7 +157,11 @@ static u32 uart_read(struct uart_8250_port *up, u32 reg) return readl(up->port.membase + (reg << up->port.regshift)); } -static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) +/* + * Called on runtime PM resume path from omap8250_restore_regs(), and + * 
omap8250_set_mctrl(). + */ +static void __omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct uart_8250_port *up = up_to_u8250p(port); struct omap8250_priv *priv = up->port.private_data; @@ -181,6 +185,20 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) } } +static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + int err; + + err = pm_runtime_resume_and_get(port->dev); + if (err) + return; + + __omap8250_set_mctrl(port, mctrl); + + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); +} + /* * Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460) * The access to uart register after MDR1 Access @@ -193,27 +211,10 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) static void omap_8250_mdr1_errataset(struct uart_8250_port *up, struct omap8250_priv *priv) { - u8 timeout = 255; - serial_out(up, UART_OMAP_MDR1, priv->mdr1); udelay(2); serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR); - /* - * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and - * TX_FIFO_E bit is 1. - */ - while (UART_LSR_THRE != (serial_in(up, UART_LSR) & - (UART_LSR_THRE | UART_LSR_DR))) { - timeout--; - if (!timeout) { - /* Should *never* happen. we warn and carry on */ - dev_crit(up->port.dev, "Errata i202: timedout %x\n", - serial_in(up, UART_LSR)); - break; - } - udelay(1); - } } static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud, @@ -292,6 +293,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up) { struct omap8250_priv *priv = up->port.private_data; struct uart_8250_dma *dma = up->dma; + u8 mcr = serial8250_in_MCR(up); if (dma && dma->tx_running) { /* @@ -308,7 +310,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up) serial_out(up, UART_EFR, UART_EFR_ECB); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); - serial8250_out_MCR(up, UART_MCR_TCRTLR); + serial8250_out_MCR(up, mcr | UART_MCR_TCRTLR); serial_out(up, UART_FCR, up->fcr); omap8250_update_scr(up, priv); @@ -324,7 +326,8 @@ static void omap8250_restore_regs(struct uart_8250_port *up) serial_out(up, UART_LCR, 0); /* drop TCR + TLR access, we setup XON/XOFF later */ - serial8250_out_MCR(up, up->mcr); + serial8250_out_MCR(up, mcr); + serial_out(up, UART_IER, up->ier); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); @@ -341,7 +344,10 @@ static void omap8250_restore_regs(struct uart_8250_port *up) omap8250_update_mdr1(up, priv); - up->port.ops->set_mctrl(&up->port, up->port.mctrl); + __omap8250_set_mctrl(&up->port, up->port.mctrl); + + if (up->port.rs485.flags & SER_RS485_ENABLED) + serial8250_em485_stop_tx(up); } /* @@ -680,7 +686,6 @@ static int omap_8250_startup(struct uart_port *port) pm_runtime_get_sync(port->dev); - up->mcr = 0; serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); serial_out(up, UART_LCR, UART_LCR_WLEN8); @@ -1471,9 +1476,15 @@ err: static int omap8250_remove(struct platform_device *pdev) { struct omap8250_priv *priv = platform_get_drvdata(pdev); + int err; + + err = pm_runtime_resume_and_get(&pdev->dev); + if (err) + return err; pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); + flush_work(&priv->qos_work); pm_runtime_disable(&pdev->dev); serial8250_unregister_port(priv->line); cpu_latency_qos_remove_request(&priv->pm_qos_request); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 3a985e953b8e..b6656898699d 100644 --- 
a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -75,13 +75,12 @@ static int pci_default_setup(struct serial_private*, static void moan_device(const char *str, struct pci_dev *dev) { - dev_err(&dev->dev, - "%s: %s\n" + pci_err(dev, "%s\n" "Please send the output of lspci -vv, this\n" "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" "manufacturer and name of serial board or\n" "modem board to .\n", - pci_name(dev), str, dev->vendor, dev->device, + str, dev->vendor, dev->device, dev->subsystem_vendor, dev->subsystem_device); } @@ -238,7 +237,7 @@ static int pci_inteli960ni_init(struct pci_dev *dev) /* is firmware started? */ pci_read_config_dword(dev, 0x44, &oldval); if (oldval == 0x00001000L) { /* RESET value */ - dev_dbg(&dev->dev, "Local i960 firmware missing\n"); + pci_dbg(dev, "Local i960 firmware missing\n"); return -ENODEV; } return 0; @@ -588,9 +587,8 @@ static int pci_timedia_probe(struct pci_dev *dev) * (0,2,3,5,6: serial only -- 7,8,9: serial + parallel) */ if ((dev->subsystem_device & 0x00f0) >= 0x70) { - dev_info(&dev->dev, - "ignoring Timedia subdevice %04x for parport_serial\n", - dev->subsystem_device); + pci_info(dev, "ignoring Timedia subdevice %04x for parport_serial\n", + dev->subsystem_device); return -ENODEV; } @@ -827,8 +825,7 @@ static int pci_netmos_9900_numports(struct pci_dev *dev) if (sub_serports > 0) return sub_serports; - dev_err(&dev->dev, - "NetMos/Mostech serial driver ignoring port on ambiguous config.\n"); + pci_err(dev, "NetMos/Mostech serial driver ignoring port on ambiguous config.\n"); return 0; } @@ -897,18 +894,16 @@ static int pci_netmos_init(struct pci_dev *dev) /* enable IO_Space bit */ #define ITE_887x_POSIO_ENABLE (1 << 31) +/* inta_addr are the configuration addresses of the ITE */ +static const short inta_addr[] = { 0x2a0, 0x2c0, 0x220, 0x240, 0x1e0, 0x200, 0x280 }; static int pci_ite887x_init(struct pci_dev *dev) { - /* inta_addr are the configuration addresses of the ITE */ - static const short inta_addr[] = { 0x2a0, 0x2c0, 0x220, 0x240, 0x1e0, - 0x200, 0x280, 0 }; int ret, i, type; struct resource *iobase = NULL; u32 miscr, uartbar, ioport; /* search for the base-ioport */ - i = 0; - while (inta_addr[i] && iobase == NULL) { + for (i = 0; i < ARRAY_SIZE(inta_addr); i++) { iobase = request_region(inta_addr[i], ITE_887x_IOSIZE, "ite887x"); if (iobase != NULL) { @@ -925,13 +920,11 @@ static int pci_ite887x_init(struct pci_dev *dev) break; } release_region(iobase->start, ITE_887x_IOSIZE); - iobase = NULL; } - i++; } - if (!inta_addr[i]) { - dev_err(&dev->dev, "ite887x: could not find iobase\n"); + if (i == ARRAY_SIZE(inta_addr)) { + pci_err(dev, "could not find iobase\n"); return -ENODEV; } @@ -1001,43 +994,29 @@ static void pci_ite887x_exit(struct pci_dev *dev) } /* - * EndRun Technologies. - * Determine the number of ports available on the device. + * Oxford Semiconductor Inc. + * Check if an OxSemi device is part of the Tornado range of devices. 
*/ #define PCI_VENDOR_ID_ENDRUN 0x7401 #define PCI_DEVICE_ID_ENDRUN_1588 0xe100 -static int pci_endrun_init(struct pci_dev *dev) +static bool pci_oxsemi_tornado_p(struct pci_dev *dev) { - u8 __iomem *p; - unsigned long deviceID; - unsigned int number_uarts = 0; + /* OxSemi Tornado devices are all 0xCxxx */ + if (dev->vendor == PCI_VENDOR_ID_OXSEMI && + (dev->device & 0xf000) != 0xc000) + return false; - /* EndRun device is all 0xexxx */ + /* EndRun devices are all 0xExxx */ if (dev->vendor == PCI_VENDOR_ID_ENDRUN && - (dev->device & 0xf000) != 0xe000) - return 0; + (dev->device & 0xf000) != 0xe000) + return false; - p = pci_iomap(dev, 0, 5); - if (p == NULL) - return -ENOMEM; - - deviceID = ioread32(p); - /* EndRun device */ - if (deviceID == 0x07000200) { - number_uarts = ioread8(p + 4); - dev_dbg(&dev->dev, - "%d ports detected on EndRun PCI Express device\n", - number_uarts); - } - pci_iounmap(dev, p); - return number_uarts; + return true; } /* - * Oxford Semiconductor Inc. - * Check that device is part of the Tornado range of devices, then determine - * the number of ports available on the device. + * Determine the number of ports available on a Tornado device. */ static int pci_oxsemi_tornado_init(struct pci_dev *dev) { @@ -1045,9 +1024,7 @@ static int pci_oxsemi_tornado_init(struct pci_dev *dev) unsigned long deviceID; unsigned int number_uarts = 0; - /* OxSemi Tornado devices are all 0xCxxx */ - if (dev->vendor == PCI_VENDOR_ID_OXSEMI && - (dev->device & 0xF000) != 0xC000) + if (!pci_oxsemi_tornado_p(dev)) return 0; p = pci_iomap(dev, 0, 5); @@ -1058,9 +1035,10 @@ static int pci_oxsemi_tornado_init(struct pci_dev *dev) /* Tornado device */ if (deviceID == 0x07000200) { number_uarts = ioread8(p + 4); - dev_dbg(&dev->dev, - "%d ports detected on Oxford PCI Express device\n", - number_uarts); + pci_dbg(dev, "%d ports detected on %s PCI Express device\n", + number_uarts, + dev->vendor == PCI_VENDOR_ID_ENDRUN ? 
+ "EndRun" : "Oxford"); } pci_iounmap(dev, p); return number_uarts; @@ -1120,15 +1098,15 @@ static struct quatech_feature quatech_cards[] = { { 0, } }; -static int pci_quatech_amcc(u16 devid) +static int pci_quatech_amcc(struct pci_dev *dev) { struct quatech_feature *qf = &quatech_cards[0]; while (qf->devid) { - if (qf->devid == devid) + if (qf->devid == dev->device) return qf->amcc; qf++; } - pr_err("quatech: unknown port type '0x%04X'.\n", devid); + pci_err(dev, "unknown port type '0x%04X'.\n", dev->device); return 0; }; @@ -1291,7 +1269,7 @@ static int pci_quatech_rs422(struct uart_8250_port *port) static int pci_quatech_init(struct pci_dev *dev) { - if (pci_quatech_amcc(dev->device)) { + if (pci_quatech_amcc(dev)) { unsigned long base = pci_resource_start(dev, 0); if (base) { u32 tmp; @@ -1315,7 +1293,7 @@ static int pci_quatech_setup(struct serial_private *priv, port->port.uartclk = pci_quatech_clock(port); /* For now just warn about RS422 */ if (pci_quatech_rs422(port)) - pr_warn("quatech: software control of RS422 features not currently supported.\n"); + pci_warn(priv->dev, "software control of RS422 features not currently supported.\n"); return pci_default_setup(priv, board, port, idx); } @@ -1529,7 +1507,7 @@ static int pci_fintek_setup(struct serial_private *priv, /* Get the io address from configuration space */ pci_read_config_word(pdev, config_base + 4, &iobase); - dev_dbg(&pdev->dev, "%s: idx=%d iobase=0x%x", __func__, idx, iobase); + pci_dbg(pdev, "idx=%d iobase=0x%x", idx, iobase); port->port.iotype = UPIO_PORT; port->port.iobase = iobase; @@ -1553,7 +1531,6 @@ static int pci_fintek_init(struct pci_dev *dev) resource_size_t bar_data[3]; u8 config_base; struct serial_private *priv = pci_get_drvdata(dev); - struct uart_8250_port *port; if (!(pci_resource_flags(dev, 5) & IORESOURCE_IO) || !(pci_resource_flags(dev, 4) & IORESOURCE_IO) || @@ -1600,13 +1577,7 @@ static int pci_fintek_init(struct pci_dev *dev) pci_write_config_byte(dev, config_base + 0x06, dev->irq); - if (priv) { - /* re-apply RS232/485 mode when - * pciserial_resume_ports() - */ - port = serial8250_get_port(priv->line[i]); - pci_fintek_rs485_config(&port->port, NULL); - } else { + if (!priv) { /* First init without port data * force init to RS232 Mode */ @@ -1693,7 +1664,7 @@ static int skip_tx_en_setup(struct serial_private *priv, struct uart_8250_port *port, int idx) { port->port.quirks |= UPQ_NO_TXEN_TEST; - dev_dbg(&priv->dev->dev, + pci_dbg(priv->dev, "serial8250: skipping TxEn test for device [%04x:%04x] subsystem [%04x:%04x]\n", priv->dev->vendor, priv->dev->device, priv->dev->subsystem_vendor, priv->dev->subsystem_device); @@ -2517,7 +2488,7 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, - .init = pci_endrun_init, + .init = pci_oxsemi_tornado_init, .setup = pci_default_setup, }, /* @@ -2862,7 +2833,7 @@ enum pci_board_num_t { pbn_b0_2_1843200, pbn_b0_4_1843200, - pbn_b0_1_4000000, + pbn_b0_1_3906250, pbn_b0_bt_1_115200, pbn_b0_bt_2_115200, @@ -2940,12 +2911,11 @@ enum pci_board_num_t { pbn_panacom2, pbn_panacom4, pbn_plx_romulus, - pbn_endrun_2_4000000, pbn_oxsemi, - pbn_oxsemi_1_4000000, - pbn_oxsemi_2_4000000, - pbn_oxsemi_4_4000000, - pbn_oxsemi_8_4000000, + pbn_oxsemi_1_3906250, + pbn_oxsemi_2_3906250, + pbn_oxsemi_4_3906250, + pbn_oxsemi_8_3906250, pbn_intel_i960, pbn_sgi_ioc3, pbn_computone_4, @@ -2983,6 +2953,10 @@ enum pci_board_num_t { pbn_sunix_pci_4s, pbn_sunix_pci_8s, pbn_sunix_pci_16s, + 
pbn_titan_1_4000000, + pbn_titan_2_4000000, + pbn_titan_4_4000000, + pbn_titan_8_4000000, pbn_moxa8250_2p, pbn_moxa8250_4p, pbn_moxa8250_8p, @@ -3088,10 +3062,10 @@ static struct pciserial_board pci_boards[] = { .uart_offset = 8, }, - [pbn_b0_1_4000000] = { + [pbn_b0_1_3906250] = { .flags = FL_BASE0, .num_ports = 1, - .base_baud = 4000000, + .base_baud = 3906250, .uart_offset = 8, }, @@ -3462,20 +3436,6 @@ static struct pciserial_board pci_boards[] = { .first_offset = 0x03, }, - /* - * EndRun Technologies - * Uses the size of PCI Base region 0 to - * signal how many ports are available - * 2 port 952 Uart support - */ - [pbn_endrun_2_4000000] = { - .flags = FL_BASE0, - .num_ports = 2, - .base_baud = 4000000, - .uart_offset = 0x200, - .first_offset = 0x1000, - }, - /* * This board uses the size of PCI Base region 0 to * signal how many ports are available @@ -3486,31 +3446,31 @@ static struct pciserial_board pci_boards[] = { .base_baud = 115200, .uart_offset = 8, }, - [pbn_oxsemi_1_4000000] = { + [pbn_oxsemi_1_3906250] = { .flags = FL_BASE0, .num_ports = 1, - .base_baud = 4000000, + .base_baud = 3906250, .uart_offset = 0x200, .first_offset = 0x1000, }, - [pbn_oxsemi_2_4000000] = { + [pbn_oxsemi_2_3906250] = { .flags = FL_BASE0, .num_ports = 2, - .base_baud = 4000000, + .base_baud = 3906250, .uart_offset = 0x200, .first_offset = 0x1000, }, - [pbn_oxsemi_4_4000000] = { + [pbn_oxsemi_4_3906250] = { .flags = FL_BASE0, .num_ports = 4, - .base_baud = 4000000, + .base_baud = 3906250, .uart_offset = 0x200, .first_offset = 0x1000, }, - [pbn_oxsemi_8_4000000] = { + [pbn_oxsemi_8_3906250] = { .flags = FL_BASE0, .num_ports = 8, - .base_baud = 4000000, + .base_baud = 3906250, .uart_offset = 0x200, .first_offset = 0x1000, }, @@ -3770,6 +3730,34 @@ static struct pciserial_board pci_boards[] = { .base_baud = 921600, .uart_offset = 0x8, }, + [pbn_titan_1_4000000] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 4000000, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_titan_2_4000000] = { + .flags = FL_BASE0, + .num_ports = 2, + .base_baud = 4000000, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_titan_4_4000000] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 4000000, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_titan_8_4000000] = { + .flags = FL_BASE0, + .num_ports = 8, + .base_baud = 4000000, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, [pbn_moxa8250_2p] = { .flags = FL_BASE1, .num_ports = 2, @@ -3979,12 +3967,12 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) uart.port.irq = 0; } else { if (pci_match_id(pci_use_msi, dev)) { - dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n"); + pci_dbg(dev, "Using MSI(-X) interrupts\n"); pci_set_master(dev); uart.port.flags &= ~UPF_SHARE_IRQ; rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES); } else { - dev_dbg(&dev->dev, "Using legacy interrupts\n"); + pci_dbg(dev, "Using legacy interrupts\n"); rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY); } if (rc < 0) { @@ -4002,12 +3990,12 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) if (quirk->setup(priv, board, &uart, i)) break; - dev_dbg(&dev->dev, "Setup PCI port: port %lx, irq %d, type %d\n", + pci_dbg(dev, "Setup PCI port: port %lx, irq %d, type %d\n", uart.port.iobase, uart.port.irq, uart.port.iotype); priv->line[i] = serial8250_register_8250_port(&uart); if (priv->line[i] < 0) { - dev_err(&dev->dev, + pci_err(dev, "Couldn't register serial port %lx, irq %d, type %d, 
error %d\n", uart.port.iobase, uart.port.irq, uart.port.iotype, priv->line[i]); @@ -4103,8 +4091,7 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent) } if (ent->driver_data >= ARRAY_SIZE(pci_boards)) { - dev_err(&dev->dev, "invalid driver_data: %ld\n", - ent->driver_data); + pci_err(dev, "invalid driver_data: %ld\n", ent->driver_data); return -EINVAL; } @@ -4187,7 +4174,7 @@ static int pciserial_resume_one(struct device *dev) err = pci_enable_device(pdev); /* FIXME: We cannot simply error out here */ if (err) - dev_err(dev, "Unable to re-enable ports, trying to continue.\n"); + pci_err(pdev, "Unable to re-enable ports, trying to continue.\n"); pciserial_resume_ports(priv); } return 0; @@ -4380,13 +4367,6 @@ static const struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_ROMULUS, 0x10b5, 0x106a, 0, 0, pbn_plx_romulus }, - /* - * EndRun Technologies. PCI express device range. - * EndRun PTP/1588 has 2 Native UARTs. - */ - { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_endrun_2_4000000 }, /* * Quatech cards. These actually have configurable clocks but for * now we just use the default. @@ -4496,158 +4476,165 @@ static const struct pci_device_id serial_pci_tbl[] = { */ { PCI_VENDOR_ID_OXSEMI, 0xc101, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_b0_1_4000000 }, + pbn_b0_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc105, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_b0_1_4000000 }, + pbn_b0_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc11b, /* OXPCIe952 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc11f, /* OXPCIe952 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc120, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_b0_1_4000000 }, + pbn_b0_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc124, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_b0_1_4000000 }, + pbn_b0_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc138, /* OXPCIe952 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc13d, /* OXPCIe952 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc140, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_b0_1_4000000 }, + pbn_b0_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc141, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_b0_1_4000000 }, + pbn_b0_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc144, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_b0_1_4000000 }, + pbn_b0_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc145, /* OXPCIe952 1 Legacy UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_b0_1_4000000 }, + pbn_b0_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc158, /* OXPCIe952 2 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_2_4000000 }, + pbn_oxsemi_2_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc15d, /* OXPCIe952 2 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_2_4000000 }, + pbn_oxsemi_2_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc208, /* OXPCIe954 4 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_4_4000000 }, + pbn_oxsemi_4_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc20d, /* OXPCIe954 4 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_4_4000000 }, + pbn_oxsemi_4_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc308, 
/* OXPCIe958 8 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_8_4000000 }, + pbn_oxsemi_8_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc30d, /* OXPCIe958 8 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_8_4000000 }, + pbn_oxsemi_8_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc40b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc40f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc41b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc41f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc42b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc42f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc43b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc43f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc44b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc44f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc45b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc45f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc46b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc46f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc47b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc47f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc48b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc48f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc49b, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc49f, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc4ab, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc4af, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc4bb, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + 
pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc4bf, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc4cb, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_OXSEMI, 0xc4cf, /* OXPCIe200 1 Native UART */ PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, /* * Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado */ { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */ PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_oxsemi_1_3906250 }, { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */ PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0, - pbn_oxsemi_2_4000000 }, + pbn_oxsemi_2_3906250 }, { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */ PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0, - pbn_oxsemi_4_4000000 }, + pbn_oxsemi_4_3906250 }, { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */ PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0, - pbn_oxsemi_8_4000000 }, + pbn_oxsemi_8_3906250 }, /* * Digi/IBM PCIe 2-port Async EIA-232 Adapter utilizing OxSemi Tornado */ { PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_2_OX_IBM, PCI_SUBVENDOR_ID_IBM, PCI_ANY_ID, 0, 0, - pbn_oxsemi_2_4000000 }, + pbn_oxsemi_2_3906250 }, + /* + * EndRun Technologies. PCI express device range. + * EndRun PTP/1588 has 2 Native UARTs utilizing OxSemi 952. + */ + { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_oxsemi_2_3906250 }, /* * SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards, @@ -4721,22 +4708,22 @@ static const struct pci_device_id serial_pci_tbl[] = { pbn_b0_4_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_1_4000000 }, + pbn_titan_1_4000000 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_2_4000000 }, + pbn_titan_2_4000000 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_4_4000000 }, + pbn_titan_4_4000000 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_8_4000000 }, + pbn_titan_8_4000000 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EI, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_2_4000000 }, + pbn_titan_2_4000000 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - pbn_oxsemi_2_4000000 }, + pbn_titan_2_4000000 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200V3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_921600 }, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 0ed01c89fce4..61f225aeb520 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -537,27 +537,6 @@ serial_port_out_sync(struct uart_port *p, int offset, int value) } } -/* - * For the 16C950 - */ -static void serial_icr_write(struct uart_8250_port *up, int offset, int value) -{ - serial_out(up, UART_SCR, offset); - serial_out(up, UART_ICR, value); -} - -static unsigned int serial_icr_read(struct uart_8250_port *up, int offset) -{ - unsigned int value; - - serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD); - serial_out(up, UART_SCR, offset); - value = serial_in(up, UART_ICR); - serial_icr_write(up, UART_ACR, up->acr); - - return value; -} - /* * FIFO support. 
*/ @@ -621,7 +600,7 @@ EXPORT_SYMBOL_GPL(serial8250_rpm_put); static int serial8250_em485_init(struct uart_8250_port *p) { if (p->em485) - return 0; + goto deassert_rts; p->em485 = kmalloc(sizeof(struct uart_8250_em485), GFP_ATOMIC); if (!p->em485) @@ -637,7 +616,9 @@ static int serial8250_em485_init(struct uart_8250_port *p) p->em485->active_timer = NULL; p->em485->tx_stopped = true; - p->rs485_stop_tx(p); +deassert_rts: + if (p->em485->tx_stopped) + p->rs485_stop_tx(p); return 0; } @@ -688,13 +669,6 @@ int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485) rs485->flags &= ~SER_RS485_RTS_AFTER_SEND; } - /* clamp the delays to [0, 100ms] */ - rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U); - rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U); - - memset(rs485->padding, 0, sizeof(rs485->padding)); - port->rs485 = *rs485; - gpiod_set_value(port->rs485_term_gpio, rs485->flags & SER_RS485_TERMINATE_BUS); @@ -702,15 +676,8 @@ int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485) * Both serial8250_em485_init() and serial8250_em485_destroy() * are idempotent. */ - if (rs485->flags & SER_RS485_ENABLED) { - int ret = serial8250_em485_init(up); - - if (ret) { - rs485->flags &= ~SER_RS485_ENABLED; - port->rs485.flags &= ~SER_RS485_ENABLED; - } - return ret; - } + if (rs485->flags & SER_RS485_ENABLED) + return serial8250_em485_init(up); serial8250_em485_destroy(up); return 0; @@ -1540,6 +1507,8 @@ static inline void __stop_tx(struct uart_8250_port *p) if (em485) { unsigned char lsr = serial_in(p, UART_LSR); + p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + /* * To provide required timing and allow FIFO transfer, * __stop_tx_rs485() must be called only when both FIFO and @@ -1918,10 +1887,13 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir) return up->dma->rx_dma(up); #else switch (iir & 0x3f) { - case UART_IIR_RX_TIMEOUT: - serial8250_rx_dma_flush(up); + case UART_IIR_RDI: + if (!up->dma->rx_running) + break; fallthrough; case UART_IIR_RLSI: + case UART_IIR_RX_TIMEOUT: + serial8250_rx_dma_flush(up); return true; } return up->dma->rx_dma(up); @@ -2105,6 +2077,9 @@ EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl); static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl) { + if (port->rs485.flags & SER_RS485_ENABLED) + return; + if (port->set_mctrl) port->set_mctrl(port, mctrl); else @@ -3054,8 +3029,10 @@ static int serial8250_request_std_resource(struct uart_8250_port *up) case UPIO_MEM32BE: case UPIO_MEM16: case UPIO_MEM: - if (!port->mapbase) + if (!port->mapbase) { + ret = -EINVAL; break; + } if (!request_mem_region(port->mapbase, size, "serial")) { ret = -EBUSY; @@ -3279,9 +3256,6 @@ static void serial8250_config_port(struct uart_port *port, int flags) if (flags & UART_CONFIG_TYPE) autoconfig(up); - if (port->rs485.flags & SER_RS485_ENABLED) - port->rs485_config(port, &port->rs485); - /* if access method is AU, it is a 16550 with a quirk */ if (port->type == PORT_16550A && port->iotype == UPIO_AU) up->bugs |= UART_BUG_NOMSR; @@ -3414,7 +3388,7 @@ static void serial8250_console_restore(struct uart_8250_port *up) serial8250_set_divisor(port, baud, quot, frac); serial_port_out(port, UART_LCR, up->lcr); - serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS); + serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); } /* diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index 603137da4736..136f2b1460f9 100644 --- 
a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -119,7 +119,7 @@ config SERIAL_8250_CONSOLE config SERIAL_8250_GSC tristate - depends on SERIAL_8250 && GSC + depends on SERIAL_8250 && PARISC default SERIAL_8250 config SERIAL_8250_DMA diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 07b19e97f850..9900ee3f9068 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1326,6 +1326,15 @@ static void pl011_stop_rx(struct uart_port *port) pl011_dma_rx_stop(uap); } +static void pl011_throttle_rx(struct uart_port *port) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + pl011_stop_rx(port); + spin_unlock_irqrestore(&port->lock, flags); +} + static void pl011_enable_ms(struct uart_port *port) { struct uart_amba_port *uap = @@ -1717,9 +1726,10 @@ static int pl011_allocate_irq(struct uart_amba_port *uap) */ static void pl011_enable_interrupts(struct uart_amba_port *uap) { + unsigned long flags; unsigned int i; - spin_lock_irq(&uap->port.lock); + spin_lock_irqsave(&uap->port.lock, flags); /* Clear out any spuriously appearing RX interrupts */ pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR); @@ -1741,7 +1751,14 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap) if (!pl011_dma_rx_running(uap)) uap->im |= UART011_RXIM; pl011_write(uap->im, uap, REG_IMSC); - spin_unlock_irq(&uap->port.lock); + spin_unlock_irqrestore(&uap->port.lock, flags); +} + +static void pl011_unthrottle_rx(struct uart_port *port) +{ + struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); + + pl011_enable_interrupts(uap); } static int pl011_startup(struct uart_port *port) @@ -2116,6 +2133,8 @@ static const struct uart_ops amba_pl011_pops = { .stop_tx = pl011_stop_tx, .start_tx = pl011_start_tx, .stop_rx = pl011_stop_rx, + .throttle = pl011_throttle_rx, + .unthrottle = pl011_unthrottle_rx, .enable_ms = pl011_enable_ms, .break_ctl = pl011_break_ctl, .startup = pl011_startup, diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c index c2be7cf91399..fcbaff894193 100644 --- a/drivers/tty/serial/ar933x_uart.c +++ b/drivers/tty/serial/ar933x_uart.c @@ -593,6 +593,11 @@ static int ar933x_config_rs485(struct uart_port *port, dev_err(port->dev, "RS485 needs rts-gpio\n"); return 1; } + + if (rs485conf->flags & SER_RS485_ENABLED) + gpiod_set_value(up->rts_gpiod, + !!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND)); + port->rs485 = *rs485conf; return 0; } diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 602065bfc9bb..b7872ad3e762 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -295,20 +295,16 @@ static int atmel_config_rs485(struct uart_port *port, mode = atmel_uart_readl(port, ATMEL_US_MR); - /* Resetting serial mode to RS232 (0x0) */ - mode &= ~ATMEL_US_USMODE; - - port->rs485 = *rs485conf; - if (rs485conf->flags & SER_RS485_ENABLED) { dev_dbg(port->dev, "Setting UART to RS485\n"); - if (port->rs485.flags & SER_RS485_RX_DURING_TX) + if (rs485conf->flags & SER_RS485_RX_DURING_TX) atmel_port->tx_done_mask = ATMEL_US_TXRDY; else atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; atmel_uart_writel(port, ATMEL_US_TTGR, rs485conf->delay_rts_after_send); + mode &= ~ATMEL_US_USMODE; mode |= ATMEL_US_USMODE_RS485; } else { dev_dbg(port->dev, "Setting UART to RS232\n"); diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c index 13ac36e2da4f..5fea9bf86e85 100644 --- 
a/drivers/tty/serial/digicolor-usart.c +++ b/drivers/tty/serial/digicolor-usart.c @@ -309,6 +309,8 @@ static void digicolor_uart_set_termios(struct uart_port *port, case CS8: default: config |= UA_CONFIG_CHAR_LEN; + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; break; } @@ -471,11 +473,10 @@ static int digicolor_uart_probe(struct platform_device *pdev) if (IS_ERR(uart_clk)) return PTR_ERR(uart_clk); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dp->port.mapbase = res->start; - dp->port.membase = devm_ioremap_resource(&pdev->dev, res); + dp->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(dp->port.membase)) return PTR_ERR(dp->port.membase); + dp->port.mapbase = res->start; irq = platform_get_irq(pdev, 0); if (irq < 0) diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index b9f8add284e3..43aca5a2ef0f 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -229,8 +229,6 @@ /* IMX lpuart has four extra unused regs located at the beginning */ #define IMX_REG_OFF 0x10 -static DEFINE_IDA(fsl_lpuart_ida); - enum lpuart_type { VF610_LPUART, LS1021A_LPUART, @@ -265,7 +263,6 @@ struct lpuart_port { int rx_dma_rng_buf_len; unsigned int dma_tx_nents; wait_queue_head_t dma_wait; - bool id_allocated; }; struct lpuart_soc_data { @@ -1379,9 +1376,9 @@ static int lpuart32_config_rs485(struct uart_port *port, * Note: UART is assumed to be active high. */ if (rs485->flags & SER_RS485_RTS_ON_SEND) - modem &= ~UARTMODEM_TXRTSPOL; - else if (rs485->flags & SER_RS485_RTS_AFTER_SEND) modem |= UARTMODEM_TXRTSPOL; + else if (rs485->flags & SER_RS485_RTS_AFTER_SEND) + modem &= ~UARTMODEM_TXRTSPOL; } /* Store the new configuration */ @@ -1728,6 +1725,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport) if (sport->lpuart_dma_rx_use) { del_timer_sync(&sport->lpuart_timer); lpuart_dma_rx_free(&sport->port); + sport->lpuart_dma_rx_use = false; } if (sport->lpuart_dma_tx_use) { @@ -1736,6 +1734,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport) sport->dma_tx_in_progress = false; dmaengine_terminate_all(sport->dma_tx_chan); } + sport->lpuart_dma_tx_use = false; } if (sport->dma_tx_chan) @@ -2141,6 +2140,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, uart_update_timeout(port, termios->c_cflag, baud); /* wait for the transmit engine to complete */ + lpuart32_write(&sport->port, 0, UARTMODIR); lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC); /* disable transmit and receive */ @@ -2638,23 +2638,18 @@ static int lpuart_probe(struct platform_device *pdev) ret = of_alias_get_id(np, "serial"); if (ret < 0) { - ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL); - if (ret < 0) { - dev_err(&pdev->dev, "port line is full, add device failed\n"); - return ret; - } - sport->id_allocated = true; + dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); + return ret; } if (ret >= ARRAY_SIZE(lpuart_ports)) { dev_err(&pdev->dev, "serial%d out of range\n", ret); - ret = -EINVAL; - goto failed_out_of_range; + return -EINVAL; } sport->port.line = ret; ret = lpuart_enable_clks(sport); if (ret) - goto failed_clock_enable; + return ret; sport->port.uartclk = lpuart_get_baud_clk_rate(sport); lpuart_ports[sport->port.line] = sport; @@ -2674,10 +2669,6 @@ static int lpuart_probe(struct platform_device *pdev) if (ret) goto failed_irq_request; - ret = uart_add_one_port(&lpuart_reg, &sport->port); - if (ret) - goto failed_attach_port; - ret = 
uart_get_rs485_mode(&sport->port); if (ret) goto failed_get_rs485; @@ -2689,7 +2680,9 @@ static int lpuart_probe(struct platform_device *pdev) sport->port.rs485.delay_rts_after_send) dev_err(&pdev->dev, "driver doesn't support RTS delays\n"); - sport->port.rs485_config(&sport->port, &sport->port.rs485); + ret = uart_add_one_port(&lpuart_reg, &sport->port); + if (ret) + goto failed_attach_port; return 0; @@ -2697,10 +2690,6 @@ failed_get_rs485: failed_attach_port: failed_irq_request: lpuart_disable_clks(sport); -failed_clock_enable: -failed_out_of_range: - if (sport->id_allocated) - ida_simple_remove(&fsl_lpuart_ida, sport->port.line); return ret; } @@ -2710,9 +2699,6 @@ static int lpuart_remove(struct platform_device *pdev) uart_remove_one_port(&lpuart_reg, &sport->port); - if (sport->id_allocated) - ida_simple_remove(&fsl_lpuart_ida, sport->port.line); - lpuart_disable_clks(sport); if (sport->dma_tx_chan) @@ -2842,7 +2828,6 @@ static int __init lpuart_serial_init(void) static void __exit lpuart_serial_exit(void) { - ida_destroy(&fsl_lpuart_ida); platform_driver_unregister(&lpuart_driver); uart_unregister_driver(&lpuart_reg); } diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c index 94c8281ddb5f..74b325c344da 100644 --- a/drivers/tty/serial/icom.c +++ b/drivers/tty/serial/icom.c @@ -1503,7 +1503,7 @@ static int icom_probe(struct pci_dev *dev, retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg); if (retval) { dev_err(&dev->dev, "PCI Config read FAILED\n"); - return retval; + goto probe_exit0; } pci_write_config_dword(dev, PCI_COMMAND, diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 93cd8ad57f38..164597e2e004 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -398,8 +398,7 @@ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2) { *ucr2 &= ~(UCR2_CTSC | UCR2_CTS); - sport->port.mctrl |= TIOCM_RTS; - mctrl_gpio_set(sport->gpios, sport->port.mctrl); + mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS); } /* called with port.lock taken and irqs caller dependent */ @@ -408,8 +407,7 @@ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2) *ucr2 &= ~UCR2_CTSC; *ucr2 |= UCR2_CTS; - sport->port.mctrl &= ~TIOCM_RTS; - mctrl_gpio_set(sport->gpios, sport->port.mctrl); + mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS); } static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec) @@ -1470,7 +1468,7 @@ static int imx_uart_startup(struct uart_port *port) imx_uart_writel(sport, ucr1, UCR1); ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR); - if (!sport->dma_is_enabled) + if (!dma_is_inited) ucr4 |= UCR4_OREN; if (sport->inverted_rx) ucr4 |= UCR4_INVR; @@ -2381,8 +2379,6 @@ static int imx_uart_probe(struct platform_device *pdev) dev_err(&pdev->dev, "low-active RTS not possible when receiver is off, enabling receiver\n"); - imx_uart_rs485_config(&sport->port, &sport->port.rs485); - /* Disable interrupts before requesting them */ ucr1 = imx_uart_readl(sport, UCR1); ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN); @@ -2630,6 +2626,7 @@ static const struct dev_pm_ops imx_uart_pm_ops = { .suspend_noirq = imx_uart_suspend_noirq, .resume_noirq = imx_uart_resume_noirq, .freeze_noirq = imx_uart_suspend_noirq, + .thaw_noirq = imx_uart_resume_noirq, .restore_noirq = imx_uart_resume_noirq, .suspend = imx_uart_suspend, .resume = imx_uart_resume, diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c index 
cd30da0ef083..b5b61e598b53 100644 --- a/drivers/tty/serial/jsm/jsm_driver.c +++ b/drivers/tty/serial/jsm/jsm_driver.c @@ -212,7 +212,8 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) break; default: - return -ENXIO; + rc = -ENXIO; + goto out_kfree_brd; } rc = request_irq(brd->irq, brd->bd_ops->intr, IRQF_SHARED, "JSM", brd); diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c index b5898c932036..a9802308ff60 100644 --- a/drivers/tty/serial/lpc32xx_hs.c +++ b/drivers/tty/serial/lpc32xx_hs.c @@ -344,7 +344,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id) LPC32XX_HSUART_IIR(port->membase)); port->icount.overrun++; tty_insert_flip_char(tport, 0, TTY_OVERRUN); - tty_schedule_flip(tport); + tty_flip_buffer_push(tport); } /* Data received? */ diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c index 69eeef9edfa5..3f8ccb87c604 100644 --- a/drivers/tty/serial/meson_uart.c +++ b/drivers/tty/serial/meson_uart.c @@ -255,6 +255,14 @@ static const char *meson_uart_type(struct uart_port *port) return (port->type == PORT_MESON) ? "meson_uart" : NULL; } +/* + * This function is called only from probe() using a temporary io mapping + * in order to perform a reset before setting up the device. Since the + * temporarily mapped region was successfully requested, there can be no + * console on this port at this time. Hence it is not necessary for this + * function to acquire the port->lock, which, for the same reason, is not + * even initialized at this point. + */ static void meson_uart_reset(struct uart_port *port) { u32 val; @@ -269,9 +277,12 @@ static void meson_uart_reset(struct uart_port *port) static int meson_uart_startup(struct uart_port *port) { + unsigned long flags; u32 val; int ret = 0; + spin_lock_irqsave(&port->lock, flags); + val = readl(port->membase + AML_UART_CONTROL); val |= AML_UART_CLEAR_ERR; writel(val, port->membase + AML_UART_CONTROL); @@ -287,6 +298,8 @@ static int meson_uart_startup(struct uart_port *port) val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2)); writel(val, port->membase + AML_UART_MISC); + spin_unlock_irqrestore(&port->lock, flags); + ret = request_irq(port->irq, meson_uart_interrupt, 0, port->name, port); diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 26bcbec5422e..27023a56f3ac 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -1593,6 +1593,7 @@ static inline struct uart_port *msm_get_port_from_line(unsigned int line) static void __msm_console_write(struct uart_port *port, const char *s, unsigned int count, bool is_uartdm) { + unsigned long flags; int i; int num_newlines = 0; bool replaced = false; @@ -1610,6 +1611,8 @@ static void __msm_console_write(struct uart_port *port, const char *s, num_newlines++; count += num_newlines; + local_irq_save(flags); + if (port->sysrq) locked = 0; else if (oops_in_progress) @@ -1655,6 +1658,8 @@ static void __msm_console_write(struct uart_port *port, const char *s, if (locked) spin_unlock(&port->lock); + + local_irq_restore(flags); } static void msm_console_write(struct console *co, const char *s, diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index 34ff2181afd1..bbf1b0b37b11 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -238,6 +238,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status) struct tty_port *tport = 
&port->state->port; unsigned char ch = 0; char flag = 0; + int ret; do { if (status & STAT_RX_RDY(port)) { @@ -250,6 +251,16 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status) port->icount.parity++; } + /* + * For UART2, error bits are not cleared on buffer read. + * This causes interrupt loop and system hang. + */ + if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) { + ret = readl(port->membase + UART_STAT); + ret |= STAT_BRK_ERR; + writel(ret, port->membase + UART_STAT); + } + if (status & STAT_BRK_DET) { port->icount.brk++; status &= ~(STAT_FRM_ERR | STAT_PAR_ERR); @@ -443,13 +454,13 @@ static void mvebu_uart_shutdown(struct uart_port *port) } } -static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud) +static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud) { unsigned int d_divisor, m_divisor; u32 brdv, osamp; if (!port->uartclk) - return -EOPNOTSUPP; + return 0; /* * The baudrate is derived from the UART clock thanks to two divisors: @@ -473,7 +484,7 @@ static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud) osamp &= ~OSAMP_DIVISORS_MASK; writel(osamp, port->membase + UART_OSAMP); - return 0; + return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor); } static void mvebu_uart_set_termios(struct uart_port *port, @@ -510,15 +521,11 @@ static void mvebu_uart_set_termios(struct uart_port *port, max_baud = 230400; baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud); - if (mvebu_uart_baud_rate_set(port, baud)) { - /* No clock available, baudrate cannot be changed */ - if (old) - baud = uart_get_baud_rate(port, old, NULL, - min_baud, max_baud); - } else { - tty_termios_encode_baud_rate(termios, baud, baud); - uart_update_timeout(port, termios->c_cflag, baud); - } + baud = mvebu_uart_baud_rate_set(port, baud); + + /* In case baudrate cannot be changed, report previous old value */ + if (baud == 0 && old) + baud = tty_termios_baud_rate(old); /* Only the following flag changes are supported */ if (old) { @@ -529,6 +536,11 @@ static void mvebu_uart_set_termios(struct uart_port *port, termios->c_cflag |= CS8; } + if (baud != 0) { + tty_termios_encode_baud_rate(termios, baud, baud); + uart_update_timeout(port, termios->c_cflag, baud); + } + spin_unlock_irqrestore(&port->lock, flags); } diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c index c149f8c30007..a0d4bffe70bd 100644 --- a/drivers/tty/serial/owl-uart.c +++ b/drivers/tty/serial/owl-uart.c @@ -695,6 +695,7 @@ static int owl_uart_probe(struct platform_device *pdev) owl_port->port.uartclk = clk_get_rate(owl_port->clk); if (owl_port->port.uartclk == 0) { dev_err(&pdev->dev, "clock rate is zero\n"); + clk_disable_unprepare(owl_port->clk); return -EINVAL; } owl_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_LOW_LATENCY; diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index a7363bc66c11..351ad0b02029 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -628,22 +628,6 @@ static int push_rx(struct eg20t_port *priv, const unsigned char *buf, return 0; } -static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf) -{ - int ret = 0; - struct uart_port *port = &priv->port; - - if (port->x_char) { - dev_dbg(priv->port.dev, "%s:X character send %02x (%lu)\n", - __func__, port->x_char, jiffies); - buf[0] = port->x_char; - port->x_char = 0; - ret = 1; - } - - return ret; -} - static int dma_push_rx(struct eg20t_port *priv, int size) { int 
room; @@ -893,9 +877,10 @@ static unsigned int handle_tx(struct eg20t_port *priv) fifo_size = max(priv->fifo_size, 1); tx_empty = 1; - if (pop_tx_x(priv, xmit->buf)) { - pch_uart_hal_write(priv, xmit->buf, 1); + if (port->x_char) { + pch_uart_hal_write(priv, &port->x_char, 1); port->icount.tx++; + port->x_char = 0; tx_empty = 0; fifo_size--; } @@ -950,9 +935,11 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) } fifo_size = max(priv->fifo_size, 1); - if (pop_tx_x(priv, xmit->buf)) { - pch_uart_hal_write(priv, xmit->buf, 1); + + if (port->x_char) { + pch_uart_hal_write(priv, &port->x_char, 1); port->icount.tx++; + port->x_char = 0; fifo_size--; } diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c index 85366e059258..a45069e7ebea 100644 --- a/drivers/tty/serial/rda-uart.c +++ b/drivers/tty/serial/rda-uart.c @@ -262,6 +262,8 @@ static void rda_uart_set_termios(struct uart_port *port, fallthrough; case CS7: ctrl &= ~RDA_UART_DBITS_8; + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS7; break; default: ctrl |= RDA_UART_DBITS_8; diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c index f5fab1dd96bc..aa1cf2ae17a9 100644 --- a/drivers/tty/serial/sa1100.c +++ b/drivers/tty/serial/sa1100.c @@ -448,6 +448,8 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios, baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); quot = uart_get_divisor(port, baud); + del_timer_sync(&sport->timer); + spin_lock_irqsave(&sport->port.lock, flags); sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS); @@ -478,8 +480,6 @@ sa1100_set_termios(struct uart_port *port, struct ktermios *termios, UTSR1_TO_SM(UTSR1_ROR); } - del_timer_sync(&sport->timer); - /* * Update the per-port timeout. */ diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c index 8ae3e03fbd8c..263c33260d8a 100644 --- a/drivers/tty/serial/samsung_tty.c +++ b/drivers/tty/serial/samsung_tty.c @@ -361,8 +361,7 @@ static void enable_tx_dma(struct s3c24xx_uart_port *ourport) /* Enable tx dma mode */ ucon = rd_regl(port, S3C2410_UCON); ucon &= ~(S3C64XX_UCON_TXBURST_MASK | S3C64XX_UCON_TXMODE_MASK); - ucon |= (dma_get_cache_alignment() >= 16) ? 
- S3C64XX_UCON_TXBURST_16 : S3C64XX_UCON_TXBURST_1; + ucon |= S3C64XX_UCON_TXBURST_1; ucon |= S3C64XX_UCON_TXMODE_DMA; wr_regl(port, S3C2410_UCON, ucon); @@ -634,7 +633,7 @@ static void enable_rx_dma(struct s3c24xx_uart_port *ourport) S3C64XX_UCON_DMASUS_EN | S3C64XX_UCON_TIMEOUT_EN | S3C64XX_UCON_RXMODE_MASK); - ucon |= S3C64XX_UCON_RXBURST_16 | + ucon |= S3C64XX_UCON_RXBURST_1 | 0xf << S3C64XX_UCON_TIMEOUT_SHIFT | S3C64XX_UCON_EMPTYINT_EN | S3C64XX_UCON_TIMEOUT_EN | @@ -883,11 +882,8 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id) goto out; } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) { - spin_unlock(&port->lock); + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); - spin_lock(&port->lock); - } if (uart_circ_empty(xmit)) s3c24xx_serial_stop_tx(port); diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index c2be22c3b7d1..cda71802b698 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -520,7 +520,7 @@ static void tegra_uart_tx_dma_complete(void *args) count = tup->tx_bytes_requested - state.residue; async_tx_ack(tup->tx_dma_desc); spin_lock_irqsave(&tup->uport.lock, flags); - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(&tup->uport, count); tup->tx_in_progress = 0; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&tup->uport); @@ -608,7 +608,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u) static void tegra_uart_stop_tx(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); - struct circ_buf *xmit = &tup->uport.state->xmit; struct dma_tx_state state; unsigned int count; @@ -619,7 +618,7 @@ static void tegra_uart_stop_tx(struct uart_port *u) dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state); count = tup->tx_bytes_requested - state.residue; async_tx_ack(tup->tx_dma_desc); - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(&tup->uport, count); tup->tx_in_progress = 0; } diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 6ff86bebc8b7..d6747321cded 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -42,6 +42,11 @@ static struct lock_class_key port_lock_key; #define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) +/* + * Max time with active RTS before/after data is sent. 
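+ * For example, a TIOCSRS485 request asking for a 5000 ms delay_rts_after_send is clamped to 100 ms by uart_set_rs485_config() below, with a ratelimited warning.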
+ */ +#define RS485_MAX_RTS_DELAY 100 /* msecs */ + static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, struct ktermios *old_termios); static void uart_wait_until_sent(struct tty_struct *tty, int timeout); @@ -147,7 +152,7 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) spin_lock_irqsave(&port->lock, flags); old = port->mctrl; port->mctrl = (old & ~clear) | set; - if (old != port->mctrl) + if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED)) port->ops->set_mctrl(port, port->mctrl); spin_unlock_irqrestore(&port->lock, flags); } @@ -157,23 +162,10 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) static void uart_port_dtr_rts(struct uart_port *uport, int raise) { - int rs485_on = uport->rs485_config && - (uport->rs485.flags & SER_RS485_ENABLED); - int RTS_after_send = !!(uport->rs485.flags & SER_RS485_RTS_AFTER_SEND); - - if (raise) { - if (rs485_on && RTS_after_send) { - uart_set_mctrl(uport, TIOCM_DTR); - uart_clear_mctrl(uport, TIOCM_RTS); - } else { - uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS); - } - } else { - unsigned int clear = TIOCM_DTR; - - clear |= (!rs485_on || RTS_after_send) ? TIOCM_RTS : 0; - uart_clear_mctrl(uport, clear); - } + if (raise) + uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS); + else + uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS); } /* @@ -1109,11 +1101,6 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) goto out; if (!tty_io_error(tty)) { - if (uport->rs485.flags & SER_RS485_ENABLED) { - set &= ~TIOCM_RTS; - clear &= ~TIOCM_RTS; - } - uart_update_mctrl(uport, set, clear); ret = 0; } @@ -1332,8 +1319,41 @@ static int uart_set_rs485_config(struct uart_port *port, if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user))) return -EFAULT; + /* pick sane settings if the user hasn't */ + if (!(rs485.flags & SER_RS485_RTS_ON_SEND) == + !(rs485.flags & SER_RS485_RTS_AFTER_SEND)) { + dev_warn_ratelimited(port->dev, + "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n", + port->name, port->line); + rs485.flags |= SER_RS485_RTS_ON_SEND; + rs485.flags &= ~SER_RS485_RTS_AFTER_SEND; + } + + if (rs485.delay_rts_before_send > RS485_MAX_RTS_DELAY) { + rs485.delay_rts_before_send = RS485_MAX_RTS_DELAY; + dev_warn_ratelimited(port->dev, + "%s (%d): RTS delay before sending clamped to %u ms\n", + port->name, port->line, rs485.delay_rts_before_send); + } + + if (rs485.delay_rts_after_send > RS485_MAX_RTS_DELAY) { + rs485.delay_rts_after_send = RS485_MAX_RTS_DELAY; + dev_warn_ratelimited(port->dev, + "%s (%d): RTS delay after sending clamped to %u ms\n", + port->name, port->line, rs485.delay_rts_after_send); + } + /* Return clean padding area to userspace */ + memset(rs485.padding, 0, sizeof(rs485.padding)); + spin_lock_irqsave(&port->lock, flags); ret = port->rs485_config(port, &rs485); + if (!ret) { + port->rs485 = rs485; + + /* Reset RTS and other mctrl lines when disabling RS485 */ + if (!(rs485.flags & SER_RS485_ENABLED)) + port->ops->set_mctrl(port, port->mctrl); + } spin_unlock_irqrestore(&port->lock, flags); if (ret) return ret; @@ -1947,11 +1967,6 @@ static int uart_proc_show(struct seq_file *m, void *v) } #endif -static inline bool uart_console_enabled(struct uart_port *port) -{ - return uart_console(port) && (port->cons->flags & CON_ENABLED); -} - static void uart_port_spin_lock_init(struct uart_port *port) { spin_lock_init(&port->lock); @@ -2308,7 +2323,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port 
*uport) uart_change_pm(state, UART_PM_STATE_ON); spin_lock_irq(&uport->lock); - ops->set_mctrl(uport, 0); + if (!(uport->rs485.flags & SER_RS485_ENABLED)) + ops->set_mctrl(uport, 0); spin_unlock_irq(&uport->lock); if (console_suspend_enabled || !uart_console(uport)) { /* Protected by port mutex for now */ @@ -2319,7 +2335,10 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) if (tty) uart_change_speed(tty, state, NULL); spin_lock_irq(&uport->lock); - ops->set_mctrl(uport, uport->mctrl); + if (!(uport->rs485.flags & SER_RS485_ENABLED)) + ops->set_mctrl(uport, uport->mctrl); + else + uport->rs485_config(uport, &uport->rs485); ops->start_tx(uport); spin_unlock_irq(&uport->lock); tty_port_set_initialized(port, 1); @@ -2417,7 +2436,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, */ spin_lock_irqsave(&port->lock, flags); port->mctrl &= TIOCM_DTR; - port->ops->set_mctrl(port, port->mctrl); + if (!(port->rs485.flags & SER_RS485_ENABLED)) + port->ops->set_mctrl(port, port->mctrl); + else + port->rs485_config(port, &port->rs485); spin_unlock_irqrestore(&port->lock, flags); /* diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c index 7a07e7272de1..7beec331010c 100644 --- a/drivers/tty/serial/serial_txx9.c +++ b/drivers/tty/serial/serial_txx9.c @@ -644,6 +644,8 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios, case CS6: /* not supported */ case CS8: cval |= TXX9_SILCR_UMODE_8BIT; + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; break; } diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index f700bfaef129..8d924727d6f0 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -2392,8 +2392,12 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, int best_clk = -1; unsigned long flags; - if ((termios->c_cflag & CSIZE) == CS7) + if ((termios->c_cflag & CSIZE) == CS7) { smr_val |= SCSMR_CHR; + } else { + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + } if (termios->c_cflag & PARENB) smr_val |= SCSMR_PE; if (termios->c_cflag & PARODD) diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c index 214bf3086c68..91952be01074 100644 --- a/drivers/tty/serial/sifive.c +++ b/drivers/tty/serial/sifive.c @@ -667,12 +667,16 @@ static void sifive_serial_set_termios(struct uart_port *port, int rate; char nstop; - if ((termios->c_cflag & CSIZE) != CS8) + if ((termios->c_cflag & CSIZE) != CS8) { dev_err_once(ssp->port.dev, "only 8-bit words supported\n"); + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + } if (termios->c_iflag & (INPCK | PARMRK)) dev_err_once(ssp->port.dev, "parity checking not supported\n"); if (termios->c_iflag & BRKINT) dev_err_once(ssp->port.dev, "BREAK detection not supported\n"); + termios->c_iflag &= ~(INPCK|PARMRK|BRKINT); /* Set number of stop bits */ nstop = (termios->c_cflag & CSTOPB) ? 
2 : 1; @@ -999,7 +1003,7 @@ static int sifive_serial_probe(struct platform_device *pdev) /* Set up clock divider */ ssp->clkin_rate = clk_get_rate(ssp->clk); ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE; - ssp->port.uartclk = ssp->baud_rate * 16; + ssp->port.uartclk = ssp->clkin_rate; __ssp_update_div(ssp); platform_set_drvdata(pdev, ssp); diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c index e7048515a79c..97d36f870f64 100644 --- a/drivers/tty/serial/st-asc.c +++ b/drivers/tty/serial/st-asc.c @@ -535,10 +535,14 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios, /* set character length */ if ((cflag & CSIZE) == CS7) { ctrl_val |= ASC_CTL_MODE_7BIT_PAR; + cflag |= PARENB; } else { ctrl_val |= (cflag & PARENB) ? ASC_CTL_MODE_8BIT_PAR : ASC_CTL_MODE_8BIT; + cflag &= ~CSIZE; + cflag |= CS8; } + termios->c_cflag = cflag; /* set stop bit */ ctrl_val |= (cflag & CSTOPB) ? ASC_CTL_STOP_2BIT : ASC_CTL_STOP_1BIT; diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 6afae051ba8d..9377da1e97c0 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -70,6 +70,8 @@ static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE, *cr3 |= USART_CR3_DEM; over8 = *cr1 & USART_CR1_OVER8; + *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); + if (over8) rs485_deat_dedt = delay_ADE * baud * 8; else @@ -810,13 +812,22 @@ static void stm32_usart_set_termios(struct uart_port *port, * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00 * M0 and M1 already cleared by cr1 initialization. */ - if (bits == 9) + if (bits == 9) { cr1 |= USART_CR1_M0; - else if ((bits == 7) && cfg->has_7bits_data) + } else if ((bits == 7) && cfg->has_7bits_data) { cr1 |= USART_CR1_M1; - else if (bits != 8) + } else if (bits != 8) { dev_dbg(port->dev, "Unsupported data bits config: %u bits\n" , bits); + cflag &= ~CSIZE; + cflag |= CS8; + termios->c_cflag = cflag; + bits = 8; + if (cflag & PARENB) { + bits++; + cr1 |= USART_CR1_M0; + } + } if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch || stm32_port->fifoen)) { diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c index aaf8748a6147..31ae705aa38b 100644 --- a/drivers/tty/serial/tegra-tcu.c +++ b/drivers/tty/serial/tegra-tcu.c @@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port) break; tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count); - xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); + uart_xmit_advance(port, count); } uart_write_wakeup(port); diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c index d6a8604157ab..d1fecc88330e 100644 --- a/drivers/tty/serial/ucc_uart.c +++ b/drivers/tty/serial/ucc_uart.c @@ -1137,6 +1137,8 @@ static unsigned int soc_info(unsigned int *rev_h, unsigned int *rev_l) /* No compatible property, so try the name. 
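The node name is expected to look like "PowerPC,8360" here, so the numeric SOC id can be scanned out of it just below.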
*/ soc_string = np->name; + of_node_put(np); + /* Extract the SOC number from the "PowerPC," string */ if ((sscanf(soc_string, "PowerPC,%u", &soc) != 1) || !soc) return 0; diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index b5a8afbc452b..f7dfa123907a 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -375,6 +375,8 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id) isrstatus &= ~CDNS_UART_IXR_TXEMPTY; } + isrstatus &= port->read_status_mask; + isrstatus &= ~port->ignore_status_mask; /* * Skip RX processing if RX is disabled as RXEMPTY will never be set * as read bytes will not be removed from the FIFO. diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index 1a0c7beec101..0569d5949133 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -1749,6 +1749,8 @@ static int hdlcdev_init(struct slgt_info *info) */ static void hdlcdev_exit(struct slgt_info *info) { + if (!info->netdev) + return; unregister_hdlc_device(info->netdev); free_netdev(info->netdev); info->netdev = NULL; diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 959f9e121cc6..7ca209d4e088 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -231,8 +231,10 @@ static void showacpu(void *dummy) unsigned long flags; /* Idle CPUs have no interesting backtrace. */ - if (idle_cpu(smp_processor_id())) + if (idle_cpu(smp_processor_id())) { + pr_info("CPU%d: backtrace skipped as idling\n", smp_processor_id()); return; + } raw_spin_lock_irqsave(&show_lock, flags); pr_info("CPU%d:\n", smp_processor_id()); @@ -259,10 +261,13 @@ static void sysrq_handle_showallcpus(int key) if (in_irq()) regs = get_irq_regs(); - if (regs) { - pr_info("CPU%d:\n", smp_processor_id()); + + pr_info("CPU%d:\n", smp_processor_id()); + if (regs) show_regs(regs); - } + else + show_stack(NULL, NULL, KERN_INFO); + schedule_work(&sysrq_showallcpus); } } diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 0fc473321d3e..5bbc2e010b48 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -172,7 +172,8 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size) have queued and recycle that ? */ if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit) return NULL; - p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); + p = kmalloc(sizeof(struct tty_buffer) + 2 * size, + GFP_ATOMIC | __GFP_NOWARN); if (p == NULL) return NULL; @@ -393,27 +394,6 @@ int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag) } EXPORT_SYMBOL(__tty_insert_flip_char); -/** - * tty_schedule_flip - push characters to ldisc - * @port: tty port to push from - * - * Takes any pending buffers and transfers their ownership to the - * ldisc side of the queue. It then schedules those characters for - * processing by the line discipline. - */ - -void tty_schedule_flip(struct tty_port *port) -{ - struct tty_bufhead *buf = &port->buf; - - /* paired w/ acquire in flush_to_ldisc(); ensures - * flush_to_ldisc() sees buffer data. - */ - smp_store_release(&buf->tail->commit, buf->tail->used); - queue_work(system_unbound_wq, &buf->work); -} -EXPORT_SYMBOL(tty_schedule_flip); - /** * tty_prepare_flip_string - make room for characters * @port: tty port @@ -543,6 +523,15 @@ static void flush_to_ldisc(struct work_struct *work) } +static inline void tty_flip_buffer_commit(struct tty_buffer *tail) +{ + /* + * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees + * buffer data. 
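+ * The reader side pairs with this via an acquire load of ->commit, so a reader that observes the updated commit index is also guaranteed to observe the buffer bytes it covers.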
+ */ + smp_store_release(&tail->commit, tail->used); +} + /** * tty_flip_buffer_push - terminal * @port: tty port to push @@ -556,10 +545,44 @@ static void flush_to_ldisc(struct work_struct *work) void tty_flip_buffer_push(struct tty_port *port) { - tty_schedule_flip(port); + struct tty_bufhead *buf = &port->buf; + + tty_flip_buffer_commit(buf->tail); + queue_work(system_unbound_wq, &buf->work); } EXPORT_SYMBOL(tty_flip_buffer_push); + +/** + * tty_insert_flip_string_and_push_buffer - add characters to the tty buffer and + * push + * @port: tty port + * @chars: characters + * @size: size + * + * The function combines tty_insert_flip_string() and tty_flip_buffer_push(), + * except that it also properly holds the @port->lock across the insert and + * the commit. + * + * To be used only internally (by pty currently). + * + * Returns: the number added. + */ +int tty_insert_flip_string_and_push_buffer(struct tty_port *port, + const unsigned char *chars, size_t size) +{ + struct tty_bufhead *buf = &port->buf; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + size = tty_insert_flip_string(port, chars, size); + if (size) + tty_flip_buffer_commit(buf->tail); + spin_unlock_irqrestore(&port->lock, flags); + + queue_work(system_unbound_wq, &buf->work); + + return size; +} + /** * tty_buffer_init - prepare a tty buffer structure * @port: tty port to initialise diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 78acc270e39a..aa0026a9839c 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -311,7 +311,7 @@ int kbd_rate(struct kbd_repeat *rpt) static void put_queue(struct vc_data *vc, int ch) { tty_insert_flip_char(&vc->port, ch, 0); - tty_schedule_flip(&vc->port); + tty_flip_buffer_push(&vc->port); } static void puts_queue(struct vc_data *vc, char *cp) @@ -320,7 +320,7 @@ static void puts_queue(struct vc_data *vc, char *cp) tty_insert_flip_char(&vc->port, *cp, 0); cp++; } - tty_schedule_flip(&vc->port); + tty_flip_buffer_push(&vc->port); } static void applkey(struct vc_data *vc, int key, char mode) @@ -565,7 +565,7 @@ static void fn_inc_console(struct vc_data *vc) static void fn_send_intr(struct vc_data *vc) { tty_insert_flip_char(&vc->port, 0, TTY_BREAK); - tty_schedule_flip(&vc->port); + tty_flip_buffer_push(&vc->port); } static void fn_scroll_forw(struct vc_data *vc) diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index a7ee1171eeb3..0252c0562dbc 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -344,7 +344,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) /* allocate everything in one go */ memsize = cols * rows * sizeof(char32_t); memsize += rows * sizeof(char32_t *); - p = vmalloc(memsize); + p = vzalloc(memsize); if (!p) return NULL; @@ -855,7 +855,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr) unsigned short *p = (unsigned short *) vc->vc_pos; vc_uniscr_delete(vc, nr); - scr_memcpyw(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2); + scr_memmovew(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2); scr_memsetw(p + vc->vc_cols - vc->state.x - nr, vc->vc_video_erase_char, nr * 2); vc->vc_need_wrap = 0; @@ -1834,7 +1834,7 @@ static void csi_m(struct vc_data *vc) static void respond_string(const char *p, size_t len, struct tty_port *port) { tty_insert_flip_string(port, p, len); - tty_schedule_flip(port); + tty_flip_buffer_push(port); } static void cursor_report(struct vc_data *vc, struct tty_struct *tty) @@ -4625,16 +4625,8 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op) if 
(op->data && font.charcount > op->charcount) rc = -ENOSPC; - if (!(op->flags & KD_FONT_FLAG_OLD)) { - if (font.width > op->width || font.height > op->height) - rc = -ENOSPC; - } else { - if (font.width != 8) - rc = -EIO; - else if ((op->height && font.height > op->height) || - font.height > 32) - rc = -ENOSPC; - } + if (font.width > op->width || font.height > op->height) + rc = -ENOSPC; if (rc) goto out; @@ -4662,7 +4654,7 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op) return -EINVAL; if (op->charcount > 512) return -EINVAL; - if (op->width <= 0 || op->width > 32 || op->height > 32) + if (op->width <= 0 || op->width > 32 || !op->height || op->height > 32) return -EINVAL; size = (op->width+7)/8 * 32 * op->charcount; if (size > max_font_size) @@ -4672,31 +4664,6 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op) if (IS_ERR(font.data)) return PTR_ERR(font.data); - if (!op->height) { /* Need to guess font height [compat] */ - int h, i; - u8 *charmap = font.data; - - /* - * If from KDFONTOP ioctl, don't allow things which can be done - * in userland,so that we can get rid of this soon - */ - if (!(op->flags & KD_FONT_FLAG_OLD)) { - kfree(font.data); - return -EINVAL; - } - - for (h = 32; h > 0; h--) - for (i = 0; i < op->charcount; i++) - if (charmap[32*i+h-1]) - goto nonzero; - - kfree(font.data); - return -EINVAL; - - nonzero: - op->height = h; - } - font.charcount = op->charcount; font.width = op->width; font.height = op->height; @@ -4704,9 +4671,11 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op) console_lock(); if (vc->vc_mode != KD_TEXT) rc = -EINVAL; - else if (vc->vc_sw->con_font_set) + else if (vc->vc_sw->con_font_set) { + if (vc_is_sel(vc)) + clear_selection(); rc = vc->vc_sw->con_font_set(vc, &font, op->flags); - else + } else rc = -ENOSYS; console_unlock(); kfree(font.data); @@ -4733,9 +4702,11 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op) console_unlock(); return -EINVAL; } - if (vc->vc_sw->con_font_default) + if (vc->vc_sw->con_font_default) { + if (vc_is_sel(vc)) + clear_selection(); rc = vc->vc_sw->con_font_default(vc, &font, s); - else + } else rc = -ENOSYS; console_unlock(); if (!rc) { diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index a9c6ea8986af..b10b86e2c17e 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -486,70 +486,6 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd, return 0; } -static inline int do_fontx_ioctl(struct vc_data *vc, int cmd, - struct consolefontdesc __user *user_cfd, - struct console_font_op *op) -{ - struct consolefontdesc cfdarg; - int i; - - if (copy_from_user(&cfdarg, user_cfd, sizeof(struct consolefontdesc))) - return -EFAULT; - - switch (cmd) { - case PIO_FONTX: - op->op = KD_FONT_OP_SET; - op->flags = KD_FONT_FLAG_OLD; - op->width = 8; - op->height = cfdarg.charheight; - op->charcount = cfdarg.charcount; - op->data = cfdarg.chardata; - return con_font_op(vc, op); - - case GIO_FONTX: - op->op = KD_FONT_OP_GET; - op->flags = KD_FONT_FLAG_OLD; - op->width = 8; - op->height = cfdarg.charheight; - op->charcount = cfdarg.charcount; - op->data = cfdarg.chardata; - i = con_font_op(vc, op); - if (i) - return i; - cfdarg.charheight = op->height; - cfdarg.charcount = op->charcount; - if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc))) - return -EFAULT; - return 0; - } - return -EINVAL; -} - -static int vt_io_fontreset(struct vc_data *vc, struct console_font_op *op) -{ - 
int ret; - - if (__is_defined(BROKEN_GRAPHICS_PROGRAMS)) { - /* - * With BROKEN_GRAPHICS_PROGRAMS defined, the default font is - * not saved. - */ - return -ENOSYS; - } - - op->op = KD_FONT_OP_SET_DEFAULT; - op->data = NULL; - ret = con_font_op(vc, op); - if (ret) - return ret; - - console_lock(); - con_set_default_unimap(vc); - console_unlock(); - - return 0; -} - static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, bool perm, struct vc_data *vc) { @@ -574,29 +510,7 @@ static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up, bool perm) { - struct console_font_op op; /* used in multiple places here */ - switch (cmd) { - case PIO_FONT: - if (!perm) - return -EPERM; - op.op = KD_FONT_OP_SET; - op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */ - op.width = 8; - op.height = 0; - op.charcount = 256; - op.data = up; - return con_font_op(vc, &op); - - case GIO_FONT: - op.op = KD_FONT_OP_GET; - op.flags = KD_FONT_FLAG_OLD; - op.width = 8; - op.height = 32; - op.charcount = 256; - op.data = up; - return con_font_op(vc, &op); - case PIO_CMAP: if (!perm) return -EPERM; @@ -605,20 +519,6 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up, case GIO_CMAP: return con_get_cmap(up); - case PIO_FONTX: - if (!perm) - return -EPERM; - - fallthrough; - case GIO_FONTX: - return do_fontx_ioctl(vc, cmd, up, &op); - - case PIO_FONTRESET: - if (!perm) - return -EPERM; - - return vt_io_fontreset(vc, &op); - case PIO_SCRNMAP: if (!perm) return -EPERM; @@ -1099,54 +999,6 @@ void vc_SAK(struct work_struct *work) #ifdef CONFIG_COMPAT -struct compat_consolefontdesc { - unsigned short charcount; /* characters in font (256 or 512) */ - unsigned short charheight; /* scan lines per character (1-32) */ - compat_caddr_t chardata; /* font data in expanded form */ -}; - -static inline int -compat_fontx_ioctl(struct vc_data *vc, int cmd, - struct compat_consolefontdesc __user *user_cfd, - int perm, struct console_font_op *op) -{ - struct compat_consolefontdesc cfdarg; - int i; - - if (copy_from_user(&cfdarg, user_cfd, sizeof(struct compat_consolefontdesc))) - return -EFAULT; - - switch (cmd) { - case PIO_FONTX: - if (!perm) - return -EPERM; - op->op = KD_FONT_OP_SET; - op->flags = KD_FONT_FLAG_OLD; - op->width = 8; - op->height = cfdarg.charheight; - op->charcount = cfdarg.charcount; - op->data = compat_ptr(cfdarg.chardata); - return con_font_op(vc, op); - - case GIO_FONTX: - op->op = KD_FONT_OP_GET; - op->flags = KD_FONT_FLAG_OLD; - op->width = 8; - op->height = cfdarg.charheight; - op->charcount = cfdarg.charcount; - op->data = compat_ptr(cfdarg.chardata); - i = con_font_op(vc, op); - if (i) - return i; - cfdarg.charheight = op->height; - cfdarg.charcount = op->charcount; - if (copy_to_user(user_cfd, &cfdarg, sizeof(struct compat_consolefontdesc))) - return -EFAULT; - return 0; - } - return -EINVAL; -} - struct compat_console_font_op { compat_uint_t op; /* operation code KD_FONT_OP_* */ compat_uint_t flags; /* KD_FONT_FLAG_* */ @@ -1223,9 +1075,6 @@ long vt_compat_ioctl(struct tty_struct *tty, /* * these need special handlers for incompatible data structures */ - case PIO_FONTX: - case GIO_FONTX: - return compat_fontx_ioctl(vc, cmd, up, perm, &op); case KDFONTOP: return compat_kdfontop_ioctl(up, perm, &op, vc); diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c index 6eeb7ed8e91f..8fe7420de033 100644 --- a/drivers/usb/cdns3/core.c +++ 
b/drivers/usb/cdns3/core.c @@ -97,13 +97,23 @@ static int cdns3_core_init_role(struct cdns3 *cdns) * can be restricted later depending on strap pin configuration. */ if (dr_mode == USB_DR_MODE_UNKNOWN) { - if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) && - IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) - dr_mode = USB_DR_MODE_OTG; - else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST)) - dr_mode = USB_DR_MODE_HOST; - else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) - dr_mode = USB_DR_MODE_PERIPHERAL; + if (cdns->version == CDNSP_CONTROLLER_V2) { + if (IS_ENABLED(CONFIG_USB_CDNSP_HOST) && + IS_ENABLED(CONFIG_USB_CDNSP_GADGET)) + dr_mode = USB_DR_MODE_OTG; + else if (IS_ENABLED(CONFIG_USB_CDNSP_HOST)) + dr_mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_CDNSP_GADGET)) + dr_mode = USB_DR_MODE_PERIPHERAL; + } else { + if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) && + IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) + dr_mode = USB_DR_MODE_OTG; + else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST)) + dr_mode = USB_DR_MODE_HOST; + else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET)) + dr_mode = USB_DR_MODE_PERIPHERAL; + } } /* diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h index 3176f924293a..0d87871499ea 100644 --- a/drivers/usb/cdns3/core.h +++ b/drivers/usb/cdns3/core.h @@ -55,7 +55,9 @@ struct cdns3_platform_data { * @otg_res: the resource for otg * @otg_v0_regs: pointer to base of v0 otg registers * @otg_v1_regs: pointer to base of v1 otg registers + * @otg_cdnsp_regs: pointer to base of CDNSP otg registers * @otg_regs: pointer to base of otg registers + * @otg_irq_regs: pointer to interrupt registers * @otg_irq: irq number for otg controller * @dev_irq: irq number for device controller * @wakeup_irq: irq number for wakeup event, it is optional @@ -86,9 +88,12 @@ struct cdns3 { struct resource otg_res; struct cdns3_otg_legacy_regs *otg_v0_regs; struct cdns3_otg_regs *otg_v1_regs; + struct cdnsp_otg_regs *otg_cdnsp_regs; struct cdns3_otg_common_regs *otg_regs; + struct cdns3_otg_irq_regs *otg_irq_regs; #define CDNS3_CONTROLLER_V0 0 #define CDNS3_CONTROLLER_V1 1 +#define CDNSP_CONTROLLER_V2 2 u32 version; bool phyrst_a_enable; diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c index 38ccd29e4cde..95863d44e3e0 100644 --- a/drivers/usb/cdns3/drd.c +++ b/drivers/usb/cdns3/drd.c @@ -2,13 +2,12 @@ /* * Cadence USBSS DRD Driver. * - * Copyright (C) 2018-2019 Cadence. + * Copyright (C) 2018-2020 Cadence. 
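The dr_mode fallback in cdns3_core_init_role() above runs the same three-way Kconfig check twice, once with the CDNSP symbols and once with the CDNS3 ones. A minimal sketch of that shared shape; cdns3_default_dr_mode() is a hypothetical name used for illustration, not a function in the driver:

static enum usb_dr_mode cdns3_default_dr_mode(bool host, bool gadget)
{
	if (host && gadget)
		return USB_DR_MODE_OTG;		/* both roles built in */
	if (host)
		return USB_DR_MODE_HOST;
	if (gadget)
		return USB_DR_MODE_PERIPHERAL;
	return USB_DR_MODE_UNKNOWN;		/* neither role enabled */
}

The CDNSP branch is then cdns3_default_dr_mode(IS_ENABLED(CONFIG_USB_CDNSP_HOST), IS_ENABLED(CONFIG_USB_CDNSP_GADGET)), and the CDNS3 branch is the same call with the CDNS3 config symbols.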
* Copyright (C) 2019 Texas Instruments * * Author: Pawel Laszczak * Roger Quadros * - * */ #include #include @@ -28,8 +27,9 @@ * * Returns 0 on success otherwise negative errno */ -int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) +static int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) { + u32 __iomem *override_reg; u32 reg; switch (mode) { @@ -39,11 +39,24 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) break; case USB_DR_MODE_OTG: dev_dbg(cdns->dev, "Set controller to OTG mode\n"); - if (cdns->version == CDNS3_CONTROLLER_V1) { - reg = readl(&cdns->otg_v1_regs->override); - reg |= OVERRIDE_IDPULLUP; - writel(reg, &cdns->otg_v1_regs->override); + if (cdns->version == CDNSP_CONTROLLER_V2) + override_reg = &cdns->otg_cdnsp_regs->override; + else if (cdns->version == CDNS3_CONTROLLER_V1) + override_reg = &cdns->otg_v1_regs->override; + else + override_reg = &cdns->otg_v0_regs->ctrl1; + + reg = readl(override_reg); + + if (cdns->version != CDNS3_CONTROLLER_V0) + reg |= OVERRIDE_IDPULLUP; + else + reg |= OVERRIDE_IDPULLUP_V0; + + writel(reg, override_reg); + + if (cdns->version == CDNS3_CONTROLLER_V1) { /* * Enable work around feature built into the * controller to address issue with RX Sensitivity @@ -55,10 +68,6 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) reg |= PHYRST_CFG_PHYRST_A_ENABLE; writel(reg, &cdns->otg_v1_regs->phyrst_cfg); } - } else { - reg = readl(&cdns->otg_v0_regs->ctrl1); - reg |= OVERRIDE_IDPULLUP_V0; - writel(reg, &cdns->otg_v0_regs->ctrl1); } /* @@ -123,7 +132,7 @@ bool cdns3_is_device(struct cdns3 *cdns) */ static void cdns3_otg_disable_irq(struct cdns3 *cdns) { - writel(0, &cdns->otg_regs->ien); + writel(0, &cdns->otg_irq_regs->ien); } /** @@ -133,7 +142,7 @@ static void cdns3_otg_disable_irq(struct cdns3 *cdns) static void cdns3_otg_enable_irq(struct cdns3 *cdns) { writel(OTGIEN_ID_CHANGE_INT | OTGIEN_VBUSVALID_RISE_INT | - OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_regs->ien); + OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_irq_regs->ien); } /** @@ -144,16 +153,21 @@ static void cdns3_otg_enable_irq(struct cdns3 *cdns) */ int cdns3_drd_host_on(struct cdns3 *cdns) { - u32 val; + u32 val, ready_bit; int ret; /* Enable host mode. 
*/ writel(OTGCMD_HOST_BUS_REQ | OTGCMD_OTG_DIS, &cdns->otg_regs->cmd); + if (cdns->version == CDNSP_CONTROLLER_V2) + ready_bit = OTGSTS_CDNSP_XHCI_READY; + else + ready_bit = OTGSTS_CDNS3_XHCI_READY; + dev_dbg(cdns->dev, "Waiting till Host mode is turned on\n"); ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val, - val & OTGSTS_XHCI_READY, 1, 100000); + val & ready_bit, 1, 100000); if (ret) dev_err(cdns->dev, "timeout waiting for xhci_ready\n"); @@ -189,17 +203,22 @@ void cdns3_drd_host_off(struct cdns3 *cdns) */ int cdns3_drd_gadget_on(struct cdns3 *cdns) { - int ret, val; u32 reg = OTGCMD_OTG_DIS; + u32 ready_bit; + int ret, val; /* switch OTG core */ writel(OTGCMD_DEV_BUS_REQ | reg, &cdns->otg_regs->cmd); dev_dbg(cdns->dev, "Waiting till Device mode is turned on\n"); + if (cdns->version == CDNSP_CONTROLLER_V2) + ready_bit = OTGSTS_CDNSP_DEV_READY; + else + ready_bit = OTGSTS_CDNS3_DEV_READY; + ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val, - val & OTGSTS_DEV_READY, - 1, 100000); + val & ready_bit, 1, 100000); if (ret) { dev_err(cdns->dev, "timeout waiting for dev_ready\n"); return ret; @@ -244,7 +263,7 @@ static int cdns3_init_otg_mode(struct cdns3 *cdns) cdns3_otg_disable_irq(cdns); /* clear all interrupts */ - writel(~0, &cdns->otg_regs->ivect); + writel(~0, &cdns->otg_irq_regs->ivect); ret = cdns3_set_mode(cdns, USB_DR_MODE_OTG); if (ret) @@ -313,7 +332,7 @@ static irqreturn_t cdns3_drd_irq(int irq, void *data) if (cdns->in_lpm) return ret; - reg = readl(&cdns->otg_regs->ivect); + reg = readl(&cdns->otg_irq_regs->ivect); if (!reg) return IRQ_NONE; @@ -332,7 +351,7 @@ static irqreturn_t cdns3_drd_irq(int irq, void *data) ret = IRQ_WAKE_THREAD; } - writel(~0, &cdns->otg_regs->ivect); + writel(~0, &cdns->otg_irq_regs->ivect); return ret; } @@ -347,28 +366,43 @@ int cdns3_drd_init(struct cdns3 *cdns) return PTR_ERR(regs); /* Detection of DRD version. Controller has been released - * in two versions. Both are similar, but they have same changes - * in register maps. - * The first register in old version is command register and it's read - * only, so driver should read 0 from it. On the other hand, in v1 - * the first register contains device ID number which is not set to 0. - * Driver uses this fact to detect the proper version of + * in three versions. All are very similar and are software compatible, + * but they have some changes in register maps. + * The first register in the oldest version is the command register and + * it's read only. The driver should read 0 from it. On the other hand, in v1 + * and v2 the first register contains the device ID number which is not + * set to 0. The driver uses this fact to detect the proper version of * controller. 
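Both cdns3_drd_host_on() and cdns3_drd_gadget_on() above now follow one pattern: pick the ready bit by controller version, then poll OTGSTS until it is set. A condensed sketch of that pattern, assuming the register layout from this patch; cdns3_poll_ready() is an illustrative helper name only, not driver code:

static int cdns3_poll_ready(struct cdns3 *cdns, u32 cdns3_bit, u32 cdnsp_bit)
{
	u32 bit = cdns->version == CDNSP_CONTROLLER_V2 ? cdnsp_bit : cdns3_bit;
	u32 val;

	/* poll OTGSTS every 1 us, give up after 100 ms */
	return readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
					 val & bit, 1, 100000);
}

Host bring-up would pass the OTGSTS_CDNS3_XHCI_READY/OTGSTS_CDNSP_XHCI_READY pair, device bring-up the *_DEV_READY pair; per the drd.h definitions in this patch, the CDNS3 and CDNSP controllers swap bits 26 and 27 between those two meanings, which is exactly why the bit must be chosen by version.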
*/ cdns->otg_v0_regs = regs; if (!readl(&cdns->otg_v0_regs->cmd)) { cdns->version = CDNS3_CONTROLLER_V0; cdns->otg_v1_regs = NULL; + cdns->otg_cdnsp_regs = NULL; cdns->otg_regs = regs; + cdns->otg_irq_regs = (struct cdns3_otg_irq_regs *) + &cdns->otg_v0_regs->ien; writel(1, &cdns->otg_v0_regs->simulate); dev_dbg(cdns->dev, "DRD version v0 (%08x)\n", readl(&cdns->otg_v0_regs->version)); } else { cdns->otg_v0_regs = NULL; cdns->otg_v1_regs = regs; + cdns->otg_cdnsp_regs = regs; + cdns->otg_regs = (void *)&cdns->otg_v1_regs->cmd; - cdns->version = CDNS3_CONTROLLER_V1; - writel(1, &cdns->otg_v1_regs->simulate); + + if (cdns->otg_cdnsp_regs->did == OTG_CDNSP_DID) { + cdns->otg_irq_regs = (struct cdns3_otg_irq_regs *) + &cdns->otg_cdnsp_regs->ien; + cdns->version = CDNSP_CONTROLLER_V2; + } else { + cdns->otg_irq_regs = (struct cdns3_otg_irq_regs *) + &cdns->otg_v1_regs->ien; + writel(1, &cdns->otg_v1_regs->simulate); + cdns->version = CDNS3_CONTROLLER_V1; + } + dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n", readl(&cdns->otg_v1_regs->did), readl(&cdns->otg_v1_regs->rid)); @@ -378,10 +412,17 @@ int cdns3_drd_init(struct cdns3 *cdns) /* Update dr_mode according to STRAP configuration. */ cdns->dr_mode = USB_DR_MODE_OTG; - if (state == OTGSTS_STRAP_HOST) { + + if ((cdns->version == CDNSP_CONTROLLER_V2 && + state == OTGSTS_CDNSP_STRAP_HOST) || + (cdns->version != CDNSP_CONTROLLER_V2 && + state == OTGSTS_STRAP_HOST)) { dev_dbg(cdns->dev, "Controller strapped to HOST\n"); cdns->dr_mode = USB_DR_MODE_HOST; - } else if (state == OTGSTS_STRAP_GADGET) { + } else if ((cdns->version == CDNSP_CONTROLLER_V2 && + state == OTGSTS_CDNSP_STRAP_GADGET) || + (cdns->version != CDNSP_CONTROLLER_V2 && + state == OTGSTS_STRAP_GADGET)) { dev_dbg(cdns->dev, "Controller strapped to PERIPHERAL\n"); cdns->dr_mode = USB_DR_MODE_PERIPHERAL; } diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h index f1ccae285a16..a767b6893938 100644 --- a/drivers/usb/cdns3/drd.h +++ b/drivers/usb/cdns3/drd.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Cadence USB3 DRD header file. + * Cadence USB3 and USBSSP DRD header file. * - * Copyright (C) 2018-2019 Cadence. + * Copyright (C) 2018-2020 Cadence. * * Author: Pawel Laszczak */ @@ -13,7 +13,7 @@ #include #include "core.h" -/* DRD register interface for version v1. */ +/* DRD register interface for version v1 of cdns3 driver. */ struct cdns3_otg_regs { __le32 did; __le32 rid; @@ -38,7 +38,7 @@ struct cdns3_otg_regs { __le32 ctrl2; }; -/* DRD register interface for version v0. */ +/* DRD register interface for version v0 of cdns3 driver. */ struct cdns3_otg_legacy_regs { __le32 cmd; __le32 sts; @@ -57,14 +57,45 @@ struct cdns3_otg_legacy_regs { __le32 ctrl1; }; +/* DRD register interface for cdnsp driver */ +struct cdnsp_otg_regs { + __le32 did; + __le32 rid; + __le32 cfgs1; + __le32 cfgs2; + __le32 cmd; + __le32 sts; + __le32 state; + __le32 ien; + __le32 ivect; + __le32 tmr; + __le32 simulate; + __le32 adpbc_sts; + __le32 adp_ramp_time; + __le32 adpbc_ctrl1; + __le32 adpbc_ctrl2; + __le32 override; + __le32 vbusvalid_dbnc_cfg; + __le32 sessvalid_dbnc_cfg; + __le32 susp_timing_ctrl; +}; + +#define OTG_CDNSP_DID 0x0004034E + /* - * Common registers interface for both version of DRD. + * Common registers interface for both CDNS3 and CDNSP versions of DRD. */ struct cdns3_otg_common_regs { __le32 cmd; __le32 sts; __le32 state; - __le32 different1; +}; + +/* + * Interrupt related registers. 
These registers are mapped at a different + * location for the CDNSP controller. + */ struct cdns3_otg_irq_regs { __le32 ien; __le32 ivect; }; @@ -92,9 +123,9 @@ struct cdns3_otg_common_regs { #define OTGCMD_DEV_BUS_DROP BIT(8) /* Drop the bus for Host mode*/ #define OTGCMD_HOST_BUS_DROP BIT(9) -/* Power Down USBSS-DEV. */ +/* Power Down USBSS-DEV - only for CDNS3. */ #define OTGCMD_DEV_POWER_OFF BIT(11) -/* Power Down CDNSXHCI. */ +/* Power Down CDNSXHCI - only for CDNS3. */ #define OTGCMD_HOST_POWER_OFF BIT(12) /* OTGIEN - bitmasks */ @@ -123,20 +154,31 @@ struct cdns3_otg_common_regs { #define OTGSTS_OTG_NRDY_MASK BIT(11) #define OTGSTS_OTG_NRDY(p) ((p) & OTGSTS_OTG_NRDY_MASK) /* - * Value of the strap pins. + * Value of the strap pins for: + * CDNS3: * 000 - no default configuration * 010 - Controller initially configured as Host * 100 - Controller initially configured as Device + * CDNSP: + * 000 - No default configuration. + * 010 - Controller initially configured as Host. + * 100 - Controller initially configured as Device. */ #define OTGSTS_STRAP(p) (((p) & GENMASK(14, 12)) >> 12) #define OTGSTS_STRAP_NO_DEFAULT_CFG 0x00 #define OTGSTS_STRAP_HOST_OTG 0x01 #define OTGSTS_STRAP_HOST 0x02 #define OTGSTS_STRAP_GADGET 0x04 +#define OTGSTS_CDNSP_STRAP_HOST 0x01 +#define OTGSTS_CDNSP_STRAP_GADGET 0x02 + /* Host mode is turned on. */ -#define OTGSTS_XHCI_READY BIT(26) +#define OTGSTS_CDNS3_XHCI_READY BIT(26) +#define OTGSTS_CDNSP_XHCI_READY BIT(27) + /* Device mode is turned on. */ -#define OTGSTS_DEV_READY BIT(27) +#define OTGSTS_CDNS3_DEV_READY BIT(27) +#define OTGSTS_CDNSP_DEV_READY BIT(26) /* OTGSTATE- bitmasks */ #define OTGSTATE_DEV_STATE_MASK GENMASK(2, 0) @@ -152,6 +194,8 @@ struct cdns3_otg_common_regs { #define OVERRIDE_IDPULLUP BIT(0) /* Only for CDNS3_CONTROLLER_V0 version */ #define OVERRIDE_IDPULLUP_V0 BIT(24) +/* Vbusvalid/Sessvalid override select. */ +#define OVERRIDE_SESS_VLD_SEL BIT(10) /* PHYRST_CFG - bitmasks */ #define PHYRST_CFG_PHYRST_A_ENABLE BIT(0) @@ -170,6 +214,5 @@ int cdns3_drd_gadget_on(struct cdns3 *cdns); void cdns3_drd_gadget_off(struct cdns3 *cdns); int cdns3_drd_host_on(struct cdns3 *cdns); void cdns3_drd_host_off(struct cdns3 *cdns); -int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode); #endif /* __LINUX_CDNS3_DRD */ diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index e11162294413..e3a8b6c71aa1 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -352,19 +352,6 @@ static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep) cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs); } -static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req) -{ - struct cdns3_endpoint *priv_ep = priv_req->priv_ep; - int current_trb = priv_req->start_trb; - - while (current_trb != priv_req->end_trb) { - cdns3_ep_inc_deq(priv_ep); - current_trb = priv_ep->dequeue; - } - - cdns3_ep_inc_deq(priv_ep); -} - /** * cdns3_allow_enable_l1 - enable/disable permits to transition to L1. 
* @priv_dev: Extended gadget object @@ -655,9 +642,9 @@ static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep) trace_cdns3_wa2(priv_ep, "removes eldest request"); kfree(priv_req->request.buf); + list_del_init(&priv_req->list); cdns3_gadget_ep_free_request(&priv_ep->endpoint, &priv_req->request); - list_del_init(&priv_req->list); --priv_ep->wa2_counter; if (!chain) @@ -1518,10 +1505,11 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, trb = priv_ep->trb_pool + priv_ep->dequeue; - /* Request was dequeued and TRB was changed to TRB_LINK. */ - if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { + /* The TRB was changed to a link TRB and the request was handled at ep_dequeue */ + while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { trace_cdns3_complete_trb(priv_ep, trb); - cdns3_move_deq_to_next_trb(priv_req); + cdns3_ep_inc_deq(priv_ep); + trb = priv_ep->trb_pool + priv_ep->dequeue; } if (!request->stream_id) { @@ -1543,7 +1531,8 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, TRB_LEN(le32_to_cpu(trb->length)); if (priv_req->num_of_trb > 1 && - le32_to_cpu(trb->control) & TRB_SMM) + le32_to_cpu(trb->control) & TRB_SMM && + le32_to_cpu(trb->control) & TRB_CHAIN) transfer_end = true; cdns3_ep_inc_deq(priv_ep); @@ -1703,6 +1692,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) ep_cfg &= ~EP_CFG_ENABLE; writel(ep_cfg, &priv_dev->regs->ep_cfg); priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN; + priv_ep->flags |= EP_UPDATE_EP_TRBADDR; } cdns3_transfer_completed(priv_dev, priv_ep); } else if (!(priv_ep->flags & EP_STALLED) && @@ -2293,11 +2283,16 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep, int ret = 0; int val; + if (!ep) { + pr_debug("usbss: ep not configured?\n"); + return -EINVAL; + } + priv_ep = ep_to_cdns3_ep(ep); priv_dev = priv_ep->cdns3_dev; comp_desc = priv_ep->endpoint.comp_desc; - if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { + if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) { dev_dbg(priv_dev->dev, "usbss: invalid parameters\n"); return -EINVAL; } @@ -2609,7 +2604,7 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request) { struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); - struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + struct cdns3_device *priv_dev; struct usb_request *req, *req_temp; struct cdns3_request *priv_req; struct cdns3_trb *link_trb; @@ -2620,6 +2615,8 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep, if (!ep || !request || !ep->desc) return -EINVAL; + priv_dev = priv_ep->cdns3_dev; + spin_lock_irqsave(&priv_dev->lock, flags); priv_req = to_cdns3_request(request); @@ -2697,6 +2694,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) struct usb_request *request; struct cdns3_request *priv_req; struct cdns3_trb *trb = NULL; + struct cdns3_trb trb_tmp; int ret; int val; @@ -2706,8 +2704,10 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) if (request) { priv_req = to_cdns3_request(request); trb = priv_req->trb; - if (trb) + if (trb) { + trb_tmp = *trb; trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); + } } writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); @@ -2722,7 +2722,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) if (request) { if (trb) - trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); + *trb = trb_tmp; cdns3_rearm_transfer(priv_ep, 1); } diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c index 
6ed4b00dba96..7a2a9559693f 100644 --- a/drivers/usb/chipidea/otg_fsm.c +++ b/drivers/usb/chipidea/otg_fsm.c @@ -256,8 +256,10 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t) ci->enabled_otg_timer_bits &= ~(1 << t); if (ci->next_otg_timer == t) { if (ci->enabled_otg_timer_bits == 0) { + spin_unlock_irqrestore(&ci->lock, flags); /* No enabled timers after delete it */ hrtimer_cancel(&ci->otg_fsm_hrtimer); + spin_lock_irqsave(&ci->lock, flags); ci->next_otg_timer = NUM_OTG_FSM_TIMERS; } else { /* Find the next timer */ diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 5f35cdd2cf1d..67d8da04848e 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1034,6 +1034,9 @@ isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req) struct ci_hdrc *ci = req->context; unsigned long flags; + if (req->status < 0) + return; + if (ci->setaddr) { hw_usb_set_address(ci, ci->address); ci->setaddr = false; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 7950d5b3af42..070b838c7da9 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1830,6 +1830,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */ .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */ }, + { USB_DEVICE(0x0c26, 0x0020), /* Icom ICF3400 Serie */ + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ + }, { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ }, diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index d1e4a7379beb..80332b6a1963 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -755,6 +755,7 @@ static int wdm_release(struct inode *inode, struct file *file) poison_urbs(desc); spin_lock_irq(&desc->iuspin); desc->resp_count = 0; + clear_bit(WDM_RESPONDING, &desc->flags); spin_unlock_irq(&desc->iuspin); desc->manage_power(desc->intf, 0); unpoison_urbs(desc); diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c index a76a086b9c54..f0c0e8db7038 100644 --- a/drivers/usb/common/debug.c +++ b/drivers/usb/common/debug.c @@ -207,30 +207,28 @@ static void usb_decode_set_isoch_delay(__u8 wValue, char *str, size_t size) snprintf(str, size, "Set Isochronous Delay(Delay = %d ns)", wValue); } -/** - * usb_decode_ctrl - Returns human readable representation of control request. - * @str: buffer to return a human-readable representation of control request. - * This buffer should have about 200 bytes. - * @size: size of str buffer. - * @bRequestType: matches the USB bmRequestType field - * @bRequest: matches the USB bRequest field - * @wValue: matches the USB wValue field (CPU byte order) - * @wIndex: matches the USB wIndex field (CPU byte order) - * @wLength: matches the USB wLength field (CPU byte order) - * - * Function returns decoded, formatted and human-readable description of - * control request packet. - * - * The usage scenario for this is for tracepoints, so function as a return - * use the same value as in parameters. This approach allows to use this - * function in TP_printk - * - * Important: wValue, wIndex, wLength parameters before invoking this function - * should be processed by le16_to_cpu macro. 
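The ci_otg_del_timer() change above applies a general locking rule: never call hrtimer_cancel() while holding a lock that the timer callback also takes, because cancel waits for a running callback to finish and can deadlock. Reduced to its essential shape (an illustrative fragment, not driver code):

/* ci->lock is also taken by the hrtimer callback */
spin_unlock_irqrestore(&ci->lock, flags);
hrtimer_cancel(&ci->otg_fsm_hrtimer);	/* may wait for the callback */
spin_lock_irqsave(&ci->lock, flags);
/* state may have changed while the lock was dropped; re-check it */

The cost of this fix is that the protected state can change across the unlock/lock window, so the pattern is only safe when the surrounding code re-validates whatever it relies on afterwards.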
- */ -const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, - __u8 bRequest, __u16 wValue, __u16 wIndex, - __u16 wLength) +static void usb_decode_ctrl_generic(char *str, size_t size, __u8 bRequestType, + __u8 bRequest, __u16 wValue, __u16 wIndex, + __u16 wLength) +{ + u8 recip = bRequestType & USB_RECIP_MASK; + u8 type = bRequestType & USB_TYPE_MASK; + + snprintf(str, size, + "Type=%s Recipient=%s Dir=%s bRequest=%u wValue=%u wIndex=%u wLength=%u", + (type == USB_TYPE_STANDARD) ? "Standard" : + (type == USB_TYPE_VENDOR) ? "Vendor" : + (type == USB_TYPE_CLASS) ? "Class" : "Unknown", + (recip == USB_RECIP_DEVICE) ? "Device" : + (recip == USB_RECIP_INTERFACE) ? "Interface" : + (recip == USB_RECIP_ENDPOINT) ? "Endpoint" : "Unknown", + (bRequestType & USB_DIR_IN) ? "IN" : "OUT", + bRequest, wValue, wIndex, wLength); +} + +static void usb_decode_ctrl_standard(char *str, size_t size, __u8 bRequestType, + __u8 bRequest, __u16 wValue, __u16 wIndex, + __u16 wLength) { switch (bRequest) { case USB_REQ_GET_STATUS: @@ -271,14 +269,48 @@ const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, usb_decode_set_isoch_delay(wValue, str, size); break; default: - snprintf(str, size, "%02x %02x %02x %02x %02x %02x %02x %02x", - bRequestType, bRequest, - (u8)(cpu_to_le16(wValue) & 0xff), - (u8)(cpu_to_le16(wValue) >> 8), - (u8)(cpu_to_le16(wIndex) & 0xff), - (u8)(cpu_to_le16(wIndex) >> 8), - (u8)(cpu_to_le16(wLength) & 0xff), - (u8)(cpu_to_le16(wLength) >> 8)); + usb_decode_ctrl_generic(str, size, bRequestType, bRequest, + wValue, wIndex, wLength); + break; + } +} + +/** + * usb_decode_ctrl - Returns a human-readable representation of a control request. + * @str: buffer to return a human-readable representation of control request. + * This buffer should have about 200 bytes. + * @size: size of str buffer. + * @bRequestType: matches the USB bmRequestType field + * @bRequest: matches the USB bRequest field + * @wValue: matches the USB wValue field (CPU byte order) + * @wIndex: matches the USB wIndex field (CPU byte order) + * @wLength: matches the USB wLength field (CPU byte order) + * + * The function returns a decoded, formatted and human-readable description of + * the control request packet. + * + * The usage scenario for this is tracepoints, so the function returns the + * same @str buffer it was passed. This approach allows the function to be + * used directly in TP_printk(). + * + * Important: the wValue, wIndex and wLength parameters should be passed + * through the le16_to_cpu macro before invoking this function. 
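With the split above, standard requests keep their per-request decoding while vendor- and class-specific requests now produce a readable Type/Recipient/Dir summary instead of the old raw hex dump. A hypothetical call site, assuming a struct usb_ctrlrequest *ctrl as the source of the fields:

char buf[200];	/* the kernel-doc above asks for about 200 bytes */

usb_decode_ctrl(buf, sizeof(buf), ctrl->bRequestType, ctrl->bRequest,
		le16_to_cpu(ctrl->wValue), le16_to_cpu(ctrl->wIndex),
		le16_to_cpu(ctrl->wLength));
/* for a vendor request this yields something like:
 * "Type=Vendor Recipient=Device Dir=IN bRequest=32 wValue=0 wIndex=0 wLength=64"
 */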
+ */ +const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, + __u8 bRequest, __u16 wValue, __u16 wIndex, + __u16 wLength) +{ + switch (bRequestType & USB_TYPE_MASK) { + case USB_TYPE_STANDARD: + usb_decode_ctrl_standard(str, size, bRequestType, bRequest, + wValue, wIndex, wLength); + break; + case USB_TYPE_VENDOR: + case USB_TYPE_CLASS: + default: + usb_decode_ctrl_generic(str, size, bRequestType, bRequest, + wValue, wIndex, wLength); + break; } return str; diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index ec0d6c50610c..eee78cbfaa72 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -614,10 +614,10 @@ const struct dev_pm_ops usb_hcd_pci_pm_ops = { .suspend_noirq = hcd_pci_suspend_noirq, .resume_noirq = hcd_pci_resume_noirq, .resume = hcd_pci_resume, - .freeze = check_root_hub_suspended, + .freeze = hcd_pci_suspend, .freeze_noirq = check_root_hub_suspended, .thaw_noirq = NULL, - .thaw = NULL, + .thaw = hcd_pci_resume, .poweroff = hcd_pci_suspend, .poweroff_noirq = hcd_pci_suspend_noirq, .restore_noirq = hcd_pci_resume_noirq, diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index ddd1d3eef912..bf5e37667697 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2661,6 +2661,7 @@ int usb_add_hcd(struct usb_hcd *hcd, { int retval; struct usb_device *rhdev; + struct usb_hcd *shared_hcd; if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) { hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev); @@ -2817,13 +2818,26 @@ int usb_add_hcd(struct usb_hcd *hcd, goto err_hcd_driver_start; } - /* starting here, usbcore will pay attention to this root hub */ - retval = register_root_hub(hcd); - if (retval != 0) - goto err_register_root_hub; + /* starting here, usbcore will pay attention to the shared HCD roothub */ + shared_hcd = hcd->shared_hcd; + if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) { + retval = register_root_hub(shared_hcd); + if (retval != 0) + goto err_register_root_hub; - if (hcd->uses_new_polling && HCD_POLL_RH(hcd)) - usb_hcd_poll_rh_status(hcd); + if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd)) + usb_hcd_poll_rh_status(shared_hcd); + } + + /* starting here, usbcore will pay attention to this root hub */ + if (!HCD_DEFER_RH_REGISTER(hcd)) { + retval = register_root_hub(hcd); + if (retval != 0) + goto err_register_root_hub; + + if (hcd->uses_new_polling && HCD_POLL_RH(hcd)) + usb_hcd_poll_rh_status(hcd); + } return retval; @@ -2866,6 +2880,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd); void usb_remove_hcd(struct usb_hcd *hcd) { struct usb_device *rhdev = hcd->self.root_hub; + bool rh_registered; dev_info(hcd->self.controller, "remove, state %x\n", hcd->state); @@ -2876,6 +2891,7 @@ void usb_remove_hcd(struct usb_hcd *hcd) dev_dbg(hcd->self.controller, "roothub graceful disconnect\n"); spin_lock_irq (&hcd_root_hub_lock); + rh_registered = hcd->rh_registered; hcd->rh_registered = 0; spin_unlock_irq (&hcd_root_hub_lock); @@ -2885,7 +2901,8 @@ void usb_remove_hcd(struct usb_hcd *hcd) cancel_work_sync(&hcd->died_work); mutex_lock(&usb_bus_idr_lock); - usb_disconnect(&rhdev); /* Sets rhdev to NULL */ + if (rh_registered) + usb_disconnect(&rhdev); /* Sets rhdev to NULL */ mutex_unlock(&usb_bus_idr_lock); /* diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index ad57ebf49154..8510a7d8ee94 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -366,6 +366,9 @@ static const struct usb_device_id usb_quirk_list[] = 
{ { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM }, { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM }, + /* Realforce 87U Keyboard */ + { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM }, + /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -392,6 +395,15 @@ static const struct usb_device_id usb_quirk_list[] = { /* Kingston DataTraveler 3.0 */ { USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM }, + /* NVIDIA Jetson devices in Force Recovery mode */ + { USB_DEVICE(0x0955, 0x7018), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x0955, 0x7019), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x0955, 0x7418), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x0955, 0x7721), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x0955, 0x7c18), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x0955, 0x7e19), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x0955, 0x7f21), .driver_info = USB_QUIRK_RESET_RESUME }, + /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, @@ -408,6 +420,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0b05, 0x17e0), .driver_info = USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* Realtek Semiconductor Corp. Mass Storage Device (Multicard Reader)*/ + { USB_DEVICE(0x0bda, 0x0151), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Realtek hub in Dell WD19 (Type-C) */ { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM }, { USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -444,6 +459,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1532, 0x0116), .driver_info = USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + /* Lenovo ThinkPad OneLink+ Dock twin hub controllers (VIA Labs VL812) */ + { USB_DEVICE(0x17ef, 0x1018), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x17ef, 0x1019), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Lenovo USB-C to Ethernet Adapter RTL8153-04 */ { USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM }, @@ -520,6 +539,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, + /* DELL USB GEN2 */ + { USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME }, + + /* VCOM device */ + { USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 0e185ef474ac..2abbaf25b502 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -3628,7 +3628,8 @@ void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg) { /* remove the soft-disconnect and let's go */ - dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON); + if (!hsotg->role_sw || (dwc2_readl(hsotg, GOTGCTL) & GOTGCTL_BSESVLD)) + dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON); } /** @@ -4553,7 +4554,6 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget, WARN_ON(hsotg->driver); - driver->driver.bus = NULL; hsotg->driver = driver; hsotg->gadget.dev.of_node = hsotg->dev->of_node; hsotg->gadget.speed = USB_SPEED_UNKNOWN; diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 89334427477f..c602848aa418 100644 --- 
a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -5098,7 +5098,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { retval = -EINVAL; - goto error1; + goto error2; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 2c8f5f45255f..297a11ff7193 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -138,9 +138,9 @@ static int __dwc2_lowlevel_phy_enable(struct dwc2_hsotg *hsotg) } else if (hsotg->plat && hsotg->plat->phy_init) { ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type); } else { - ret = phy_power_on(hsotg->phy); + ret = phy_init(hsotg->phy); if (ret == 0) - ret = phy_init(hsotg->phy); + ret = phy_power_on(hsotg->phy); } return ret; @@ -172,9 +172,9 @@ static int __dwc2_lowlevel_phy_disable(struct dwc2_hsotg *hsotg) } else if (hsotg->plat && hsotg->plat->phy_exit) { ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type); } else { - ret = phy_exit(hsotg->phy); + ret = phy_power_off(hsotg->phy); if (ret == 0) - ret = phy_power_off(hsotg->phy); + ret = phy_exit(hsotg->phy); } return ret; diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 35fdf9d54ebf..8384e534bc0c 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -169,8 +169,13 @@ static void __dwc3_set_mode(struct work_struct *work) break; } - /* For DRD host or device mode only */ - if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) { + /* + * When current_dr_role is not set, there's no role switching. + * Only perform GCTL.CoreSoftReset when there's DRD role switching. + */ + if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) || + DWC3_VER_IS_PRIOR(DWC31, 190A)) && + dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) { reg = dwc3_readl(dwc->regs, DWC3_GCTL); reg |= DWC3_GCTL_CORESOFTRESET; dwc3_writel(dwc->regs, DWC3_GCTL, reg); @@ -305,7 +310,8 @@ int dwc3_core_soft_reset(struct dwc3 *dwc) reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg |= DWC3_DCTL_CSFTRST; - dwc3_writel(dwc->regs, DWC3_DCTL, reg); + reg &= ~DWC3_DCTL_RUN_STOP; + dwc3_gadget_dctl_write_safe(dwc, reg); /* * For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit @@ -756,15 +762,16 @@ static void dwc3_core_exit(struct dwc3 *dwc) { dwc3_event_buffers_cleanup(dwc); + usb_phy_set_suspend(dwc->usb2_phy, 1); + usb_phy_set_suspend(dwc->usb3_phy, 1); + phy_power_off(dwc->usb2_generic_phy); + phy_power_off(dwc->usb3_generic_phy); + usb_phy_shutdown(dwc->usb2_phy); usb_phy_shutdown(dwc->usb3_phy); phy_exit(dwc->usb2_generic_phy); phy_exit(dwc->usb3_generic_phy); - usb_phy_set_suspend(dwc->usb2_phy, 1); - usb_phy_set_suspend(dwc->usb3_phy, 1); - phy_power_off(dwc->usb2_generic_phy); - phy_power_off(dwc->usb3_generic_phy); clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks); reset_control_assert(dwc->reset); } @@ -1299,10 +1306,10 @@ static void dwc3_get_properties(struct dwc3 *dwc) u8 lpm_nyet_threshold; u8 tx_de_emphasis; u8 hird_threshold; - u8 rx_thr_num_pkt_prd; - u8 rx_max_burst_prd; - u8 tx_thr_num_pkt_prd; - u8 tx_max_burst_prd; + u8 rx_thr_num_pkt_prd = 0; + u8 rx_max_burst_prd = 0; + u8 tx_thr_num_pkt_prd = 0; + u8 tx_max_burst_prd = 0; u8 tx_fifo_resize_max_num; const char *usb_psy_name; int ret; @@ -1694,16 +1701,16 @@ err5: dwc3_debugfs_exit(dwc); dwc3_event_buffers_cleanup(dwc); - usb_phy_shutdown(dwc->usb2_phy); - usb_phy_shutdown(dwc->usb3_phy); - phy_exit(dwc->usb2_generic_phy); - phy_exit(dwc->usb3_generic_phy); - 
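The dwc2/platform.c hunks above switch the generic-PHY calls to the conventional ordering: phy_init() before phy_power_on() on the way up, and phy_power_off() before phy_exit() on the way down. A minimal sketch of that convention for any struct phy *phy; the error handling shown is an assumption for illustration, not copied from the driver:

int ret;

ret = phy_init(phy);		/* program the PHY first */
if (ret)
	return ret;

ret = phy_power_on(phy);	/* then apply power */
if (ret) {
	phy_exit(phy);		/* unwind init on failure */
	return ret;
}

/* ... PHY in use ... */

phy_power_off(phy);		/* teardown mirrors bring-up, in reverse */
phy_exit(phy);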
usb_phy_set_suspend(dwc->usb2_phy, 1); usb_phy_set_suspend(dwc->usb3_phy, 1); phy_power_off(dwc->usb2_generic_phy); phy_power_off(dwc->usb3_generic_phy); + usb_phy_shutdown(dwc->usb2_phy); + usb_phy_shutdown(dwc->usb3_phy); + phy_exit(dwc->usb2_generic_phy); + phy_exit(dwc->usb3_generic_phy); + dwc3_ulpi_exit(dwc); err4: diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c index adcdf9ad66c7..f6f63cfc1f47 100644 --- a/drivers/usb/dwc3/drd.c +++ b/drivers/usb/dwc3/drd.c @@ -595,16 +595,15 @@ int dwc3_drd_init(struct dwc3 *dwc) { int ret, irq; + if (ROLE_SWITCH && + device_property_read_bool(dwc->dev, "usb-role-switch")) + return dwc3_setup_role_switch(dwc); + dwc->edev = dwc3_get_extcon(dwc); if (IS_ERR(dwc->edev)) return PTR_ERR(dwc->edev); - if (ROLE_SWITCH && - device_property_read_bool(dwc->dev, "usb-role-switch")) { - ret = dwc3_setup_role_switch(dwc); - if (ret < 0) - return ret; - } else if (dwc->edev) { + if (dwc->edev) { dwc->edev_nb.notifier_call = dwc3_drd_notifier; ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST, &dwc->edev_nb); diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index 90bb022737da..ee7b71827216 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -37,15 +37,6 @@ struct dwc3_exynos { struct regulator *vdd10; }; -static int dwc3_exynos_remove_child(struct device *dev, void *unused) -{ - struct platform_device *pdev = to_platform_device(dev); - - platform_device_unregister(pdev); - - return 0; -} - static int dwc3_exynos_probe(struct platform_device *pdev) { struct dwc3_exynos *exynos; @@ -142,7 +133,7 @@ static int dwc3_exynos_remove(struct platform_device *pdev) struct dwc3_exynos *exynos = platform_get_drvdata(pdev); int i; - device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child); + of_platform_depopulate(&pdev->dev); for (i = exynos->num_clks - 1; i >= 0; i--) clk_disable_unprepare(exynos->clks[i]); diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index e196673f5c64..efaf0db595f4 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -242,7 +242,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, break; case OMAP_DWC3_ID_FLOAT: - if (omap->vbus_reg) + if (omap->vbus_reg && regulator_is_enabled(omap->vbus_reg)) regulator_disable(omap->vbus_reg); val = dwc3_omap_read_utmi_ctrl(omap); val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG; diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 492977514d95..73c20a93208e 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -227,7 +227,7 @@ static void dwc3_pci_resume_work(struct work_struct *work) int ret; ret = pm_runtime_get_sync(&dwc3->dev); - if (ret) { + if (ret < 0) { pm_runtime_put_sync_autosuspend(&dwc3->dev); return; } diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 504f8af4d0f8..c711528f1995 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -296,6 +296,14 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom) icc_put(qcom->icc_path_apps); } +/* Only usable in contexts where the role can not change. 
*/ +static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom) +{ + struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); + + return dwc->xhci; +} + static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom) { if (qcom->hs_phy_irq) { @@ -342,7 +350,7 @@ static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom) } } -static int dwc3_qcom_suspend(struct dwc3_qcom *qcom) +static int dwc3_qcom_suspend(struct dwc3_qcom *qcom, bool wakeup) { u32 val; int i, ret; @@ -361,7 +369,7 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom) if (ret) dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret); - if (device_may_wakeup(qcom->dev)) + if (wakeup) dwc3_qcom_enable_interrupts(qcom); qcom->is_suspended = true; @@ -369,7 +377,7 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom) return 0; } -static int dwc3_qcom_resume(struct dwc3_qcom *qcom) +static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup) { int ret; int i; @@ -377,7 +385,7 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom) if (!qcom->is_suspended) return 0; - if (device_may_wakeup(qcom->dev)) + if (wakeup) dwc3_qcom_disable_interrupts(qcom); for (i = 0; i < qcom->num_clocks; i++) { @@ -411,7 +419,11 @@ static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data) if (qcom->pm_suspended) return IRQ_HANDLED; - if (dwc->xhci) + /* + * This is safe as role switching is done from a freezable workqueue + * and the wakeup interrupts are disabled as part of resume. + */ + if (dwc3_qcom_is_host(qcom)) pm_runtime_resume(&dwc->xhci->dev); return IRQ_HANDLED; @@ -443,9 +455,9 @@ static int dwc3_qcom_get_irq(struct platform_device *pdev, int ret; if (np) - ret = platform_get_irq_byname(pdev_irq, name); + ret = platform_get_irq_byname_optional(pdev_irq, name); else - ret = platform_get_irq(pdev_irq, num); + ret = platform_get_irq_optional(pdev_irq, num); return ret; } @@ -873,9 +885,11 @@ static int dwc3_qcom_remove(struct platform_device *pdev) static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev) { struct dwc3_qcom *qcom = dev_get_drvdata(dev); + bool wakeup = device_may_wakeup(dev); int ret = 0; - ret = dwc3_qcom_suspend(qcom); + + ret = dwc3_qcom_suspend(qcom, wakeup); if (!ret) qcom->pm_suspended = true; @@ -885,9 +899,10 @@ static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev) static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev) { struct dwc3_qcom *qcom = dev_get_drvdata(dev); + bool wakeup = device_may_wakeup(dev); int ret; - ret = dwc3_qcom_resume(qcom); + ret = dwc3_qcom_resume(qcom, wakeup); if (!ret) qcom->pm_suspended = false; @@ -898,14 +913,14 @@ static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev) { struct dwc3_qcom *qcom = dev_get_drvdata(dev); - return dwc3_qcom_suspend(qcom); + return dwc3_qcom_suspend(qcom, true); } static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev) { struct dwc3_qcom *qcom = dev_get_drvdata(dev); - return dwc3_qcom_resume(qcom); + return dwc3_qcom_resume(qcom, true); } static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = { diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 7ae1b4a663d1..132c903c9ae6 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -826,9 +826,9 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { struct usb_ctrlrequest *ctrl = (void *) dwc->ep0_trb; - struct dwc3_vendor *vdwc = container_of(dwc, struct dwc3_vendor, dwc); int ret = -EINVAL; u32 len; + struct dwc3_vendor *vdwc = container_of(dwc, struct dwc3_vendor, 
dwc); if (!dwc->gadget_driver || !vdwc->softconnect || !dwc->connected) goto out; @@ -1132,7 +1132,7 @@ void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep) static void dwc3_ep0_xfernotready(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { - struct dwc3_vendor *vdwc = container_of(dwc, struct dwc3_vendor, dwc); + struct dwc3_vendor *vdwc = container_of(dwc, struct dwc3_vendor, dwc); switch (event->status) { case DEPEVT_STATUS_CONTROL_DATA: diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 637bb2ea78f7..68ca3b99396e 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -291,7 +291,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, * * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2 */ - if (dwc->gadget->speed <= USB_SPEED_HIGH) { + if (dwc->gadget->speed <= USB_SPEED_HIGH || + DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER) { reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) { saved_config |= DWC3_GUSB2PHYCFG_SUSPHY; @@ -331,9 +332,17 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, } } - dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0); - dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1); - dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2); + /* + * For some commands such as Update Transfer command, DEPCMDPARn + * registers are reserved. Since the driver often sends Update Transfer + * command, don't write to DEPCMDPARn to avoid register write delays and + * improve performance. + */ + if (DWC3_DEPCMD_CMD(cmd) != DWC3_DEPCMD_UPDATETRANSFER) { + dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0); + dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1); + dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2); + } /* * Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're @@ -1157,13 +1166,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) reg &= ~DWC3_DALEPENA_EP(dep->number); dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); - /* Clear out the ep descriptors for non-ep0 */ - if (dep->number > 1) { - dep->endpoint.comp_desc = NULL; - dep->endpoint.desc = NULL; - } - - dwc3_remove_requests(dwc, dep, -ECONNRESET); + dwc3_remove_requests(dwc, dep, -ESHUTDOWN); dep->stream_capable = false; dep->type = 0; @@ -1177,6 +1180,12 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) mask |= (DWC3_EP_DELAY_STOP | DWC3_EP_TRANSFER_STARTED); dep->flags &= mask; + /* Clear out the ep descriptors for non-ep0 */ + if (dep->number > 1) { + dep->endpoint.comp_desc = NULL; + dep->endpoint.desc = NULL; + } + return 0; } @@ -1332,17 +1341,49 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) return trbs_left; } -static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, - dma_addr_t dma, unsigned int length, unsigned int chain, - unsigned int node, unsigned int stream_id, - unsigned int short_not_ok, unsigned int no_interrupt, - unsigned int is_last, bool must_interrupt) +/** + * dwc3_prepare_one_trb - setup one TRB from one request + * @dep: endpoint for which this request is prepared + * @req: dwc3_request pointer + * @trb_length: buffer size of the TRB + * @chain: should this TRB be chained to the next? + * @node: only for isochronous endpoints. First TRB needs different type. 
+ * @use_bounce_buffer: set to use bounce buffer + * @must_interrupt: set to interrupt on TRB completion + */ +static void dwc3_prepare_one_trb(struct dwc3_ep *dep, + struct dwc3_request *req, unsigned int trb_length, + unsigned int chain, unsigned int node, bool use_bounce_buffer, + bool must_interrupt) { + struct dwc3_trb *trb; + dma_addr_t dma; + unsigned int stream_id = req->request.stream_id; + unsigned int short_not_ok = req->request.short_not_ok; + unsigned int no_interrupt = req->request.no_interrupt; + unsigned int is_last = req->request.is_last; struct dwc3 *dwc = dep->dwc; struct usb_gadget *gadget = dwc->gadget; enum usb_device_speed speed = gadget->speed; - trb->size = DWC3_TRB_SIZE_LENGTH(length); + if (use_bounce_buffer) + dma = dep->dwc->bounce_addr; + else if (req->request.num_sgs > 0) + dma = sg_dma_address(req->start_sg); + else + dma = req->request.dma; + + trb = &dep->trb_pool[dep->trb_enqueue]; + + if (!req->trb) { + dwc3_gadget_move_started_request(req); + req->trb = trb; + req->trb_dma = dwc3_trb_dma_offset(dep, trb); + } + + req->num_trbs++; + + trb->size = DWC3_TRB_SIZE_LENGTH(trb_length); trb->bpl = lower_32_bits(dma); trb->bph = upper_32_bits(dma); @@ -1382,10 +1423,10 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, unsigned int mult = 2; unsigned int maxp = usb_endpoint_maxp(ep->desc); - if (length <= (2 * maxp)) + if (req->request.length <= (2 * maxp)) mult--; - if (length <= maxp) + if (req->request.length <= maxp) mult--; trb->size |= DWC3_TRB_SIZE_PCM1(mult); @@ -1394,8 +1435,8 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; } - /* always enable Interrupt on Missed ISOC */ - trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; + if (!no_interrupt && !chain) + trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; break; case USB_ENDPOINT_XFER_BULK: @@ -1454,50 +1495,6 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, trace_dwc3_prepare_trb(dep, trb); } -/** - * dwc3_prepare_one_trb - setup one TRB from one request - * @dep: endpoint for which this request is prepared - * @req: dwc3_request pointer - * @trb_length: buffer size of the TRB - * @chain: should this TRB be chained to the next? - * @node: only for isochronous endpoints. First TRB needs different type. 
- * @use_bounce_buffer: set to use bounce buffer - * @must_interrupt: set to interrupt on TRB completion - */ -static void dwc3_prepare_one_trb(struct dwc3_ep *dep, - struct dwc3_request *req, unsigned int trb_length, - unsigned int chain, unsigned int node, bool use_bounce_buffer, - bool must_interrupt) -{ - struct dwc3_trb *trb; - dma_addr_t dma; - unsigned int stream_id = req->request.stream_id; - unsigned int short_not_ok = req->request.short_not_ok; - unsigned int no_interrupt = req->request.no_interrupt; - unsigned int is_last = req->request.is_last; - - if (use_bounce_buffer) - dma = dep->dwc->bounce_addr; - else if (req->request.num_sgs > 0) - dma = sg_dma_address(req->start_sg); - else - dma = req->request.dma; - - trb = &dep->trb_pool[dep->trb_enqueue]; - - if (!req->trb) { - dwc3_gadget_move_started_request(req); - req->trb = trb; - req->trb_dma = dwc3_trb_dma_offset(dep, trb); - } - - req->num_trbs++; - - __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node, - stream_id, short_not_ok, no_interrupt, is_last, - must_interrupt); -} - static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req) { unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); @@ -2094,13 +2091,11 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) * errors which will force us issue EndTransfer command. */ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { - if (!(dep->flags & DWC3_EP_PENDING_REQUEST) && - !(dep->flags & DWC3_EP_TRANSFER_STARTED)) - return 0; - - if ((dep->flags & DWC3_EP_PENDING_REQUEST)) { - if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) + if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) { + if ((dep->flags & DWC3_EP_PENDING_REQUEST)) return __dwc3_gadget_start_isoc(dep); + + return 0; } } @@ -3171,6 +3166,7 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) struct dwc3 *dwc = dep->dwc; u32 mdwidth; int size; + int maxpacket; mdwidth = dwc3_mdwidth(dwc); @@ -3183,21 +3179,24 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) else size = DWC31_GTXFIFOSIZ_TXFDEP(size); - /* FIFO Depth is in MDWDITH bytes. Multiply */ - size *= mdwidth; - /* - * To meet performance requirement, a minimum TxFIFO size of 3x - * MaxPacketSize is recommended for endpoints that support burst and a - * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't - * support burst. Use those numbers and we can calculate the max packet - * limit as below. 
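The __dwc3_gadget_ep_queue() hunk above folds two overlapping isochronous checks into one nesting: if no transfer has been started yet, either kick one off (when a request is already pending) or simply leave the request queued. Condensed for illustration only:

if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
    !(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
	if (dep->flags & DWC3_EP_PENDING_REQUEST)
		return __dwc3_gadget_start_isoc(dep);	/* kick now */
	return 0;	/* queued; the transfer is started later */
}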
+ * The maxpacket size is determined as part of the following, assuming + * a mult value of one: + * DWC3 revision 280A and prior: + * fifo_size = mult * (max_packet / mdwidth) + 1; + * maxpacket = mdwidth * (fifo_size - 1); + * + * DWC3 revision 290A and onwards: + * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1; + * maxpacket = mdwidth * ((fifo_size - 1) - 1) - mdwidth; */ - if (dwc->maximum_speed >= USB_SPEED_SUPER) - size /= 3; + if (DWC3_VER_IS_PRIOR(DWC3, 290A)) + maxpacket = mdwidth * (size - 1); else - size /= 2; + maxpacket = mdwidth * ((size - 1) - 1) - mdwidth; + /* Functionally, space for one max packet is sufficient */ + size = min_t(int, maxpacket, 1024); /* * If enable tx fifos resize, set each in ep maxpacket * to 1024, it can avoid being dependent on the default */ if (dwc->do_fifo_resize) size = 1024; - usb_ep_set_maxpacket_limit(&dep->endpoint, size); dep->endpoint.max_streams = 16; @@ -3432,6 +3430,10 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep, if (event->status & DEPEVT_STATUS_SHORT && !chain) return 1; + if ((trb->ctrl & DWC3_TRB_CTRL_ISP_IMI) && + DWC3_TRB_SIZE_TRBSTS(trb->size) == DWC3_TRBSTS_MISSED_ISOC) + return 1; + if ((trb->ctrl & DWC3_TRB_CTRL_IOC) || (trb->ctrl & DWC3_TRB_CTRL_LST)) return 1; @@ -3485,6 +3487,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, struct dwc3_request *req, int status) { struct dwc3 *dwc = dep->dwc; + int request_status; int ret; if (req->request.num_mapped_sgs) @@ -3527,7 +3530,35 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, goto out; } - dwc3_gadget_giveback(dep, req, status); + /* + * The event status only reflects the status of the TRB with IOC set. + * For the requests that don't set interrupt on completion, the driver + * needs to check and return the status of the completed TRBs associated + * with the request. Use the status of the last TRB of the request. 
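Plugging representative numbers into the fifo_size/maxpacket formulas above makes the revision split concrete. The values below are assumed for illustration (mdwidth = 8 bytes, a reported TxFIFO depth of 131), not taken from the patch:

/*
 * DWC3 <= 280A: maxpacket = 8 * (131 - 1)           = 1040
 * DWC3 >= 290A: maxpacket = 8 * ((131 - 1) - 1) - 8 = 1024
 *
 * In both cases size = min_t(int, maxpacket, 1024) then clamps the
 * advertised limit to the 1024-byte USB maximum packet size.
 */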
+ */ + if (req->request.no_interrupt) { + struct dwc3_trb *trb; + + trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue); + switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) { + case DWC3_TRBSTS_MISSED_ISOC: + /* Isoc endpoint only */ + request_status = -EXDEV; + break; + case DWC3_TRB_STS_XFER_IN_PROG: + /* Applicable when End Transfer with ForceRM=0 */ + case DWC3_TRBSTS_SETUP_PENDING: + /* Control endpoint only */ + case DWC3_TRBSTS_OK: + default: + request_status = 0; + break; + } + } else { + request_status = status; + } + + dwc3_gadget_giveback(dep, req, request_status); out: return ret; @@ -4472,7 +4503,6 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) struct dwc3 *dwc = evt->dwc; irqreturn_t ret = IRQ_NONE; int left; - u32 reg; left = evt->count; @@ -4500,19 +4530,20 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) } evt->count = 0; - evt->flags &= ~DWC3_EVENT_PENDING; ret = IRQ_HANDLED; /* Unmask interrupt */ - reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); - reg &= ~DWC3_GEVNTSIZ_INTMASK; - dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); + dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), + DWC3_GEVNTSIZ_SIZE(evt->length)); if (dwc->imod_interval) { dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); } + /* Keep the clearing of DWC3_EVENT_PENDING at the end */ + evt->flags &= ~DWC3_EVENT_PENDING; + return ret; } @@ -4537,7 +4568,6 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) struct dwc3 *dwc = evt->dwc; u32 amount; u32 count; - u32 reg; if (pm_runtime_suspended(dwc->dev)) { pm_runtime_get(dwc->dev); @@ -4564,9 +4594,8 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) evt->flags |= DWC3_EVENT_PENDING; /* Mask interrupt */ - reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0)); - reg |= DWC3_GEVNTSIZ_INTMASK; - dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); + dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), + DWC3_GEVNTSIZ_INTMASK | DWC3_GEVNTSIZ_SIZE(evt->length)); amount = min(count, evt->length - evt->lpos); memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount); diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index a66f16e8816e..90ff19865242 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -133,4 +133,5 @@ err: void dwc3_host_exit(struct dwc3 *dwc) { platform_device_unregister(dwc->xhci); + dwc->xhci = NULL; } diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index f2a2f11936b3..81f680852def 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -16,7 +16,7 @@ #include #ifdef CONFIG_USB_CONFIGFS_F_ACC -extern int acc_ctrlrequest(struct usb_composite_dev *cdev, +extern int acc_ctrlrequest_composite(struct usb_composite_dev *cdev, const struct usb_ctrlrequest *ctrl); void acc_disconnect(void); #endif @@ -1576,7 +1576,7 @@ static int android_setup(struct usb_gadget *gadget, #ifdef CONFIG_USB_CONFIGFS_F_ACC if (value < 0) - value = acc_ctrlrequest(cdev, c); + value = acc_ctrlrequest_composite(cdev, c); #endif if (value < 0) diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c index 3510f6d39f0c..7d35d6c58e47 100644 --- a/drivers/usb/gadget/function/f_accessory.c +++ b/drivers/usb/gadget/function/f_accessory.c @@ -1085,6 +1085,26 @@ err: } EXPORT_SYMBOL_GPL(acc_ctrlrequest); +int acc_ctrlrequest_composite(struct usb_composite_dev *cdev, + const struct usb_ctrlrequest *ctrl) +{ + u16 w_length = 
le16_to_cpu(ctrl->wLength); + + if (w_length > USB_COMP_EP0_BUFSIZ) { + if (ctrl->bRequestType & USB_DIR_IN) { + /* Cast away the const, we are going to overwrite on purpose. */ + __le16 *temp = (__le16 *)&ctrl->wLength; + + *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ); + w_length = USB_COMP_EP0_BUFSIZ; + } else { + return -EINVAL; + } + } + return acc_ctrlrequest(cdev, ctrl); +} +EXPORT_SYMBOL_GPL(acc_ctrlrequest_composite); + static int __acc_function_bind(struct usb_configuration *c, struct usb_function *f, bool configfs) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 9b806afcb71b..8514ab7a3568 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -279,6 +279,11 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len) struct usb_request *req = ffs->ep0req; int ret; + if (!req) { + spin_unlock_irq(&ffs->ev.waitq.lock); + return -EINVAL; + } + req->zero = len < le16_to_cpu(ffs->ev.setup.wLength); spin_unlock_irq(&ffs->ev.waitq.lock); @@ -1891,10 +1896,14 @@ static void functionfs_unbind(struct ffs_data *ffs) ENTER(); if (!WARN_ON(!ffs->gadget)) { + /* dequeue before freeing ep0req */ + usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req); + mutex_lock(&ffs->mutex); usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req); ffs->ep0req = NULL; ffs->gadget = NULL; clear_bit(FFS_FL_BOUND, &ffs->flags); + mutex_unlock(&ffs->mutex); ffs_data_put(ffs); } } diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 8cb199f52b52..97e927eacc62 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -71,7 +71,7 @@ struct f_hidg { wait_queue_head_t write_queue; struct usb_request *req; - int minor; + struct device dev; struct cdev cdev; struct usb_function func; @@ -84,6 +84,14 @@ static inline struct f_hidg *func_to_hidg(struct usb_function *f) return container_of(f, struct f_hidg, func); } +static void hidg_release(struct device *dev) +{ + struct f_hidg *hidg = container_of(dev, struct f_hidg, dev); + + kfree(hidg->set_report_buf); + kfree(hidg); +} + /*-------------------------------------------------------------------------*/ /* Static descriptors */ @@ -904,9 +912,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) struct usb_ep *ep; struct f_hidg *hidg = func_to_hidg(f); struct usb_string *us; - struct device *device; int status; - dev_t dev; /* maybe allocate device-global string IDs, and patch descriptors */ us = usb_gstrings_attach(c->cdev, ct_func_strings, @@ -999,21 +1005,11 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) /* create char device */ cdev_init(&hidg->cdev, &f_hidg_fops); - dev = MKDEV(major, hidg->minor); - status = cdev_add(&hidg->cdev, dev, 1); + status = cdev_device_add(&hidg->cdev, &hidg->dev); if (status) goto fail_free_descs; - device = device_create(hidg_class, NULL, dev, NULL, - "%s%d", "hidg", hidg->minor); - if (IS_ERR(device)) { - status = PTR_ERR(device); - goto del; - } - return 0; -del: - cdev_del(&hidg->cdev); fail_free_descs: usb_free_all_descriptors(f); fail: @@ -1244,9 +1240,7 @@ static void hidg_free(struct usb_function *f) hidg = func_to_hidg(f); opts = container_of(f->fi, struct f_hid_opts, func_inst); - kfree(hidg->report_desc); - kfree(hidg->set_report_buf); - kfree(hidg); + put_device(&hidg->dev); mutex_lock(&opts->lock); --opts->refcnt; mutex_unlock(&opts->lock); @@ -1256,8 +1250,7 @@ static void hidg_unbind(struct usb_configuration *c, 
struct usb_function *f) { struct f_hidg *hidg = func_to_hidg(f); - device_destroy(hidg_class, MKDEV(major, hidg->minor)); - cdev_del(&hidg->cdev); + cdev_device_del(&hidg->cdev, &hidg->dev); usb_free_all_descriptors(f); } @@ -1266,6 +1259,7 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) { struct f_hidg *hidg; struct f_hid_opts *opts; + int ret; /* allocate and initialize one new instance */ hidg = kzalloc(sizeof(*hidg), GFP_KERNEL); @@ -1277,17 +1271,27 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) mutex_lock(&opts->lock); ++opts->refcnt; - hidg->minor = opts->minor; + device_initialize(&hidg->dev); + hidg->dev.release = hidg_release; + hidg->dev.class = hidg_class; + hidg->dev.devt = MKDEV(major, opts->minor); + ret = dev_set_name(&hidg->dev, "hidg%d", opts->minor); + if (ret) { + --opts->refcnt; + mutex_unlock(&opts->lock); + return ERR_PTR(ret); + } + hidg->bInterfaceSubClass = opts->subclass; hidg->bInterfaceProtocol = opts->protocol; hidg->report_length = opts->report_length; hidg->report_desc_length = opts->report_desc_length; if (opts->report_desc) { - hidg->report_desc = kmemdup(opts->report_desc, - opts->report_desc_length, - GFP_KERNEL); + hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc, + opts->report_desc_length, + GFP_KERNEL); if (!hidg->report_desc) { - kfree(hidg); + put_device(&hidg->dev); mutex_unlock(&opts->lock); return ERR_PTR(-ENOMEM); } diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 12fe0be35528..7790fbed443c 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -1191,13 +1191,14 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) u8 format; int i, len; + format = common->cmnd[2] & 0xf; + if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ - start_track > 1) { + (start_track > 1 && format != 0x1)) { curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } - format = common->cmnd[2] & 0xf; /* * Check if CDB is old style SFF-8020i * i.e. format is in 2 MSBs of byte 9 @@ -1207,8 +1208,8 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) format = (common->cmnd[9] >> 6) & 0x3; switch (format) { - case 0: - /* Formatted TOC */ + case 0: /* Formatted TOC */ + case 1: /* Multi-session info */ len = 4 + 2*8; /* 4 byte header + 2 descriptors */ memset(buf, 0, len); buf[1] = len - 2; /* TOC Length excludes length field */ @@ -1249,7 +1250,7 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) return len; default: - /* Multi-session, PMA, ATIP, CD-TEXT not supported/required */ + /* PMA, ATIP, CD-TEXT not supported/required */ curlun->sense_data = SS_INVALID_FIELD_IN_CDB; return -EINVAL; } diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 61bbf27b5203..939f174a956c 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -89,7 +89,7 @@ struct printer_dev { u8 printer_cdev_open; wait_queue_head_t wait; unsigned q_len; - char *pnp_string; /* We don't own memory! */ + char **pnp_string; /* We don't own memory! 
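					   it lives in f_printer_opts and may be
					   replaced via configfs, so dereference
					   it at time of use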
*/ struct usb_function function; }; @@ -1001,16 +1001,16 @@ static int printer_func_setup(struct usb_function *f, if ((wIndex>>8) != dev->interface) break; - if (!dev->pnp_string) { + if (!*dev->pnp_string) { value = 0; break; } - value = strlen(dev->pnp_string); + value = strlen(*dev->pnp_string); buf[0] = (value >> 8) & 0xFF; buf[1] = value & 0xFF; - memcpy(buf + 2, dev->pnp_string, value); + memcpy(buf + 2, *dev->pnp_string, value); DBG(dev, "1284 PNP String: %x %s\n", value, - dev->pnp_string); + *dev->pnp_string); break; case GET_PORT_STATUS: /* Get Port Status */ @@ -1476,7 +1476,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi) kref_init(&dev->kref); ++opts->refcnt; dev->minor = opts->minor; - dev->pnp_string = opts->pnp_string; + dev->pnp_string = &opts->pnp_string; dev->q_len = opts->q_len; mutex_unlock(&opts->lock); diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index f810e8e0b29a..08726e4c68a5 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -1274,11 +1274,9 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) agdev->out_ep_maxpsize = max_t(u16, agdev->out_ep_maxpsize, le16_to_cpu(ss_epout_desc.wMaxPacketSize)); - ss_epin_desc_comp.wBytesPerInterval = ss_epin_desc.wMaxPacketSize; ss_epout_desc_comp.wBytesPerInterval = ss_epout_desc.wMaxPacketSize; - // HS and SS endpoint addresses are copied from autoconfigured FS descriptors hs_ep_int_desc.bEndpointAddress = fs_ep_int_desc.bEndpointAddress; hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress; diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index 4b073436e25c..4b67731118b7 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -284,8 +284,9 @@ uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req) memset(&v4l2_event, 0, sizeof(v4l2_event)); v4l2_event.type = UVC_EVENT_DATA; - uvc_event->data.length = req->actual; - memcpy(&uvc_event->data.data, req->buf, req->actual); + uvc_event->data.length = min_t(unsigned int, req->actual, + sizeof(uvc_event->data.data)); + memcpy(&uvc_event->data.data, req->buf, uvc_event->data.length); v4l2_event_queue(&uvc->vdev, &v4l2_event); } } @@ -1192,13 +1193,14 @@ static void uvc_free(struct usb_function *f) kfree(uvc); } -static void uvc_unbind(struct usb_configuration *c, struct usb_function *f) +static void uvc_function_unbind(struct usb_configuration *c, + struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct uvc_device *uvc = to_uvc(f); long wait_ret = 1; - uvcg_info(f, "%s\n", __func__); + uvcg_info(f, "%s()\n", __func__); /* If we know we're connected via v4l2, then there should be a cleanup * of the device from userspace either via UVC_EVENT_DISCONNECT or @@ -1276,7 +1278,7 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi) /* Register the function. 
*/ uvc->func.name = "uvc"; uvc->func.bind = uvc_function_bind; - uvc->func.unbind = uvc_unbind; + uvc->func.unbind = uvc_function_unbind; uvc->func.get_alt = uvc_function_get_alt; uvc->func.set_alt = uvc_function_set_alt; uvc->func.disable = uvc_function_disable; diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c index 2451e45ada6e..87bfe87daa86 100644 --- a/drivers/usb/gadget/function/storage_common.c +++ b/drivers/usb/gadget/function/storage_common.c @@ -294,8 +294,10 @@ EXPORT_SYMBOL_GPL(fsg_lun_fsync_sub); void store_cdrom_address(u8 *dest, int msf, u32 addr) { if (msf) { - /* Convert to Minutes-Seconds-Frames */ - addr >>= 2; /* Convert to 2048-byte frames */ + /* + * Convert to Minutes-Seconds-Frames. + * Sector size is already set to 2048 bytes. + */ addr += 2*75; /* Lead-in occupies 2 seconds */ dest[3] = addr % 75; /* Frames */ addr /= 75; diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index e1c7826f33e0..785e03fa1045 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -773,9 +773,13 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, dev->qmult = qmult; snprintf(net->name, sizeof(net->name), "%s%%d", netname); - if (get_ether_addr(dev_addr, net->dev_addr)) + if (get_ether_addr(dev_addr, net->dev_addr)) { + net->addr_assign_type = NET_ADDR_RANDOM; dev_warn(&g->dev, "using random %s ethernet address\n", "self"); + } else { + net->addr_assign_type = NET_ADDR_SET; + } if (get_ether_addr(host_addr, dev->host_mac)) dev_warn(&g->dev, "using random %s ethernet address\n", "host"); @@ -832,6 +836,9 @@ struct net_device *gether_setup_name_default(const char *netname) INIT_LIST_HEAD(&dev->tx_reqs); INIT_LIST_HEAD(&dev->rx_reqs); + /* by default we always have a random MAC address */ + net->addr_assign_type = NET_ADDR_RANDOM; + skb_queue_head_init(&dev->rx_frames); /* network device setup */ @@ -869,7 +876,6 @@ int gether_register_netdev(struct net_device *net) g = dev->gadget; memcpy(net->dev_addr, dev->dev_mac, ETH_ALEN); - net->addr_assign_type = NET_ADDR_RANDOM; status = register_netdev(net); if (status < 0) { @@ -909,6 +915,7 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr) if (get_ether_addr(dev_addr, new_addr)) return -EINVAL; memcpy(dev->dev_mac, new_addr, ETH_ALEN); + net->addr_assign_type = NET_ADDR_SET; return 0; } EXPORT_SYMBOL_GPL(gether_set_dev_addr); diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index 61a75e1a886d..4e242f2aae85 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -2848,6 +2848,7 @@ static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\ const char *page, size_t len) \ { \ struct f_uvc_opts *opts = to_f_uvc_opts(item); \ + int size = min(sizeof(opts->aname), len + 1); \ int ret = 0; \ \ mutex_lock(&opts->lock); \ @@ -2856,8 +2857,9 @@ static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\ goto end; \ } \ \ - ret = snprintf(opts->aname, min(sizeof(opts->aname), len), \ - "%s", page); \ + ret = strscpy(opts->aname, page, size); \ + if (ret == -E2BIG) \ + ret = size - 1; \ \ end: \ mutex_unlock(&opts->lock); \ diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index bec9e6155d61..b69ae5e73a72 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c @@ -235,7 
+235,7 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req) break; default: - uvcg_info(&video->uvc->func, + uvcg_warn(&video->uvc->func, "VS request completed with status %d.\n", req->status); uvcg_queue_cancel(queue, 0); diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 454860d52ce7..cd097474b6c3 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -362,6 +362,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) spin_unlock_irq (&epdata->dev->lock); DBG (epdata->dev, "endpoint gone\n"); + wait_for_completion(&done); epdata->status = -ENODEV; } } diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index 33efa6915b91..b496ca937dee 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -35,6 +36,9 @@ MODULE_LICENSE("GPL"); /*----------------------------------------------------------------------*/ +static DEFINE_IDA(driver_id_numbers); +#define DRIVER_DRIVER_NAME_LENGTH_MAX 32 + #define RAW_EVENT_QUEUE_SIZE 16 struct raw_event_queue { @@ -144,6 +148,7 @@ enum dev_state { STATE_DEV_INVALID = 0, STATE_DEV_OPENED, STATE_DEV_INITIALIZED, + STATE_DEV_REGISTERING, STATE_DEV_RUNNING, STATE_DEV_CLOSED, STATE_DEV_FAILED @@ -159,6 +164,9 @@ struct raw_dev { /* Reference to misc device: */ struct device *dev; + /* Make driver names unique */ + int driver_id_number; + /* Protected by lock: */ enum dev_state state; bool gadget_registered; @@ -187,6 +195,7 @@ static struct raw_dev *dev_new(void) spin_lock_init(&dev->lock); init_completion(&dev->ep0_done); raw_event_queue_init(&dev->queue); + dev->driver_id_number = -1; return dev; } @@ -197,6 +206,9 @@ static void dev_free(struct kref *kref) kfree(dev->udc_name); kfree(dev->driver.udc_name); + kfree(dev->driver.driver.name); + if (dev->driver_id_number >= 0) + ida_free(&driver_id_numbers, dev->driver_id_number); if (dev->req) { if (dev->ep0_urb_queued) usb_ep_dequeue(dev->gadget->ep0, dev->req); @@ -417,9 +429,11 @@ out_put: static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) { int ret = 0; + int driver_id_number; struct usb_raw_init arg; char *udc_driver_name; char *udc_device_name; + char *driver_driver_name; unsigned long flags; if (copy_from_user(&arg, (void __user *)value, sizeof(arg))) @@ -438,36 +452,43 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) return -EINVAL; } + driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL); + if (driver_id_number < 0) + return driver_id_number; + + driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL); + if (!driver_driver_name) { + ret = -ENOMEM; + goto out_free_driver_id_number; + } + snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX, + DRIVER_NAME ".%d", driver_id_number); + udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL); - if (!udc_driver_name) - return -ENOMEM; + if (!udc_driver_name) { + ret = -ENOMEM; + goto out_free_driver_driver_name; + } ret = strscpy(udc_driver_name, &arg.driver_name[0], UDC_NAME_LENGTH_MAX); - if (ret < 0) { - kfree(udc_driver_name); - return ret; - } + if (ret < 0) + goto out_free_udc_driver_name; ret = 0; udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL); if (!udc_device_name) { - kfree(udc_driver_name); - return -ENOMEM; + ret = -ENOMEM; + goto out_free_udc_driver_name; } ret = strscpy(udc_device_name, &arg.device_name[0], 
UDC_NAME_LENGTH_MAX); - if (ret < 0) { - kfree(udc_driver_name); - kfree(udc_device_name); - return ret; - } + if (ret < 0) + goto out_free_udc_device_name; ret = 0; spin_lock_irqsave(&dev->lock, flags); if (dev->state != STATE_DEV_OPENED) { dev_dbg(dev->dev, "fail, device is not opened\n"); - kfree(udc_driver_name); - kfree(udc_device_name); ret = -EINVAL; goto out_unlock; } @@ -482,14 +503,25 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) dev->driver.suspend = gadget_suspend; dev->driver.resume = gadget_resume; dev->driver.reset = gadget_reset; - dev->driver.driver.name = DRIVER_NAME; + dev->driver.driver.name = driver_driver_name; dev->driver.udc_name = udc_device_name; dev->driver.match_existing_only = 1; + dev->driver_id_number = driver_id_number; dev->state = STATE_DEV_INITIALIZED; + spin_unlock_irqrestore(&dev->lock, flags); + return ret; out_unlock: spin_unlock_irqrestore(&dev->lock, flags); +out_free_udc_device_name: + kfree(udc_device_name); +out_free_udc_driver_name: + kfree(udc_driver_name); +out_free_driver_driver_name: + kfree(driver_driver_name); +out_free_driver_id_number: + ida_free(&driver_id_numbers, driver_id_number); return ret; } @@ -507,6 +539,7 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value) ret = -EINVAL; goto out_unlock; } + dev->state = STATE_DEV_REGISTERING; spin_unlock_irqrestore(&dev->lock, flags); ret = usb_gadget_probe_driver(&dev->driver); diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index 933e80d5053a..f28e1bbd5724 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -311,7 +311,7 @@ source "drivers/usb/gadget/udc/bdc/Kconfig" config USB_AMD5536UDC tristate "AMD5536 UDC" - depends on USB_PCI + depends on USB_PCI && HAS_DMA select USB_SNP_CORE help The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge. diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c index bfd8e77788e2..3a4ccc722db5 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c @@ -1033,8 +1033,10 @@ static int ast_vhub_init_desc(struct ast_vhub *vhub) /* Initialize vhub String Descriptors. 
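	   The vhub-strings node reference taken by of_get_child_by_name()
	   below is dropped with of_node_put() once it has been parsed.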
*/ INIT_LIST_HEAD(&vhub->vhub_str_desc); desc_np = of_get_child_by_name(vhub_np, "vhub-strings"); - if (desc_np) + if (desc_np) { ret = ast_vhub_of_parse_str_desc(vhub, desc_np); + of_node_put(desc_np); + } else ret = ast_vhub_str_alloc_add(vhub, &ast_vhub_strings); diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c index 248426a3e88a..5f0b3fd93631 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_udc.c +++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c @@ -151,6 +151,7 @@ static void bdc_uspc_disconnected(struct bdc *bdc, bool reinit) bdc->delayed_status = false; bdc->reinit = reinit; bdc->test_mode = false; + usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED); } /* TNotify wkaeup timer */ diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index 3f1c62adce4b..314cb5ea061a 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -3015,6 +3015,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev) } udc->isp1301_i2c_client = isp1301_get_client(isp1301_node); + of_node_put(isp1301_node); if (!udc->isp1301_i2c_client) { return -EPROBE_DEFER; } diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index 57ee72fead45..3ebc8c5416e3 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -32,9 +32,6 @@ #include /* XUSB_DEV registers */ -#define SPARAM 0x000 -#define SPARAM_ERSTMAX_MASK GENMASK(20, 16) -#define SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK) #define DB 0x004 #define DB_TARGET_MASK GENMASK(15, 8) #define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK) @@ -275,8 +272,10 @@ BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff) BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff) BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff) BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff) -BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff) +BUILD_EP_CONTEXT_RW(rsvd, rsvd[0], 24, 0x1) BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1) +BUILD_EP_CONTEXT_RW(splitxstate, rsvd[0], 26, 0x1) +BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 27, 0x1f) BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3) BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff) BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f) @@ -1557,6 +1556,9 @@ static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt) ep_reload(xudc, ep->index); ep_ctx_write_state(ep->context, EP_STATE_RUNNING); + ep_ctx_write_rsvd(ep->context, 0); + ep_ctx_write_partial_td(ep->context, 0); + ep_ctx_write_splitxstate(ep->context, 0); ep_ctx_write_seq_num(ep->context, 0); ep_reload(xudc, ep->index); @@ -2812,7 +2814,10 @@ static void tegra_xudc_reset(struct tegra_xudc *xudc) xudc->setup_seq_num = 0; xudc->queued_setup_packet = false; - ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num); + ep_ctx_write_rsvd(ep0->context, 0); + ep_ctx_write_partial_td(ep0->context, 0); + ep_ctx_write_splitxstate(ep0->context, 0); + ep_ctx_write_seq_num(ep0->context, 0); deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]); @@ -3295,11 +3300,6 @@ static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc) unsigned int i; u32 val; - val = xudc_readl(xudc, SPARAM); - val &= ~(SPARAM_ERSTMAX_MASK); - val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS); - xudc_writel(xudc, val, SPARAM); - for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) { memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE * sizeof(*xudc->event_ring[i])); @@ -3693,15 +3693,15 @@ static int 
tegra_xudc_powerdomain_init(struct tegra_xudc *xudc) int err; xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev"); - if (IS_ERR(xudc->genpd_dev_device)) { - err = PTR_ERR(xudc->genpd_dev_device); + if (IS_ERR_OR_NULL(xudc->genpd_dev_device)) { + err = PTR_ERR(xudc->genpd_dev_device) ? : -ENODATA; dev_err(dev, "failed to get device power domain: %d\n", err); return err; } xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss"); - if (IS_ERR(xudc->genpd_dev_ss)) { - err = PTR_ERR(xudc->genpd_dev_ss); + if (IS_ERR_OR_NULL(xudc->genpd_dev_ss)) { + err = PTR_ERR(xudc->genpd_dev_ss) ? : -ENODATA; dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err); return err; } diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c index 2df52f75f6b3..7558cc4d90cc 100644 --- a/drivers/usb/host/bcma-hcd.c +++ b/drivers/usb/host/bcma-hcd.c @@ -285,7 +285,7 @@ static void bcma_hci_platform_power_gpio(struct bcma_device *dev, bool val) { struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev); - if (IS_ERR_OR_NULL(usb_dev->gpio_desc)) + if (!usb_dev->gpio_desc) return; gpiod_set_value(usb_dev->gpio_desc, val); @@ -406,9 +406,11 @@ static int bcma_hcd_probe(struct bcma_device *core) return -ENOMEM; usb_dev->core = core; - if (core->dev.of_node) - usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc", - GPIOD_OUT_HIGH); + usb_dev->gpio_desc = devm_gpiod_get_optional(&core->dev, "vcc", + GPIOD_OUT_HIGH); + if (IS_ERR(usb_dev->gpio_desc)) + return dev_err_probe(&core->dev, PTR_ERR(usb_dev->gpio_desc), + "error obtaining VCC GPIO"); switch (core->id.id) { case BCMA_CORE_USB20_HOST: diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index e87cf3a00fa4..638f03b89739 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -21,6 +21,9 @@ static const char hcd_name[] = "ehci-pci"; /* defined here to avoid adding to pci_ids.h for single instance use */ #define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70 +#define PCI_VENDOR_ID_ASPEED 0x1a03 +#define PCI_DEVICE_ID_ASPEED_EHCI 0x2603 + /*-------------------------------------------------------------------------*/ #define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC 0x0939 static inline bool is_intel_quark_x1000(struct pci_dev *pdev) @@ -222,6 +225,12 @@ static int ehci_pci_setup(struct usb_hcd *hcd) ehci->has_synopsys_hc_bug = 1; } break; + case PCI_VENDOR_ID_ASPEED: + if (pdev->device == PCI_DEVICE_ID_ASPEED_EHCI) { + ehci_info(ehci, "applying Aspeed HC workaround\n"); + ehci->is_aspeed = 1; + } + break; } /* optional debug port, normally in the first BAR */ diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c index 6bbaee74f7e7..28a19693c19f 100644 --- a/drivers/usb/host/ehci-ppc-of.c +++ b/drivers/usb/host/ehci-ppc-of.c @@ -148,6 +148,7 @@ static int ehci_hcd_ppc_of_probe(struct platform_device *op) } else { ehci->has_amcc_usb23 = 1; } + of_node_put(np); } if (of_get_property(dn, "big-endian", NULL)) { diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index 3055d9abfec3..3e5c54742bef 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c @@ -1541,10 +1541,12 @@ static int isp116x_remove(struct platform_device *pdev) iounmap(isp116x->data_reg); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - release_mem_region(res->start, 2); + if (res) + release_mem_region(res->start, 2); iounmap(isp116x->addr_reg); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, 2); + if (res) + 
release_mem_region(res->start, 2); usb_put_hcd(hcd); return 0; diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c index 85878e8ad331..106a6bcefb08 100644 --- a/drivers/usb/host/ohci-nxp.c +++ b/drivers/usb/host/ohci-nxp.c @@ -164,6 +164,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev) } isp1301_i2c_client = isp1301_get_client(isp1301_node); + of_node_put(isp1301_node); if (!isp1301_i2c_client) return -EPROBE_DEFER; diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c index 45f7cceb6df3..98e46725999e 100644 --- a/drivers/usb/host/ohci-ppc-of.c +++ b/drivers/usb/host/ohci-ppc-of.c @@ -169,6 +169,7 @@ static int ohci_hcd_ppc_of_probe(struct platform_device *op) release_mem_region(res.start, 0x4); } else pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__); + of_node_put(np); } irq_dispose_mapping(irq); diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index e832909a924f..6df2881cd7b9 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -3908,8 +3908,10 @@ static int oxu_bus_suspend(struct usb_hcd *hcd) } } + spin_unlock_irq(&oxu->lock); /* turn off now-idle HC */ del_timer_sync(&oxu->watchdog); + spin_lock_irq(&oxu->lock); ehci_halt(oxu); hcd->state = HC_STATE_SUSPENDED; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 57c94c60cecc..721b35612c9a 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1353,7 +1353,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, } spin_unlock_irqrestore(&xhci->lock, flags); if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex], - msecs_to_jiffies(100))) + msecs_to_jiffies(500))) xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n", hcd->self.busnum, wIndex + 1); spin_lock_irqsave(&xhci->lock, flags); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 644ddcb26e76..92218383ba07 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -714,7 +714,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, num_stream_ctxs, &stream_info->ctx_array_dma, mem_flags); if (!stream_info->stream_ctx_array) - goto cleanup_ctx; + goto cleanup_ring_array; memset(stream_info->stream_ctx_array, 0, sizeof(struct xhci_stream_ctx)*num_stream_ctxs); @@ -775,6 +775,11 @@ cleanup_rings: } xhci_free_command(xhci, stream_info->free_streams_command); cleanup_ctx: + xhci_free_stream_ctx(xhci, + stream_info->num_stream_ctxs, + stream_info->stream_ctx_array, + stream_info->ctx_array_dma); +cleanup_ring_array: kfree(stream_info->stream_rings); cleanup_info: kfree(stream_info); @@ -965,15 +970,19 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) if (dev->eps[i].stream_info) xhci_free_stream_info(xhci, dev->eps[i].stream_info); - /* Endpoints on the TT/root port lists should have been removed - * when usb_disable_device() was called for the device. - * We can't drop them anyway, because the udev might have gone - * away by this point, and we can't tell what speed it was. + /* + * Endpoints are normally deleted from the bandwidth list when + * endpoints are dropped, before device is freed. + * If host is dying or being removed then endpoints aren't + * dropped cleanly, so delete the endpoint from list here. + * Only applicable for hosts with software bandwidth checking. 
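+		 * For example, if the host died while a device still had
+		 * configured endpoints, those endpoints were never dropped
+		 * through xhci_drop_endpoint(), so unlink them from the
+		 * bandwidth list here before the virt device is freed.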
*/ - if (!list_empty(&dev->eps[i].bw_endpoint_list)) - xhci_warn(xhci, "Slot %u endpoint %u " - "not removed from BW list!\n", - slot_id, i); + + if (!list_empty(&dev->eps[i].bw_endpoint_list)) { + list_del_init(&dev->eps[i].bw_endpoint_list); + xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n", + slot_id, i); + } } /* If this is a hub, free the TT(s) from the TT list */ xhci_free_tt_info(xhci, dev, slot_id); diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index 5e003baabac9..3bd6d23ab807 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -449,16 +449,17 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) u32 num_esit, tmp; int base; int i, j; + u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX); num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit; + + if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP) + offset++; + for (i = 0; i < num_esit; i++) { base = offset + i * sch_ep->esit; - /* - * Compared with hs bus, no matter what ep type, - * the hub will always delay one uframe to send data - */ - for (j = 0; j < sch_ep->cs_count; j++) { + for (j = 0; j < uframes; j++) { tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe; if (tmp > FS_PAYLOAD_MAX) return -ESCH_BW_OVERFLOW; @@ -470,11 +471,9 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset) static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset) { - struct mu3h_sch_tt *tt = sch_ep->sch_tt; u32 extra_cs_count; u32 start_ss, last_ss; u32 start_cs, last_cs; - int i; start_ss = offset % 8; @@ -488,10 +487,6 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset) if (!(start_ss == 7 || last_ss < 6)) return -ESCH_SS_Y6; - for (i = 0; i < sch_ep->cs_count; i++) - if (test_bit(offset + i, tt->ss_bit_map)) - return -ESCH_SS_OVERLAP; - } else { u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX); @@ -518,9 +513,6 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset) if (cs_count > 7) cs_count = 7; /* HW limit */ - if (test_bit(offset, tt->ss_bit_map)) - return -ESCH_SS_OVERLAP; - sch_ep->cs_count = cs_count; /* one for ss, the other for idle */ sch_ep->num_budget_microframes = cs_count + 2; @@ -541,28 +533,24 @@ static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used) struct mu3h_sch_tt *tt = sch_ep->sch_tt; u32 base, num_esit; int bw_updated; - int bits; int i, j; + int offset = sch_ep->offset; + u8 uframes = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX); num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit; - bits = (sch_ep->ep_type == ISOC_OUT_EP) ? 
sch_ep->cs_count : 1; if (used) bw_updated = sch_ep->bw_cost_per_microframe; else bw_updated = -sch_ep->bw_cost_per_microframe; + if (sch_ep->ep_type == INT_IN_EP || sch_ep->ep_type == ISOC_IN_EP) + offset++; + for (i = 0; i < num_esit; i++) { - base = sch_ep->offset + i * sch_ep->esit; + base = offset + i * sch_ep->esit; - for (j = 0; j < bits; j++) { - if (used) - set_bit(base + j, tt->ss_bit_map); - else - clear_bit(base + j, tt->ss_bit_map); - } - - for (j = 0; j < sch_ep->cs_count; j++) + for (j = 0; j < uframes; j++) tt->fs_bus_bw[base + j] += bw_updated; } diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index 4ccd08e20a15..607dbe474d91 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -20,12 +20,10 @@ #define XHCI_MTK_MAX_ESIT 64 /** - * @ss_bit_map: used to avoid start split microframes overlay * @fs_bus_bw: array to keep track of bandwidth already used for FS * @ep_list: Endpoints using this TT */ struct mu3h_sch_tt { - DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT); u32 fs_bus_bw[XHCI_MTK_MAX_ESIT]; struct list_head ep_list; }; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index dafb58f05c9f..9168b492c02b 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -58,20 +58,13 @@ #define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138 -#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 0x161a -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 0x161b -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 0x161d -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 0x161e -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 0x15d6 -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 0x15d7 -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 0x161c -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8 0x161f #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 @@ -249,6 +242,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) xhci->quirks |= XHCI_MISSING_CAS; + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI)) + xhci->quirks |= XHCI_RESET_TO_DEFAULT; + if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI || @@ -260,8 +258,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI)) + pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI)) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (pdev->vendor == PCI_VENDOR_ID_ETRON && @@ -294,8 +291,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == 
PCI_DEVICE_ID_ASMEDIA_1042_XHCI) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) { + /* + * try to tame the ASMedia 1042 controller which reports 0.96 + * but appears to behave more like 1.0 + */ + xhci->quirks |= XHCI_SPURIOUS_SUCCESS; xhci->quirks |= XHCI_BROKEN_STREAMS; + } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) { xhci->quirks |= XHCI_TRUST_TX_LENGTH; @@ -324,15 +327,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4)) xhci->quirks |= XHCI_NO_SOFT_RETRY; - if (pdev->vendor == PCI_VENDOR_ID_AMD && - (pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8)) + /* xHC spec requires PCI devices to support D3hot and D3cold */ + if (xhci->hci_version >= 0x120) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (xhci->quirks & XHCI_RESET_ON_RESUME) diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index f76cdb306ea8..1c59275e8737 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -134,7 +134,7 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = { }; static const struct xhci_plat_priv xhci_plat_brcm = { - .quirks = XHCI_RESET_ON_RESUME, + .quirks = XHCI_RESET_ON_RESUME | XHCI_SUSPEND_RESUME_CLKS, }; static const struct of_device_id usb_xhci_of_match[] = { @@ -497,7 +497,16 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) * xhci_suspend() needs `do_wakeup` to know whether host is allowed * to do wakeup during suspend. 
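	 * When the platform sets XHCI_SUSPEND_RESUME_CLKS (as xhci_plat_brcm
	 * now does) and the device cannot wake the system, the controller
	 * clocks are gated after xhci_suspend() below and re-enabled at the
	 * start of xhci_plat_resume() before any register access.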
*/ - return xhci_suspend(xhci, device_may_wakeup(dev)); + ret = xhci_suspend(xhci, device_may_wakeup(dev)); + if (ret) + return ret; + + if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) { + clk_disable_unprepare(xhci->clk); + clk_disable_unprepare(xhci->reg_clk); + } + + return 0; } static int __maybe_unused xhci_plat_resume(struct device *dev) @@ -506,6 +515,11 @@ static int __maybe_unused xhci_plat_resume(struct device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); int ret; + if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) { + clk_prepare_enable(xhci->clk); + clk_prepare_enable(xhci->reg_clk); + } + ret = xhci_priv_resume_quirk(hcd); if (ret) return ret; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index eb148a3629ec..70d8b33afb88 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -3158,6 +3158,8 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) if (event_loop++ < TRBS_PER_SEGMENT / 2) continue; xhci_update_erst_dequeue(xhci, event_ring_deq); + event_ring_deq = xhci->event_ring->dequeue; + event_loop = 0; } diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 50bb91b6a4b8..246a3d274142 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1042,15 +1042,15 @@ static int tegra_xusb_powerdomain_init(struct device *dev, int err; tegra->genpd_dev_host = dev_pm_domain_attach_by_name(dev, "xusb_host"); - if (IS_ERR(tegra->genpd_dev_host)) { - err = PTR_ERR(tegra->genpd_dev_host); + if (IS_ERR_OR_NULL(tegra->genpd_dev_host)) { + err = PTR_ERR(tegra->genpd_dev_host) ? : -ENODATA; dev_err(dev, "failed to get host pm-domain: %d\n", err); return err; } tegra->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "xusb_ss"); - if (IS_ERR(tegra->genpd_dev_ss)) { - err = PTR_ERR(tegra->genpd_dev_ss); + if (IS_ERR_OR_NULL(tegra->genpd_dev_ss)) { + err = PTR_ERR(tegra->genpd_dev_ss) ? : -ENODATA; dev_err(dev, "failed to get superspeed pm-domain: %d\n", err); return err; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index d6a8f5ae2d7a..cd344c52c682 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -778,11 +778,28 @@ void xhci_shutdown(struct usb_hcd *hcd) if (xhci->quirks & XHCI_SPURIOUS_REBOOT) usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev)); + /* Don't poll the roothubs after shutdown. 
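+	 * Also stop the roothub timers of the primary and any shared HCD,
+	 * so port polling cannot be rearmed while the controller is halted.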
+	 */
+	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
+			__func__, hcd->self.busnum);
+	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	del_timer_sync(&hcd->rh_timer);
+
+	if (xhci->shared_hcd) {
+		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+		del_timer_sync(&xhci->shared_hcd->rh_timer);
+	}
+
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
-	/* Workaround for spurious wakeups at shutdown with HSW */
-	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+
+	/*
+	 * Workaround for spurious wakeups at shutdown with HSW, and for boot
+	 * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
+	 */
+	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
+	    xhci->quirks & XHCI_RESET_TO_DEFAULT)
 		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
+
 	spin_unlock_irq(&xhci->lock);

 	xhci_cleanup_msix(xhci);
@@ -1150,7 +1167,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 	/* re-initialize the HC on Restore Error, or Host Controller Error */
 	if (temp & (STS_SRE | STS_HCE)) {
 		reinit_xhc = true;
-		xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+		if (!xhci->broken_suspend)
+			xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
 	}

 	if (reinit_xhc) {
@@ -1323,7 +1341,6 @@ unsigned int xhci_get_endpoint_address(unsigned int ep_index)
 	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
 	return direction | number;
 }
-EXPORT_SYMBOL_GPL(xhci_get_endpoint_address);

 /* Find the flag for this endpoint (for use in the control context).  Use the
  * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 16cc027d2560..deebf0872204 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1906,7 +1906,9 @@ struct xhci_hcd {
 #define XHCI_SG_TRB_CACHE_SIZE_QUIRK	BIT_ULL(39)
 #define XHCI_NO_SOFT_RETRY	BIT_ULL(40)
 #define XHCI_EP_CTX_BROKEN_DCS	BIT_ULL(42)
-#define XHCI_U2_BROKEN_SUSPEND	BIT_ULL(43)
+#define XHCI_SUSPEND_RESUME_CLKS	BIT_ULL(43)
+#define XHCI_RESET_TO_DEFAULT	BIT_ULL(44)
+#define XHCI_U2_BROKEN_SUSPEND	BIT_ULL(45)
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
@@ -2456,7 +2458,7 @@ static inline const char *xhci_decode_trb(char *str, size_t size,
 			field3 & TRB_CYCLE ?
'C' : 'c'); break; case TRB_STOP_RING: - sprintf(str, + snprintf(str, size, "%s: slot %d sp %d ep %d flags %c", xhci_trb_type_string(type), TRB_TO_SLOT_ID(field3), diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index e9437a176518..ea39243efee3 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -177,10 +177,6 @@ static int idmouse_create_image(struct usb_idmouse *dev) bytes_read += bulk_read; } - /* reset the device */ -reset: - ftip_command(dev, FTIP_RELEASE, 0, 0); - /* check for valid image */ /* right border should be black (0x00) */ for (bytes_read = sizeof(HEADER)-1 + WIDTH-1; bytes_read < IMGSIZE; bytes_read += WIDTH) @@ -192,6 +188,10 @@ reset: if (dev->bulk_in_buffer[bytes_read] != 0xFF) return -EAGAIN; + /* reset the device */ +reset: + ftip_command(dev, FTIP_RELEASE, 0, 0); + /* should be IMGSIZE == 65040 */ dev_dbg(&dev->interface->dev, "read %d bytes fingerprint data\n", bytes_read); diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 748139d26263..0be8efcda15d 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -71,6 +71,7 @@ static void destroy_priv(struct kref *kref) dev_dbg(&priv->usbdev->dev, "destroying priv datastructure\n"); usb_put_dev(priv->usbdev); + priv->usbdev = NULL; kfree(priv); } @@ -736,7 +737,6 @@ static int uss720_probe(struct usb_interface *intf, parport_announce_port(pp); usb_set_intfdata(intf, pp); - usb_put_dev(usbdev); return 0; probe_abort: @@ -754,7 +754,6 @@ static void uss720_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); if (pp) { priv = pp->private_data; - priv->usbdev = NULL; priv->pp = NULL; dev_dbg(&intf->dev, "parport_remove_port\n"); parport_remove_port(pp); diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index f48a23adbc35..094e812e9e69 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1268,6 +1268,11 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma) { /* don't do anything here: "fault" will set up page table entries */ vma->vm_ops = &mon_bin_vm_ops; + + if (vma->vm_flags & VM_WRITE) + return -EPERM; + + vma->vm_flags &= ~VM_MAYWRITE; vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = filp->private_data; mon_bin_vma_open(vma); diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c index 04f666e85731..16731324e5d8 100644 --- a/drivers/usb/mtu3/mtu3_dr.c +++ b/drivers/usb/mtu3/mtu3_dr.c @@ -41,10 +41,8 @@ static char *mailbox_state_string(enum mtu3_vbus_id_state state) static void toggle_opstate(struct ssusb_mtk *ssusb) { - if (!ssusb->otg_switch.is_u3_drd) { - mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION); - mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN); - } + mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION); + mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN); } /* only port0 supports dual-role mode */ diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index fb806b33178a..c273eee35aaa 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -760,6 +760,9 @@ static void rxstate(struct musb *musb, struct musb_request *req) musb_writew(epio, MUSB_RXCSR, csr); buffer_aint_mapped: + fifo_count = min_t(unsigned int, + request->length - request->actual, + (unsigned int)fifo_count); musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) (request->buf + request->actual)); request->actual += fifo_count; diff --git 
a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 4232f1ce3fbf..1d435e4ee857 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -360,6 +360,7 @@ static int omap2430_probe(struct platform_device *pdev) control_node = of_parse_phandle(np, "ctrl-module", 0); if (control_node) { control_pdev = of_find_device_by_node(control_node); + of_node_put(control_node); if (!control_pdev) { dev_err(&pdev->dev, "Failed to get control device\n"); ret = -EINVAL; diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index 661a229c105d..34b9f8140187 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c @@ -268,6 +268,13 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop) return -EPROBE_DEFER; } + nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus"); + if (PTR_ERR(nop->vbus_draw) == -ENODEV) + nop->vbus_draw = NULL; + if (IS_ERR(nop->vbus_draw)) + return dev_err_probe(dev, PTR_ERR(nop->vbus_draw), + "could not get vbus regulator\n"); + nop->dev = dev; nop->phy.dev = nop->dev; nop->phy.label = "nop-xceiv"; diff --git a/drivers/usb/renesas_usbhs/rza.c b/drivers/usb/renesas_usbhs/rza.c index 24de64edb674..2d77edefb4b3 100644 --- a/drivers/usb/renesas_usbhs/rza.c +++ b/drivers/usb/renesas_usbhs/rza.c @@ -23,6 +23,10 @@ static int usbhs_rza1_hardware_init(struct platform_device *pdev) extal_clk = of_find_node_by_name(NULL, "extal"); of_property_read_u32(usb_x1_clk, "clock-frequency", &freq_usb); of_property_read_u32(extal_clk, "clock-frequency", &freq_extal); + + of_node_put(usb_x1_clk); + of_node_put(extal_clk); + if (freq_usb == 0) { if (freq_extal == 12000000) { /* Select 12MHz XTAL */ diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index a2a38fc76ca5..97a250e75ab7 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -97,7 +97,10 @@ struct ch341_private { u8 mcr; u8 msr; u8 lcr; + unsigned long quirks; + u8 version; + unsigned long break_end; }; @@ -256,8 +259,12 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev, /* * CH341A buffers data until a full endpoint-size packet (32 bytes) * has been received unless bit 7 is set. + * + * At least one device with version 0x27 appears to have this bit + * inverted. */ - val |= BIT(7); + if (priv->version > 0x27) + val |= BIT(7); r = ch341_control_out(dev, CH341_REQ_WRITE_REG, CH341_REG_DIVISOR << 8 | CH341_REG_PRESCALER, @@ -271,6 +278,9 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev, * (stop bits, parity and word length). Version 0x30 and above use * CH341_REG_LCR only and CH341_REG_LCR2 is always set to zero. 
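	 * The early return added below therefore skips the CH341_REG_LCR2
	 * write entirely for pre-0x30 devices, which use those separate
	 * registers instead.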
*/ + if (priv->version < 0x30) + return 0; + r = ch341_control_out(dev, CH341_REQ_WRITE_REG, CH341_REG_LCR2 << 8 | CH341_REG_LCR, lcr); if (r) @@ -323,7 +333,9 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size); if (r < 0) goto out; - dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]); + + priv->version = buffer[0]; + dev_dbg(&dev->dev, "Chip version: 0x%02x\n", priv->version); r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0); if (r < 0) diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 7ac668023da8..8a4a0d4dbc13 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -134,6 +134,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */ { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */ { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ + { USB_DEVICE(0x10C4, 0x8414) }, /* Decagon USB Cable Adapter */ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ @@ -198,6 +199,10 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */ { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */ + { USB_DEVICE(0x17A8, 0x0011) }, /* Kamstrup 444 MHz RF sniffer */ + { USB_DEVICE(0x17A8, 0x0013) }, /* Kamstrup 870 MHz RF sniffer */ + { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */ + { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c index 0c7eacc630e0..11fe49543f26 100644 --- a/drivers/usb/serial/f81232.c +++ b/drivers/usb/serial/f81232.c @@ -130,9 +130,6 @@ static u8 const clock_table[] = { F81232_CLK_1_846_MHZ, F81232_CLK_14_77_MHZ, static int calc_baud_divisor(speed_t baudrate, speed_t clockrate) { - if (!baudrate) - return 0; - return DIV_ROUND_CLOSEST(clockrate, baudrate); } @@ -523,9 +520,14 @@ static void f81232_set_baudrate(struct tty_struct *tty, speed_t baud_list[] = { baudrate, old_baudrate, F81232_DEF_BAUDRATE }; for (i = 0; i < ARRAY_SIZE(baud_list); ++i) { - idx = f81232_find_clk(baud_list[i]); + baudrate = baud_list[i]; + if (baudrate == 0) { + tty_encode_baud_rate(tty, 0, 0); + return; + } + + idx = f81232_find_clk(baudrate); if (idx >= 0) { - baudrate = baud_list[i]; tty_encode_baud_rate(tty, baudrate, baudrate); break; } diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c index 5661fd03e545..e952be683c63 100644 --- a/drivers/usb/serial/f81534.c +++ b/drivers/usb/serial/f81534.c @@ -538,9 +538,6 @@ static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags) static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate) { - if (!baudrate) - return 0; - /* Round to nearest divisor */ return DIV_ROUND_CLOSEST(clockrate, baudrate); } @@ -570,9 +567,14 @@ static int 
f81534_set_port_config(struct usb_serial_port *port, u32 baud_list[] = {baudrate, old_baudrate, F81534_DEFAULT_BAUD_RATE}; for (i = 0; i < ARRAY_SIZE(baud_list); ++i) { - idx = f81534_find_clk(baud_list[i]); + baudrate = baud_list[i]; + if (baudrate == 0) { + tty_encode_baud_rate(tty, 0, 0); + return 0; + } + + idx = f81534_find_clk(baudrate); if (idx >= 0) { - baudrate = baud_list[i]; tty_encode_baud_rate(tty, baudrate, baudrate); break; } diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index b74621dc2a65..3bfa395c3112 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1023,6 +1023,9 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) }, + /* Belimo Automation devices */ + { USB_DEVICE(FTDI_VID, BELIMO_ZTH_PID) }, + { USB_DEVICE(FTDI_VID, BELIMO_ZIP_PID) }, /* ICP DAS I-756xU devices */ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, @@ -1042,6 +1045,8 @@ static const struct usb_device_id id_table_combined[] = { /* IDS GmbH devices */ { USB_DEVICE(IDS_VID, IDS_SI31A_PID) }, { USB_DEVICE(IDS_VID, IDS_CM31A_PID) }, + /* Omron devices */ + { USB_DEVICE(OMRON_VID, OMRON_CS1W_CIF31_PID) }, /* U-Blox devices */ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) }, { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) }, @@ -1315,8 +1320,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty, case 38400: div_value = ftdi_sio_b38400; break; case 57600: div_value = ftdi_sio_b57600; break; case 115200: div_value = ftdi_sio_b115200; break; - } /* baud */ - if (div_value == 0) { + default: dev_dbg(dev, "%s - Baudrate (%d) requested is not supported\n", __func__, baud); div_value = ftdi_sio_b9600; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index d1a9564697a4..31c8ccabbbb7 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -661,6 +661,12 @@ #define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ #define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */ +/* + * Omron corporation (https://www.omron.com) + */ + #define OMRON_VID 0x0590 + #define OMRON_CS1W_CIF31_PID 0x00b2 + /* * Acton Research Corp. 
*/ @@ -1568,6 +1574,12 @@ #define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */ #define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */ +/* + * Belimo Automation + */ +#define BELIMO_ZTH_PID 0x8050 +#define BELIMO_ZIP_PID 0xC811 + /* * Unjo AB */ diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index c327d4cf7928..03bcab3b9bd0 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -168,6 +168,7 @@ static const struct usb_device_id edgeport_2port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) }, + { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) }, { } }; @@ -206,6 +207,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) }, + { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) }, { } }; diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h index 52cbc353051f..9a6f742ad3ab 100644 --- a/drivers/usb/serial/io_usbvend.h +++ b/drivers/usb/serial/io_usbvend.h @@ -212,6 +212,7 @@ // // Definitions for other product IDs #define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device +#define ION_DEVICE_ID_E5805A 0x1A01 // OEM device (rebranded Edgeport/4) #define GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId) \ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index b878f4c87fee..5636b8f52216 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -162,6 +162,8 @@ static void option_instat_callback(struct urb *urb); #define NOVATELWIRELESS_PRODUCT_G2 0xA010 #define NOVATELWIRELESS_PRODUCT_MC551 0xB001 +#define UBLOX_VENDOR_ID 0x1546 + /* AMOI PRODUCTS */ #define AMOI_VENDOR_ID 0x1614 #define AMOI_PRODUCT_H01 0x0800 @@ -240,7 +242,6 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_UC15 0x9090 /* These u-blox products use Qualcomm's vendor ID */ #define UBLOX_PRODUCT_R410M 0x90b2 -#define UBLOX_PRODUCT_R6XX 0x90fa /* These Yuga products use Qualcomm's vendor ID */ #define YUGA_PRODUCT_CLM920_NC5 0x9625 @@ -252,10 +253,15 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EG95 0x0195 #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 +#define QUECTEL_PRODUCT_EM05G 0x030a +#define QUECTEL_PRODUCT_EM060K 0x030b +#define QUECTEL_PRODUCT_EM05G_SG 0x0311 #define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_RM500Q 0x0800 +#define QUECTEL_PRODUCT_RM520N 0x0801 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 #define QUECTEL_PRODUCT_EC200T 0x6026 +#define QUECTEL_PRODUCT_RM500K 0x7001 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -432,6 +438,12 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_CLS8 0x00b0 #define CINTERION_PRODUCT_MV31_MBIM 0x00b3 #define CINTERION_PRODUCT_MV31_RMNET 0x00b7 +#define CINTERION_PRODUCT_MV31_2_MBIM 0x00b8 +#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9 +#define CINTERION_PRODUCT_MV32_WA 0x00f1 +#define CINTERION_PRODUCT_MV32_WB 0x00f2 +#define CINTERION_PRODUCT_MV32_WA_RMNET 0x00f3 +#define CINTERION_PRODUCT_MV32_WB_RMNET 0x00f4 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -567,6 +579,13 @@ static void option_instat_callback(struct 
urb *urb); #define WETELECOM_PRODUCT_6802 0x6802 #define WETELECOM_PRODUCT_WMD300 0x6803 +/* OPPO products */ +#define OPPO_VENDOR_ID 0x22d9 +#define OPPO_PRODUCT_R11 0x276c + +/* Sierra Wireless products */ +#define SIERRA_VENDOR_ID 0x1199 +#define SIERRA_PRODUCT_EM9191 0x90d3 /* Device flags */ @@ -1110,8 +1129,16 @@ static const struct usb_device_id option_ids[] = { /* u-blox products using Qualcomm vendor ID */ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), .driver_info = RSVD(1) | RSVD(3) }, - { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX), + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x908b), /* u-blox LARA-R6 00B */ + .driver_info = RSVD(4) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa), .driver_info = RSVD(3) }, + /* u-blox products */ + { USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) }, /* u-blox LARA-L6 */ + { USB_DEVICE(UBLOX_VENDOR_ID, 0x1342), /* u-blox LARA-L6 (RMNET) */ + .driver_info = RSVD(4) }, + { USB_DEVICE(UBLOX_VENDOR_ID, 0x1343), /* u-blox LARA-L6 (ECM) */ + .driver_info = RSVD(4) }, /* Quectel products using Quectel vendor ID */ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff), .driver_info = NUMEP2 }, @@ -1125,22 +1152,37 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), .driver_info = NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */ + .driver_info = ZLP }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff), + .driver_info = RSVD(6) | ZLP }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff), + .driver_info = RSVD(6) | ZLP }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */ + .driver_info = RSVD(3) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), .driver_info = ZLP }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) }, { 
USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, @@ -1217,6 +1259,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */ .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1057, 0xff), /* Telit FN980 */ + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1058, 0xff), /* Telit FN980 (PCIe) */ + .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */ @@ -1233,6 +1279,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */ .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */ + .driver_info = RSVD(0) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), @@ -1267,6 +1315,7 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */ .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x1250, 0xff, 0x00, 0x00) }, /* Telit LE910Cx (rmnet) */ { USB_DEVICE(TELIT_VENDOR_ID, 0x1260), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x1261), @@ -1969,6 +2018,18 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(3)}, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), .driver_info = RSVD(0)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff), + .driver_info = RSVD(3)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff), + .driver_info = RSVD(0)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff), + .driver_info = RSVD(3)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA_RMNET, 0xff), + .driver_info = RSVD(0) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff), + .driver_info = RSVD(3)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB_RMNET, 0xff), + .driver_info = RSVD(0) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), .driver_info = RSVD(4) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), @@ -2111,12 +2172,17 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(3) }, { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, + { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */ + { USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */ { USB_DEVICE(0x2cb7, 0x0104), /* 
Fibocom NL678 series */ .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ .driver_info = RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0106, 0xff) }, /* Fibocom MA510 (ECM mode w/ diag intf.) */ + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */ + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */ @@ -2125,6 +2191,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ + { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index d736822e95e1..16118d9f2392 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -106,6 +106,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LM930_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) }, { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index c5406452b774..732f9b13ad5d 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -135,6 +135,7 @@ #define HP_TD620_PRODUCT_ID 0x0956 #define HP_LD960_PRODUCT_ID 0x0b39 #define HP_LD381_PRODUCT_ID 0x0f7f +#define HP_LM930_PRODUCT_ID 0x0f9b #define HP_LCM220_PRODUCT_ID 0x3139 #define HP_LCM960_PRODUCT_ID 0x3239 #define HP_LD220_PRODUCT_ID 0x3524 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index c18bf8164bc2..b1e844bf31f8 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */ {DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */ + {DEVICE_SWI(0x1199, 0xc080)}, /* Sierra Wireless EM7590 QDL */ + {DEVICE_SWI(0x1199, 0xc081)}, /* Sierra Wireless EM7590 */ {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ @@ -175,6 +177,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x413c, 
0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81c2)}, /* Dell Wireless 5811e */ {DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */ {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 57fc3c31712e..018a27d879b8 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -737,7 +737,8 @@ static void sierra_close(struct usb_serial_port *port) /* * Need to take susp_lock to make sure port is not already being - * resumed, but no need to hold it due to initialized + * resumed, but no need to hold it due to the tty-port initialized + * flag. */ spin_lock_irq(&intfdata->susp_lock); if (--intfdata->open_ports == 0) diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 27e3bb58c872..e8dd4603b201 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -254,7 +254,7 @@ static int serial_open(struct tty_struct *tty, struct file *filp) * * Shut down a USB serial port. Serialized against activate by the * tport mutex and kept to matching open/close pairs - * of calls by the initialized flag. + * of calls by the tty-port initialized flag. * * Not called if tty is console. */ diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index b2285d5a869d..628a75d1232a 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -435,7 +435,8 @@ void usb_wwan_close(struct usb_serial_port *port) /* * Need to take susp_lock to make sure port is not already being - * resumed, but no need to hold it due to initialized + * resumed, but no need to hold it due to the tty-port initialized + * flag. 
*/ spin_lock_irq(&intfdata->susp_lock); if (--intfdata->open_ports == 0) diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index ca3bd58f2025..5576cfa50bc8 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -599,9 +599,8 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command, switch (command) { case WHITEHEAT_GET_DTR_RTS: info = usb_get_serial_port_data(port); - memcpy(&info->mcr, command_info->result_buffer, - sizeof(struct whiteheat_dr_info)); - break; + info->mcr = command_info->result_buffer[0]; + break; } } exit: diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c index 05cec81dcd3f..38ddfedef629 100644 --- a/drivers/usb/storage/karma.c +++ b/drivers/usb/storage/karma.c @@ -174,24 +174,25 @@ static void rio_karma_destructor(void *extra) static int rio_karma_init(struct us_data *us) { - int ret = 0; struct karma_data *data = kzalloc(sizeof(struct karma_data), GFP_NOIO); if (!data) - goto out; + return -ENOMEM; data->recv = kmalloc(RIO_RECV_LEN, GFP_NOIO); if (!data->recv) { kfree(data); - goto out; + return -ENOMEM; } us->extra = data; us->extra_destructor = rio_karma_destructor; - ret = rio_karma_send_command(RIO_ENTER_STORAGE, us); - data->in_storage = (ret == 0); -out: - return ret; + if (rio_karma_send_command(RIO_ENTER_STORAGE, us)) + return -EIO; + + data->in_storage = 1; + + return 0; } static struct scsi_host_template karma_host_template; diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 4a8399abfea3..ede0d8c9b3d0 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1281,12 +1281,6 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999, USB_SC_RBC, USB_PR_BULK, NULL, 0 ), -UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100, - "Samsung", - "Flash Drive FIT", - USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_MAX_SECTORS_64), - /* aeb */ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, "Feiya", @@ -2300,6 +2294,13 @@ UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ), +/* Reported by Witold Lipieta */ +UNUSUAL_DEV( 0x1fc9, 0x0117, 0x0100, 0x0100, + "NXP Semiconductors", + "PN7462AU", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_RESIDUE ), + /* Supplied with some Castlewood ORB removable drives */ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999, "Double-H Technology", diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 1fcb871c6919..d009e69b700e 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME), +/* Reported-by: Hongling Zeng */ +UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999, + "Hiksemi", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI * commands in UAS mode. Observed with the 1.28 firmware; are there others? 
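For context on how the quirk tables touched above are consumed: each UNUSUAL_DEV() entry matches a device on idVendor/idProduct plus an inclusive bcdDevice (firmware revision) range, which is why broad entries such as the Hiksemi one use 0x0000..0x9999 to cover every revision, while targeted entries such as the NXP PN7462AU pin the range to the single 0x0100 revision. A minimal, self-contained sketch of that matching predicate follows; the struct layout, flag bit, and sample device values are illustrative stand-ins, not the kernel's actual usb_device_id plumbing.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative quirk entry: VID/PID plus an inclusive bcdDevice range. */
    struct quirk_entry {
            uint16_t vid;
            uint16_t pid;
            uint16_t bcd_min;
            uint16_t bcd_max;
            const char *name;
            unsigned long flags;    /* hypothetical stand-in for US_FL_* bits */
    };

    #define FL_IGNORE_UAS (1UL << 0)

    static const struct quirk_entry quirks[] = {
            /* Mirrors the shape of the Hiksemi entry: any firmware revision. */
            { 0x090c, 0x2000, 0x0000, 0x9999, "Hiksemi External HDD", FL_IGNORE_UAS },
    };

    static bool quirk_matches(const struct quirk_entry *q,
                              uint16_t vid, uint16_t pid, uint16_t bcd)
    {
            return q->vid == vid && q->pid == pid &&
                   bcd >= q->bcd_min && bcd <= q->bcd_max;
    }

    int main(void)
    {
            uint16_t vid = 0x090c, pid = 0x2000, bcd = 0x0100; /* sample device */
            size_t i;

            for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
                    if (quirk_matches(&quirks[i], vid, pid, bcd))
                            printf("%s: applying quirk flags 0x%lx\n",
                                   quirks[i].name, quirks[i].flags);
            return 0;
    }

The per-device lookup is also why removing a stale entry, like the Samsung Flash Drive FIT above, is safe for unaffected devices: a missing match simply means no quirk flags are applied.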
@@ -62,6 +69,13 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_UAS), +/* Reported-by: Tom Hu */ +UNUSUAL_DEV(0x0b05, 0x1932, 0x0000, 0x9999, + "ASUS", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + UNUSUAL_DEV(0x0bc2, 0x2321, 0x0000, 0x9999, "Seagate", "Expansion HDD", @@ -75,6 +89,13 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_LUNS), +/* Reported-by: Hongling Zeng */ +UNUSUAL_DEV(0x0bda, 0x9210, 0x0000, 0x9999, + "Hiksemi", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* Reported-by: Benjamin Tissoires */ UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, "Initio Corporation", @@ -129,6 +150,13 @@ UNUSUAL_DEV(0x174c, 0x55aa, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES), +/* Reported-by: Hongling Zeng */ +UNUSUAL_DEV(0x17ef, 0x3899, 0x0000, 0x9999, + "Thinkplus", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* Reported-by: Hans de Goede */ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VIA", diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index 998e98a896e1..cb32a838ea5c 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -1776,6 +1776,7 @@ void typec_set_pwr_opmode(struct typec_port *port, partner->usb_pd = 1; sysfs_notify(&partner_dev->kobj, NULL, "supports_usb_power_delivery"); + kobject_uevent(&partner_dev->kobj, KOBJ_CHANGE); } put_device(partner_dev); } diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c index b9035c3407b5..a6e5028105d4 100644 --- a/drivers/usb/typec/mux.c +++ b/drivers/usb/typec/mux.c @@ -127,8 +127,11 @@ typec_switch_register(struct device *parent, sw->dev.class = &typec_mux_class; sw->dev.type = &typec_switch_dev_type; sw->dev.driver_data = desc->drvdata; - dev_set_name(&sw->dev, "%s-switch", - desc->name ? desc->name : dev_name(parent)); + ret = dev_set_name(&sw->dev, "%s-switch", desc->name ? desc->name : dev_name(parent)); + if (ret) { + put_device(&sw->dev); + return ERR_PTR(ret); + } ret = device_add(&sw->dev); if (ret) { @@ -331,8 +334,11 @@ typec_mux_register(struct device *parent, const struct typec_mux_desc *desc) mux->dev.class = &typec_mux_class; mux->dev.type = &typec_mux_dev_type; mux->dev.driver_data = desc->drvdata; - dev_set_name(&mux->dev, "%s-mux", - desc->name ? desc->name : dev_name(parent)); + ret = dev_set_name(&mux->dev, "%s-mux", desc->name ? 
desc->name : dev_name(parent)); + if (ret) { + put_device(&mux->dev); + return ERR_PTR(ret); + } ret = device_add(&mux->dev); if (ret) { diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index 31b1b3b555c7..271e2740992f 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -83,8 +83,6 @@ enum { /* * Input Output Manager (IOM) PORT STATUS */ -#define IOM_PORT_STATUS_OFFSET 0x560 - #define IOM_PORT_STATUS_ACTIVITY_TYPE_MASK GENMASK(9, 6) #define IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT 6 #define IOM_PORT_STATUS_ACTIVITY_TYPE_USB 0x03 @@ -144,6 +142,7 @@ struct pmc_usb { struct pmc_usb_port *port; struct acpi_device *iom_adev; void __iomem *iom_base; + u32 iom_port_status_offset; }; static void update_port_status(struct pmc_usb_port *port) @@ -153,7 +152,8 @@ static void update_port_status(struct pmc_usb_port *port) /* SoC expects the USB Type-C port numbers to start with 0 */ port_num = port->usb3_port - 1; - port->iom_status = readl(port->pmc->iom_base + IOM_PORT_STATUS_OFFSET + + port->iom_status = readl(port->pmc->iom_base + + port->pmc->iom_port_status_offset + port_num * sizeof(u32)); } @@ -352,13 +352,24 @@ pmc_usb_mux_usb4(struct pmc_usb_port *port, struct typec_mux_state *state) return pmc_usb_command(port, (void *)&req, sizeof(req)); } -static int pmc_usb_mux_safe_state(struct pmc_usb_port *port) +static int pmc_usb_mux_safe_state(struct pmc_usb_port *port, + struct typec_mux_state *state) { u8 msg; if (IOM_PORT_ACTIVITY_IS(port->iom_status, SAFE_MODE)) return 0; + if ((IOM_PORT_ACTIVITY_IS(port->iom_status, DP) || + IOM_PORT_ACTIVITY_IS(port->iom_status, DP_MFD)) && + state->alt && state->alt->svid == USB_TYPEC_DP_SID) + return 0; + + if ((IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) || + IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB)) && + state->alt && state->alt->svid == USB_TYPEC_TBT_SID) + return 0; + msg = PMC_USB_SAFE_MODE; msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; @@ -426,7 +437,7 @@ pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state) return 0; if (state->mode == TYPEC_STATE_SAFE) - return pmc_usb_mux_safe_state(port); + return pmc_usb_mux_safe_state(port, state); if (state->mode == TYPEC_STATE_USB) return pmc_usb_connect(port, port->role); @@ -554,19 +565,42 @@ err_unregister_switch: static int is_memory(struct acpi_resource *res, void *data) { - struct resource r; + struct resource_win win = {}; + struct resource *r = &win.res; - return !acpi_dev_resource_memory(res, &r); + return !(acpi_dev_resource_memory(res, r) || + acpi_dev_resource_address_space(res, &win)); } +/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */ +static const struct acpi_device_id iom_acpi_ids[] = { + /* TigerLake */ + { "INTC1072", 0x560, }, + + /* AlderLake */ + { "INTC1079", 0x160, }, + + /* Meteor Lake */ + { "INTC107A", 0x160, }, + {} +}; + static int pmc_usb_probe_iom(struct pmc_usb *pmc) { struct list_head resource_list; struct resource_entry *rentry; - struct acpi_device *adev; + static const struct acpi_device_id *dev_id; + struct acpi_device *adev = NULL; int ret; - adev = acpi_dev_get_first_match_dev("INTC1072", NULL, -1); + for (dev_id = &iom_acpi_ids[0]; dev_id->id[0]; dev_id++) { + if (acpi_dev_present(dev_id->id, NULL, -1)) { + pmc->iom_port_status_offset = (u32)dev_id->driver_data; + adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1); + break; + } + } + if (!adev) return -ENODEV; diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig index 
0751e9b8b70d..c7795fdd5047 100644 --- a/drivers/usb/typec/tcpm/Kconfig +++ b/drivers/usb/typec/tcpm/Kconfig @@ -70,7 +70,6 @@ config TYPEC_WCOVE tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver" depends on ACPI depends on MFD_INTEL_PMC_BXT - depends on INTEL_SOC_PMIC depends on BXT_WC_PMIC_OPREGION help This driver adds support for USB Type-C on Intel Broxton platforms diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index f7141b135270..17cea37c73fd 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -85,9 +85,25 @@ static int tcpci_write16(struct tcpci *tcpci, unsigned int reg, u16 val) static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc) { struct tcpci *tcpci = tcpc_to_tcpci(tcpc); + bool vconn_pres; + enum typec_cc_polarity polarity = TYPEC_POLARITY_CC1; unsigned int reg; int ret; + ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, ®); + if (ret < 0) + return ret; + + vconn_pres = !!(reg & TCPC_POWER_STATUS_VCONN_PRES); + if (vconn_pres) { + ret = regmap_read(tcpci->regmap, TCPC_TCPC_CTRL, ®); + if (ret < 0) + return ret; + + if (reg & TCPC_TCPC_CTRL_ORIENTATION) + polarity = TYPEC_POLARITY_CC2; + } + switch (cc) { case TYPEC_CC_RA: reg = (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC1_SHIFT) | @@ -122,6 +138,16 @@ static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc) break; } + if (vconn_pres) { + if (polarity == TYPEC_POLARITY_CC2) { + reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT); + reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT); + } else { + reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT); + reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT); + } + } + ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg); if (ret < 0) return ret; @@ -878,7 +904,7 @@ static int tcpci_remove(struct i2c_client *client) /* Disable chip interrupts before unregistering port */ err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0); if (err < 0) - return err; + dev_warn(&client->dev, "Failed to disable irqs (%pe)\n", ERR_PTR(err)); tcpci_unregister_port(chip->tcpci); diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h index 2be7a77d400e..b2edd45f13c6 100644 --- a/drivers/usb/typec/tcpm/tcpci.h +++ b/drivers/usb/typec/tcpm/tcpci.h @@ -98,6 +98,7 @@ #define TCPC_POWER_STATUS_SOURCING_VBUS BIT(4) #define TCPC_POWER_STATUS_VBUS_DET BIT(3) #define TCPC_POWER_STATUS_VBUS_PRES BIT(2) +#define TCPC_POWER_STATUS_VCONN_PRES BIT(1) #define TCPC_POWER_STATUS_SINKING_VBUS BIT(0) #define TCPC_FAULT_STATUS 0x1f diff --git a/drivers/usb/typec/tcpm/tcpci_mt6360.c b/drivers/usb/typec/tcpm/tcpci_mt6360.c index f1bd9e09bc87..8a952eaf9016 100644 --- a/drivers/usb/typec/tcpm/tcpci_mt6360.c +++ b/drivers/usb/typec/tcpm/tcpci_mt6360.c @@ -15,6 +15,9 @@ #include "tcpci.h" +#define MT6360_REG_PHYCTRL1 0x80 +#define MT6360_REG_PHYCTRL3 0x82 +#define MT6360_REG_PHYCTRL7 0x86 #define MT6360_REG_VCONNCTRL1 0x8C #define MT6360_REG_MODECTRL2 0x8F #define MT6360_REG_SWRESET 0xA0 @@ -22,6 +25,8 @@ #define MT6360_REG_DRPCTRL1 0xA2 #define MT6360_REG_DRPCTRL2 0xA3 #define MT6360_REG_I2CTORST 0xBF +#define MT6360_REG_PHYCTRL11 0xCA +#define MT6360_REG_RXCTRL1 0xCE #define MT6360_REG_RXCTRL2 0xCF #define MT6360_REG_CTDCTRL2 0xEC @@ -106,6 +111,27 @@ static int mt6360_tcpc_init(struct tcpci *tcpci, struct tcpci_data *tdata) if (ret) return ret; + /* BMC PHY */ + ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL1, 0x3A70); + if (ret) + return ret; + + ret = 
regmap_write(regmap, MT6360_REG_PHYCTRL3, 0x82); + if (ret) + return ret; + + ret = regmap_write(regmap, MT6360_REG_PHYCTRL7, 0x36); + if (ret) + return ret; + + ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL11, 0x3C60); + if (ret) + return ret; + + ret = regmap_write(regmap, MT6360_REG_RXCTRL1, 0xE8); + if (ret) + return ret; + /* Set shipping mode off, AUTOIDLE on */ return regmap_write(regmap, MT6360_REG_MODECTRL2, 0x7A); } diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 534c233eadea..788eb05a193c 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -5383,6 +5383,10 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port) case PR_SWAP_SNK_SRC_SOURCE_ON: /* Do nothing, vsafe0v is expected during transition */ break; + case SNK_ATTACH_WAIT: + case SNK_DEBOUNCED: + /* Do nothing, still waiting for VSAFE5V to connect */ + break; default: if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled) tcpm_set_state(port, SNK_UNATTACHED, 0); diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 0c4a0f1ac044..516d00affa82 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -76,6 +76,10 @@ static int ucsi_read_error(struct ucsi *ucsi) if (ret) return ret; + ret = ucsi_acknowledge_command(ucsi); + if (ret) + return ret; + switch (error) { case UCSI_ERROR_INCOMPATIBLE_PARTNER: return -EOPNOTSUPP; @@ -511,8 +515,6 @@ static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner, num_pdos * sizeof(u32)); if (ret < 0) dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret); - if (ret == 0 && offset == 0) - dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n"); return ret; } @@ -955,6 +957,8 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role) role == TYPEC_HOST)) goto out_unlock; + reinit_completion(&con->complete); + command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num); command |= UCSI_SET_UOR_ROLE(role); command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS; @@ -962,14 +966,18 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role) if (ret < 0) goto out_unlock; + mutex_unlock(&con->lock); + if (!wait_for_completion_timeout(&con->complete, - msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) - ret = -ETIMEDOUT; + msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) + return -ETIMEDOUT; + + return 0; out_unlock: mutex_unlock(&con->lock); - return ret < 0 ?
ret : 0; + return ret; } static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) @@ -991,6 +999,8 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) if (cur_role == role) goto out_unlock; + reinit_completion(&con->complete); + command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num); command |= UCSI_SET_PDR_ROLE(role); command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS; @@ -998,11 +1008,13 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) if (ret < 0) goto out_unlock; + mutex_unlock(&con->lock); + if (!wait_for_completion_timeout(&con->complete, - msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) { - ret = -ETIMEDOUT; - goto out_unlock; - } + msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) + return -ETIMEDOUT; + + mutex_lock(&con->lock); /* Something has gone wrong while swapping the role */ if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) != @@ -1049,6 +1061,15 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) con->num = index + 1; con->ucsi = ucsi; + cap->fwnode = ucsi_find_fwnode(con); + con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode); + if (IS_ERR(con->usb_role_sw)) { + dev_err(ucsi->dev, "con%d: failed to get usb role switch\n", + con->num); + return PTR_ERR(con->usb_role_sw); + } + + /* Delay other interactions with the con until registration is complete */ mutex_lock(&con->lock); @@ -1084,7 +1105,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DEBUG_ACCESSORY) *accessory = TYPEC_ACCESSORY_DEBUG; - cap->fwnode = ucsi_find_fwnode(con); cap->driver_data = con; cap->ops = &ucsi_ops; @@ -1142,13 +1162,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) ucsi_port_psy_changed(con); } - con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode); - if (IS_ERR(con->usb_role_sw)) { - dev_err(ucsi->dev, "con%d: failed to get usb role switch\n", - con->num); - con->usb_role_sw = NULL; - } - /* Only notify USB controller if partner supports USB data */ if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB)) u_role = USB_ROLE_NONE; @@ -1261,12 +1274,21 @@ err: static void ucsi_init_work(struct work_struct *work) { - struct ucsi *ucsi = container_of(work, struct ucsi, work); + struct ucsi_android *aucsi = container_of(work, + struct ucsi_android, work.work); int ret; - ret = ucsi_init(ucsi); + ret = ucsi_init(&aucsi->ucsi); if (ret) - dev_err(ucsi->dev, "PPM init failed (%d)\n", ret); + dev_err(aucsi->ucsi.dev, "PPM init failed (%d)\n", ret); + + if (ret == -EPROBE_DEFER) { + if (aucsi->work_count++ > UCSI_ROLE_SWITCH_WAIT_COUNT) + return; + + queue_delayed_work(system_long_wq, &aucsi->work, + UCSI_ROLE_SWITCH_INTERVAL); + } } /** @@ -1298,15 +1320,17 @@ EXPORT_SYMBOL_GPL(ucsi_set_drvdata); struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops) { struct ucsi *ucsi; + struct ucsi_android *aucsi; if (!ops || !ops->read || !ops->sync_write || !ops->async_write) return ERR_PTR(-EINVAL); - ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL); - if (!ucsi) + aucsi = kzalloc(sizeof(*aucsi), GFP_KERNEL); + if (!aucsi) return ERR_PTR(-ENOMEM); - INIT_WORK(&ucsi->work, ucsi_init_work); + ucsi = &aucsi->ucsi; + INIT_DELAYED_WORK(&aucsi->work, ucsi_init_work); mutex_init(&ucsi->ppm_lock); ucsi->dev = dev; ucsi->ops = ops; @@ -1321,7 +1345,9 @@ EXPORT_SYMBOL_GPL(ucsi_create); */ void ucsi_destroy(struct ucsi *ucsi) { - kfree(ucsi); + struct ucsi_android *aucsi = container_of(ucsi, + struct ucsi_android, ucsi); + kfree(aucsi); } 
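The ucsi_create()/ucsi_destroy() rework above relies on the standard container_of() embedding idiom: struct ucsi is embedded inside struct ucsi_android, so the pointer to the inner structure handed out to callers can always be converted back to the wrapper that was actually allocated. A standalone sketch of the idiom follows, with illustrative type names and a userspace stand-in for the kernel's container_of() macro.

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace stand-in for the kernel's container_of() macro. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base {                   /* plays the role of struct ucsi */
            int version;
    };

    struct wrapper {                /* plays the role of struct ucsi_android */
            struct base base;       /* embedded member, base layout unchanged */
            int retry_count;        /* extra state lives only in the wrapper */
    };

    static struct base *create(void)
    {
            struct wrapper *w = calloc(1, sizeof(*w));

            return w ? &w->base : NULL;     /* callers see only the base */
    }

    static void destroy(struct base *b)
    {
            /* Recover the allocation that was actually made, just as
             * ucsi_destroy() recovers struct ucsi_android before kfree(). */
            struct wrapper *w = container_of(b, struct wrapper, base);

            free(w);
    }

    int main(void)
    {
            struct base *b = create();
            struct wrapper *w;

            if (!b)
                    return 1;
            w = container_of(b, struct wrapper, base);
            w->retry_count = 3;
            printf("retry_count=%d\n", w->retry_count);
            destroy(b);
            return 0;
    }

Because the extra state hangs off the wrapper, existing users that only know the inner struct keep working unchanged, which is what lets the patch add a delayed work item and a retry counter without touching the layout of struct ucsi.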
EXPORT_SYMBOL_GPL(ucsi_destroy); @@ -1331,6 +1357,8 @@ EXPORT_SYMBOL_GPL(ucsi_destroy); */ int ucsi_register(struct ucsi *ucsi) { + struct ucsi_android *aucsi = container_of(ucsi, + struct ucsi_android, ucsi); int ret; ret = ucsi->ops->read(ucsi, UCSI_VERSION, &ucsi->version, @@ -1341,7 +1369,7 @@ int ucsi_register(struct ucsi *ucsi) if (!ucsi->version) return -ENODEV; - queue_work(system_long_wq, &ucsi->work); + queue_delayed_work(system_long_wq, &aucsi->work, 0); return 0; } @@ -1355,11 +1383,13 @@ EXPORT_SYMBOL_GPL(ucsi_register); */ void ucsi_unregister(struct ucsi *ucsi) { + struct ucsi_android *aucsi = container_of(ucsi, + struct ucsi_android, ucsi); u64 cmd = UCSI_SET_NOTIFICATION_ENABLE; int i; /* Make sure that we are not in the middle of driver initialization */ - cancel_work_sync(&ucsi->work); + cancel_delayed_work_sync(&aucsi->work); /* Disable notifications */ ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd)); diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index b3450d87f324..d366ab2add37 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -289,6 +289,9 @@ struct ucsi { struct ucsi_connector *connector; struct work_struct work; +#define UCSI_ROLE_SWITCH_RETRY_PER_HZ 10 +#define UCSI_ROLE_SWITCH_INTERVAL (HZ / UCSI_ROLE_SWITCH_RETRY_PER_HZ) +#define UCSI_ROLE_SWITCH_WAIT_COUNT (10 * UCSI_ROLE_SWITCH_RETRY_PER_HZ) /* PPM Communication lock */ struct mutex ppm_lock; @@ -304,6 +307,25 @@ struct ucsi { #define EVENT_PROCESSING 3 }; +/** + * struct ucsi_android - carries extra parameters without modifying the + * layout of struct ucsi. + * @ucsi: the embedded ucsi structure. + * @work: delayed work structure for queuing ucsi_init_work. + * @work_count: tracks the wait count (max: UCSI_ROLE_SWITCH_WAIT_COUNT). + * + * Required to address Bug: 260537721. + * If the role switch module probes late, fwnode_usb_role_switch_get() + * will fail with -EPROBE_DEFER. To recover, restart ucsi_init_work + * via a delayed workqueue to look up the fwnode again.
+ */ +struct ucsi_android { + struct ucsi ucsi; + struct delayed_work work; + int work_count; +}; + #define UCSI_MAX_SVID 5 #define UCSI_MAX_ALTMODES (UCSI_MAX_SVID * 6) diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index d8d3892e5a69..3c6d452e3bf4 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -393,7 +393,6 @@ static int stub_probe(struct usb_device *udev) err_port: dev_set_drvdata(&udev->dev, NULL); - usb_put_dev(udev); /* we already have busid_priv, just lock busid_lock */ spin_lock(&busid_priv->busid_lock); @@ -408,6 +407,7 @@ call_put_busid_priv: put_busid_priv(busid_priv); sdev_free: + usb_put_dev(udev); stub_device_free(sdev); return rc; diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 325c22008e53..5dd41e8215e0 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c @@ -138,7 +138,9 @@ static int tweak_set_configuration_cmd(struct urb *urb) req = (struct usb_ctrlrequest *) urb->setup_packet; config = le16_to_cpu(req->wValue); + usb_lock_device(sdev->udev); err = usb_set_configuration(sdev->udev, config); + usb_unlock_device(sdev->udev); if (err && err != -ENODEV) dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n", config, err); diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index f2ad450db547..e65c0fa95d31 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -473,11 +473,14 @@ static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; + bool old_ready; spin_lock(&vdpasim->lock); + old_ready = vq->ready; vq->ready = ready; - if (vq->ready) + if (vq->ready && !old_ready) { vdpasim_queue_ready(vdpasim, idx); + } spin_unlock(&vdpasim->lock); } diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index 7d922950caaf..74c2e5411469 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -35,7 +35,10 @@ struct mdev_device { bool active; }; -#define to_mdev_device(dev) container_of(dev, struct mdev_device, dev) +static inline struct mdev_device *to_mdev_device(struct device *dev) +{ + return container_of(dev, struct mdev_device, dev); +} #define dev_is_mdev(d) ((d)->bus == &mdev_bus_type) struct mdev_type { diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 2151bc7f87ab..90db9d66867c 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -46,7 +46,6 @@ static struct vfio { struct mutex group_lock; struct cdev group_cdev; dev_t group_devt; - wait_queue_head_t release_q; } vfio; struct vfio_iommu_driver { @@ -90,15 +89,6 @@ struct vfio_group { struct blocking_notifier_head notifier; }; -struct vfio_device { - struct kref kref; - struct device *dev; - const struct vfio_device_ops *ops; - struct vfio_group *group; - struct list_head group_next; - void *device_data; -}; - #ifdef CONFIG_VFIO_NOIOMMU static bool noiommu __read_mostly; module_param_named(enable_unsafe_noiommu_mode, @@ -532,67 +522,17 @@ static struct vfio_group *vfio_group_get_from_dev(struct device *dev) /** * Device objects - create, release, get, put, search */ -static -struct vfio_device *vfio_group_create_device(struct vfio_group *group, - struct device *dev, - const struct vfio_device_ops *ops, - void *device_data) -{ - struct vfio_device *device; - - device = kzalloc(sizeof(*device), GFP_KERNEL); - if (!device) - return ERR_PTR(-ENOMEM); - - 
kref_init(&device->kref); - device->dev = dev; - device->group = group; - device->ops = ops; - device->device_data = device_data; - dev_set_drvdata(dev, device); - - /* No need to get group_lock, caller has group reference */ - vfio_group_get(group); - - mutex_lock(&group->device_lock); - list_add(&device->group_next, &group->device_list); - group->dev_counter++; - mutex_unlock(&group->device_lock); - - return device; -} - -static void vfio_device_release(struct kref *kref) -{ - struct vfio_device *device = container_of(kref, - struct vfio_device, kref); - struct vfio_group *group = device->group; - - list_del(&device->group_next); - group->dev_counter--; - mutex_unlock(&group->device_lock); - - dev_set_drvdata(device->dev, NULL); - - kfree(device); - - /* vfio_del_group_dev may be waiting for this device */ - wake_up(&vfio.release_q); -} - /* Device reference always implies a group reference */ void vfio_device_put(struct vfio_device *device) { - struct vfio_group *group = device->group; - kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock); - vfio_group_put(group); + if (refcount_dec_and_test(&device->refcount)) + complete(&device->comp); } EXPORT_SYMBOL_GPL(vfio_device_put); -static void vfio_device_get(struct vfio_device *device) +static bool vfio_device_try_get(struct vfio_device *device) { - vfio_group_get(device->group); - kref_get(&device->kref); + return refcount_inc_not_zero(&device->refcount); } static struct vfio_device *vfio_group_get_device(struct vfio_group *group, @@ -602,8 +542,7 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group, mutex_lock(&group->device_lock); list_for_each_entry(device, &group->device_list, group_next) { - if (device->dev == dev) { - vfio_device_get(device); + if (device->dev == dev && vfio_device_try_get(device)) { mutex_unlock(&group->device_lock); return device; } @@ -801,14 +740,23 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb, /** * VFIO driver API */ -int vfio_add_group_dev(struct device *dev, - const struct vfio_device_ops *ops, void *device_data) +void vfio_init_group_dev(struct vfio_device *device, struct device *dev, + const struct vfio_device_ops *ops, void *device_data) { + init_completion(&device->comp); + device->dev = dev; + device->ops = ops; + device->device_data = device_data; +} +EXPORT_SYMBOL_GPL(vfio_init_group_dev); + +int vfio_register_group_dev(struct vfio_device *device) +{ + struct vfio_device *existing_device; struct iommu_group *iommu_group; struct vfio_group *group; - struct vfio_device *device; - iommu_group = iommu_group_get(dev); + iommu_group = iommu_group_get(device->dev); if (!iommu_group) return -EINVAL; @@ -827,30 +775,51 @@ int vfio_add_group_dev(struct device *dev, iommu_group_put(iommu_group); } - device = vfio_group_get_device(group, dev); - if (device) { - dev_WARN(dev, "Device already exists on group %d\n", + existing_device = vfio_group_get_device(group, device->dev); + if (existing_device) { + dev_WARN(device->dev, "Device already exists on group %d\n", iommu_group_id(iommu_group)); - vfio_device_put(device); + vfio_device_put(existing_device); vfio_group_put(group); return -EBUSY; } - device = vfio_group_create_device(group, dev, ops, device_data); - if (IS_ERR(device)) { - vfio_group_put(group); - return PTR_ERR(device); - } + /* Our reference on group is moved to the device */ + device->group = group; - /* - * Drop all but the vfio_device reference. 
The vfio_device holds - * a reference to the vfio_group, which holds a reference to the - * iommu_group. - */ - vfio_group_put(group); + /* Refcounting can't start until the driver calls register */ + refcount_set(&device->refcount, 1); + + mutex_lock(&group->device_lock); + list_add(&device->group_next, &group->device_list); + group->dev_counter++; + mutex_unlock(&group->device_lock); return 0; } +EXPORT_SYMBOL_GPL(vfio_register_group_dev); + +int vfio_add_group_dev(struct device *dev, const struct vfio_device_ops *ops, + void *device_data) +{ + struct vfio_device *device; + int ret; + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) + return -ENOMEM; + + vfio_init_group_dev(device, dev, ops, device_data); + ret = vfio_register_group_dev(device); + if (ret) + goto err_kfree; + dev_set_drvdata(dev, device); + return 0; + +err_kfree: + kfree(device); + return ret; +} EXPORT_SYMBOL_GPL(vfio_add_group_dev); /** @@ -895,9 +864,8 @@ static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, ret = !strcmp(dev_name(it->dev), buf); } - if (ret) { + if (ret && vfio_device_try_get(it)) { device = it; - vfio_device_get(device); break; } } @@ -918,21 +886,13 @@ EXPORT_SYMBOL_GPL(vfio_device_data); /* * Decrement the device reference count and wait for the device to be * removed. Open file descriptors for the device... */ -void *vfio_del_group_dev(struct device *dev) +void vfio_unregister_group_dev(struct vfio_device *device) { - DEFINE_WAIT_FUNC(wait, woken_wake_function); - struct vfio_device *device = dev_get_drvdata(dev); struct vfio_group *group = device->group; - void *device_data = device->device_data; struct vfio_unbound_dev *unbound; unsigned int i = 0; bool interrupted = false; - - /* - * The group exists so long as we have a device reference. Get - * a group reference and use it to scan for the device going away. - */ - vfio_group_get(group); + long rc; /* * When the device is removed from the group, the group suddenly @@ -945,7 +905,7 @@ void *vfio_del_group_dev(struct device *dev) */ unbound = kzalloc(sizeof(*unbound), GFP_KERNEL); if (unbound) { - unbound->dev = dev; + unbound->dev = device->dev; mutex_lock(&group->unbound_lock); list_add(&unbound->unbound_next, &group->unbound_list); mutex_unlock(&group->unbound_lock); @@ -953,44 +913,33 @@ void *vfio_del_group_dev(struct device *dev) WARN_ON(!unbound); vfio_device_put(device); - - /* - * If the device is still present in the group after the above - * 'put', then it is in use and we need to request it from the - * bus driver. The driver may in turn need to request the - * device from the user. We send the request on an arbitrary - * interval with counter to allow the driver to take escalating - * measures to release the device if it has the ability to do so. 
- */ - add_wait_queue(&vfio.release_q, &wait); - - do { - device = vfio_group_get_device(group, dev); - if (!device) - break; - + rc = try_wait_for_completion(&device->comp); + while (rc <= 0) { if (device->ops->request) - device->ops->request(device_data, i++); - - vfio_device_put(device); + device->ops->request(device->device_data, i++); if (interrupted) { - wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10); + rc = wait_for_completion_timeout(&device->comp, + HZ * 10); } else { - wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10); - if (signal_pending(current)) { + rc = wait_for_completion_interruptible_timeout( + &device->comp, HZ * 10); + if (rc < 0) { interrupted = true; - dev_warn(dev, + dev_warn(device->dev, "Device is currently in use, task" " \"%s\" (%d) " "blocked until device is released", current->comm, task_pid_nr(current)); } } + } - } while (1); + mutex_lock(&group->device_lock); + list_del(&device->group_next); + group->dev_counter--; + mutex_unlock(&group->device_lock); - remove_wait_queue(&vfio.release_q, &wait); /* * In order to support multiple devices per group, devices can be * plucked from the group while other devices in the group are still @@ -1008,8 +957,19 @@ void *vfio_del_group_dev(struct device *dev) if (list_empty(&group->device_list)) wait_event(group->container_q, !group->container); + /* Matches the get in vfio_register_group_dev() */ vfio_group_put(group); +} +EXPORT_SYMBOL_GPL(vfio_unregister_group_dev); +void *vfio_del_group_dev(struct device *dev) +{ + struct vfio_device *device = dev_get_drvdata(dev); + void *device_data = device->device_data; + + vfio_unregister_group_dev(device); + dev_set_drvdata(dev, NULL); + kfree(device); return device_data; } EXPORT_SYMBOL_GPL(vfio_del_group_dev); @@ -1823,6 +1783,7 @@ struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps, buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL); if (!buf) { kfree(caps->buf); + caps->buf = NULL; caps->size = 0; return ERR_PTR(-ENOMEM); } @@ -2356,7 +2317,6 @@ static int __init vfio_init(void) mutex_init(&vfio.iommu_drivers_lock); INIT_LIST_HEAD(&vfio.group_list); INIT_LIST_HEAD(&vfio.iommu_drivers_list); - init_waitqueue_head(&vfio.release_q); ret = misc_register(&vfio_dev); if (ret) { diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index fbd438e9b9b0..ce50ca9a320c 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -98,6 +98,12 @@ struct vfio_dma { unsigned long *bitmap; }; +struct vfio_batch { + struct page **pages; /* for pin_user_pages_remote */ + struct page *fallback_page; /* if pages alloc fails */ + int capacity; /* length of pages array */ +}; + struct vfio_group { struct iommu_group *iommu_group; struct list_head next; @@ -428,6 +434,31 @@ static int put_pfn(unsigned long pfn, int prot) return 0; } +#define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *)) + +static void vfio_batch_init(struct vfio_batch *batch) +{ + if (unlikely(disable_hugepages)) + goto fallback; + + batch->pages = (struct page **) __get_free_page(GFP_KERNEL); + if (!batch->pages) + goto fallback; + + batch->capacity = VFIO_BATCH_MAX_CAPACITY; + return; + +fallback: + batch->pages = &batch->fallback_page; + batch->capacity = 1; +} + +static void vfio_batch_fini(struct vfio_batch *batch) +{ + if (batch->capacity == VFIO_BATCH_MAX_CAPACITY) + free_page((unsigned long)batch->pages); +} + static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, unsigned long vaddr, unsigned long *pfn, bool 
write_fault) @@ -464,10 +495,14 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, return ret; } -static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, - int prot, unsigned long *pfn) +/* + * Returns the positive number of pfns successfully obtained or a negative + * error code. + */ +static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, + long npages, int prot, unsigned long *pfn, + struct page **pages) { - struct page *page[1]; struct vm_area_struct *vma; unsigned int flags = 0; int ret; @@ -476,11 +511,22 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, flags |= FOLL_WRITE; mmap_read_lock(mm); - ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM, - page, NULL, NULL); - if (ret == 1) { - *pfn = page_to_pfn(page[0]); - ret = 0; + ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, + pages, NULL, NULL); + if (ret > 0) { + int i; + + /* + * The zero page is always resident, we don't need to pin it + * and it falls into our invalid/reserved test so we don't + * unpin in put_pfn(). Unpin all zero pages in the batch here. + */ + for (i = 0 ; i < ret; i++) { + if (unlikely(is_zero_pfn(page_to_pfn(pages[i])))) + unpin_user_page(pages[i]); + } + + *pfn = page_to_pfn(pages[0]); goto done; } @@ -494,8 +540,12 @@ retry: if (ret == -EAGAIN) goto retry; - if (!ret && !is_invalid_reserved_pfn(*pfn)) - ret = -EFAULT; + if (!ret) { + if (is_invalid_reserved_pfn(*pfn)) + ret = 1; + else + ret = -EFAULT; + } } done: mmap_read_unlock(mm); @@ -509,7 +559,7 @@ done: */ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, long npage, unsigned long *pfn_base, - unsigned long limit) + unsigned long limit, struct vfio_batch *batch) { unsigned long pfn = 0; long ret, pinned = 0, lock_acct = 0; @@ -520,8 +570,9 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, if (!current->mm) return -ENODEV; - ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base); - if (ret) + ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, pfn_base, + batch->pages); + if (ret < 0) return ret; pinned++; @@ -547,8 +598,9 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, /* Lock all the consecutive pages from pfn_base */ for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage; pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { - ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn); - if (ret) + ret = vaddr_get_pfns(current->mm, vaddr, 1, dma->prot, &pfn, + batch->pages); + if (ret < 0) break; if (pfn != *pfn_base + pinned || @@ -574,7 +626,7 @@ out: ret = vfio_lock_acct(dma, lock_acct, false); unpin_out: - if (ret) { + if (ret < 0) { if (!rsvd) { for (pfn = *pfn_base ; pinned ; pfn++, pinned--) put_pfn(pfn, dma->prot); @@ -610,6 +662,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, unsigned long *pfn_base, bool do_accounting) { + struct page *pages[1]; struct mm_struct *mm; int ret; @@ -617,8 +670,13 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, if (!mm) return -ENODEV; - ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); - if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { + ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages); + if (ret != 1) + goto out; + + ret = 0; + + if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { ret = vfio_lock_acct(dma, 1, true); 
if (ret) { put_pfn(*pfn_base, dma->prot); @@ -630,6 +688,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, } } +out: mmput(mm); return ret; } @@ -1263,15 +1322,19 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, { dma_addr_t iova = dma->iova; unsigned long vaddr = dma->vaddr; + struct vfio_batch batch; size_t size = map_size; long npage; unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; int ret = 0; + vfio_batch_init(&batch); + while (size) { /* Pin a contiguous chunk of memory */ npage = vfio_pin_pages_remote(dma, vaddr + dma->size, - size >> PAGE_SHIFT, &pfn, limit); + size >> PAGE_SHIFT, &pfn, limit, + &batch); if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; @@ -1291,6 +1354,7 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, dma->size += npage << PAGE_SHIFT; } + vfio_batch_fini(&batch); dma->iommu_mapped = true; if (ret) @@ -1449,6 +1513,7 @@ static int vfio_bus_type(struct device *dev, void *data) static int vfio_iommu_replay(struct vfio_iommu *iommu, struct vfio_domain *domain) { + struct vfio_batch batch; struct vfio_domain *d = NULL; struct rb_node *n; unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; @@ -1459,6 +1524,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, d = list_first_entry(&iommu->domain_list, struct vfio_domain, next); + vfio_batch_init(&batch); + n = rb_first(&iommu->dma_list); for (; n; n = rb_next(n)) { @@ -1506,7 +1573,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, npage = vfio_pin_pages_remote(dma, vaddr, n >> PAGE_SHIFT, - &pfn, limit); + &pfn, limit, + &batch); if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; @@ -1539,6 +1607,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, dma->iommu_mapped = true; } + vfio_batch_fini(&batch); return 0; unwind: @@ -1579,6 +1648,7 @@ unwind: } } + vfio_batch_fini(&batch); return ret; } diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index da02c3e96e7b..5beb20768b20 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -472,6 +472,7 @@ static void vhost_tx_batch(struct vhost_net *net, goto signal_used; msghdr->msg_control = &ctl; + msghdr->msg_controllen = sizeof(ctl); err = sock->ops->sendmsg(sock, msghdr, 0); if (unlikely(err < 0)) { vq_err(&nvq->vq, "Fail to batch sending packets\n"); @@ -1449,13 +1450,9 @@ err: return ERR_PTR(r); } -static struct ptr_ring *get_tap_ptr_ring(int fd) +static struct ptr_ring *get_tap_ptr_ring(struct file *file) { struct ptr_ring *ring; - struct file *file = fget(fd); - - if (!file) - return NULL; ring = tun_get_tx_ring(file); if (!IS_ERR(ring)) goto out; @@ -1464,7 +1461,6 @@ static struct ptr_ring *get_tap_ptr_ring(int fd) goto out; ring = NULL; out: - fput(file); return ring; } @@ -1551,8 +1547,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) r = vhost_net_enable_vq(n, vq); if (r) goto err_used; - if (index == VHOST_NET_VQ_RX) - nvq->rx_ring = get_tap_ptr_ring(fd); + if (index == VHOST_NET_VQ_RX) { + if (sock) + nvq->rx_ring = get_tap_ptr_ring(sock->file); + else + nvq->rx_ring = NULL; + } oldubufs = nvq->ubufs; nvq->ubufs = ubufs; diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index e4d60009d908..04578aa87e4d 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -97,8 +97,11 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid) return; irq = ops->get_vq_irq(vdpa, qid); + if (irq < 0) + return; + 
irq_bypass_unregister_producer(&vq->call_ctx.producer); - if (!vq->call_ctx.ctx || irq < 0) + if (!vq->call_ctx.ctx) return; vq->call_ctx.producer.token = vq->call_ctx.ctx; diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 0bd7e64331f0..5a0340c85dc6 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -274,7 +274,7 @@ __vringh_iov(struct vringh *vrh, u16 i, int (*copy)(const struct vringh *vrh, void *dst, const void *src, size_t len)) { - int err, count = 0, up_next, desc_max; + int err, count = 0, indirect_count = 0, up_next, desc_max; struct vring_desc desc, *descs; struct vringh_range range = { -1ULL, 0 }, slowrange; bool slow = false; @@ -331,7 +331,12 @@ __vringh_iov(struct vringh *vrh, u16 i, continue; } - if (count++ == vrh->vring.num) { + if (up_next == -1) + count++; + else + indirect_count++; + + if (count > vrh->vring.num || indirect_count > desc_max) { vringh_bad("Descriptor loop in %p", descs); err = -ELOOP; goto fail; @@ -393,6 +398,7 @@ __vringh_iov(struct vringh *vrh, u16 i, i = return_from_indirect(vrh, &up_next, &descs, &desc_max); slow = false; + indirect_count = 0; } else break; } diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index c282fc0d04bd..00f626733eb5 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -359,7 +359,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, return NULL; } - pkt->buf = kmalloc(pkt->len, GFP_KERNEL); + pkt->buf = kvmalloc(pkt->len, GFP_KERNEL); if (!pkt->buf) { kfree(pkt); return NULL; diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c index 6a26a364f9bd..5fa12dc7c02c 100644 --- a/drivers/video/console/sticore.c +++ b/drivers/video/console/sticore.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "../fbdev/sticore.h" @@ -1127,6 +1128,24 @@ int sti_call(const struct sti_struct *sti, unsigned long func, return ret; } +#if defined(CONFIG_FB_STI) +/* check if given fb_info is the primary device */ +int fb_is_primary_device(struct fb_info *info) +{ + struct sti_struct *sti; + + sti = sti_get_rom(0); + + /* if no built-in graphics card found, allow any fb driver as default */ + if (!sti) + return true; + + /* return true if it's the default built-in framebuffer driver */ + return (sti->info == info); +} +EXPORT_SYMBOL(fb_is_primary_device); +#endif + MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer"); MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 33595cc4778e..6252cd59673e 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -711,16 +711,18 @@ static int clcdfb_of_init_display(struct clcd_fb *fb) return -ENODEV; panel = of_graph_get_remote_port_parent(endpoint); - if (!panel) - return -ENODEV; + if (!panel) { + err = -ENODEV; + goto out_endpoint_put; + } err = clcdfb_of_get_backlight(&fb->dev->dev, fb->panel); if (err) - return err; + goto out_panel_put; err = clcdfb_of_get_mode(&fb->dev->dev, panel, fb->panel); if (err) - return err; + goto out_panel_put; err = of_property_read_u32(fb->dev->dev.of_node, "max-memory-bandwidth", &max_bandwidth); @@ -749,11 +751,21 @@ static int clcdfb_of_init_display(struct clcd_fb *fb) if (of_property_read_u32_array(endpoint, "arm,pl11x,tft-r0g0b0-pads", - tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0) - return -ENOENT; + tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0) { + err = -ENOENT; + goto out_panel_put; + } 
+ + of_node_put(panel); + of_node_put(endpoint); return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0], tft_r0b0g0[1], tft_r0b0g0[2]); +out_panel_put: + of_node_put(panel); +out_endpoint_put: + of_node_put(endpoint); + return err; } static int clcdfb_of_vram_setup(struct clcd_fb *fb) @@ -771,12 +783,15 @@ static int clcdfb_of_vram_setup(struct clcd_fb *fb) return -ENODEV; fb->fb.screen_base = of_iomap(memory, 0); - if (!fb->fb.screen_base) + if (!fb->fb.screen_base) { + of_node_put(memory); return -ENOMEM; + } fb->fb.fix.smem_start = of_translate_address(memory, of_get_address(memory, 0, &size, NULL)); fb->fb.fix.smem_len = size; + of_node_put(memory); return 0; } diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c index edf169d0816e..8d092b106470 100644 --- a/drivers/video/fbdev/arkfb.c +++ b/drivers/video/fbdev/arkfb.c @@ -778,7 +778,12 @@ static int arkfb_set_par(struct fb_info *info) return -EINVAL; } - ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul); + value = (hdiv * info->var.pixclock) / hmul; + if (!value) { + fb_dbg(info, "invalid pixclock\n"); + value = 1; + } + ark_set_pixclock(info, value); svga_set_timings(par->state.vgabase, &ark_timing_regs, &(info->var), hmul, hdiv, (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, (info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1, @@ -789,6 +794,8 @@ static int arkfb_set_par(struct fb_info *info) value = ((value * hmul / hdiv) / 8) - 5; vga_wcrt(par->state.vgabase, 0x42, (value + 1) / 2); + if (screen_size > info->screen_size) + screen_size = info->screen_size; memset_io(info->screen_base, 0x00, screen_size); /* Device and screen back on */ svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80); diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c index 393894af26f8..2b00a9d554fc 100644 --- a/drivers/video/fbdev/chipsfb.c +++ b/drivers/video/fbdev/chipsfb.c @@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) err_release_fb: framebuffer_release(p); err_disable: + pci_disable_device(dp); err_out: return rc; } diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index f102519ccefb..27828435dd4f 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -123,8 +123,8 @@ static int logo_lines; enums. */ static int logo_shown = FBCON_LOGO_CANSHOW; /* console mappings */ -static int first_fb_vc; -static int last_fb_vc = MAX_NR_CONSOLES - 1; +static unsigned int first_fb_vc; +static unsigned int last_fb_vc = MAX_NR_CONSOLES - 1; static int fbcon_is_default = 1; static int primary_device = -1; static int fbcon_has_console_bind; @@ -472,10 +472,12 @@ static int __init fb_console_setup(char *this_opt) options += 3; if (*options) first_fb_vc = simple_strtoul(options, &options, 10) - 1; - if (first_fb_vc < 0) + if (first_fb_vc >= MAX_NR_CONSOLES) first_fb_vc = 0; if (*options++ == '-') last_fb_vc = simple_strtoul(options, &options, 10) - 1; + if (last_fb_vc < first_fb_vc || last_fb_vc >= MAX_NR_CONSOLES) + last_fb_vc = MAX_NR_CONSOLES - 1; fbcon_is_default = 0; continue; } @@ -607,7 +609,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info, if (scr_readw(r) != vc->vc_video_erase_char) break; if (r != q && new_rows >= rows + logo_lines) { - save = kmalloc(array3_size(logo_lines, new_cols, 2), + save = kzalloc(array3_size(logo_lines, new_cols, 2), GFP_KERNEL); if (save) { int i = cols < new_cols ? 
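The fbcon hunk above hardens the video=fbcon:vc:<n1>-<n2> parsing: the bounds become unsigned so an input of "0" (which the "- 1" wraps to UINT_MAX) is caught by a single >= MAX_NR_CONSOLES test, and the upper bound is now validated against both the lower bound and the console limit. A standalone sketch of the same clamping; MAX_NR_CONSOLES is 63 in the kernel, here just a constant:

#include <stdlib.h>

#define MAX_NR_CONSOLES 63

/* Parse a 1-based "N-M" range into a clamped 0-based [first, last]. */
static void parse_vc_range(const char *s, unsigned int *first,
                           unsigned int *last)
{
        char *end;

        *first = (unsigned int)strtoul(s, &end, 10) - 1;
        *last = MAX_NR_CONSOLES - 1;

        if (*first >= MAX_NR_CONSOLES)  /* also catches "0" wrapping */
                *first = 0;

        if (*end == '-')
                *last = (unsigned int)strtoul(end + 1, &end, 10) - 1;

        if (*last < *first || *last >= MAX_NR_CONSOLES)
                *last = MAX_NR_CONSOLES - 1;
}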
cols : new_cols; @@ -1717,8 +1719,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, case SM_UP: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - if (logo_shown >= 0) - goto redraw_up; switch (fb_scrollmode(p)) { case SCROLL_MOVE: fbcon_redraw_blit(vc, info, p, t, b - t - count, @@ -1807,8 +1807,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, case SM_DOWN: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - if (logo_shown >= 0) - goto redraw_down; switch (fb_scrollmode(p)) { case SCROLL_MOVE: fbcon_redraw_blit(vc, info, p, b - 1, b - t - count, @@ -2510,6 +2508,11 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, if (charcount != 256 && charcount != 512) return -EINVAL; + /* font bigger than screen resolution ? */ + if (w > FBCON_SWAP(info->var.rotate, info->var.xres, info->var.yres) || + h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres)) + return -EINVAL; + /* Make sure drawing engine can handle the font */ if (!(info->pixmap.blit_x & (1 << (font->width - 1))) || !(info->pixmap.blit_y & (1 << (font->height - 1)))) @@ -2771,6 +2774,34 @@ void fbcon_update_vcs(struct fb_info *info, bool all) } EXPORT_SYMBOL(fbcon_update_vcs); +/* let fbcon check if it supports a new screen resolution */ +int fbcon_modechange_possible(struct fb_info *info, struct fb_var_screeninfo *var) +{ + struct fbcon_ops *ops = info->fbcon_par; + struct vc_data *vc; + unsigned int i; + + WARN_CONSOLE_UNLOCKED(); + + if (!ops) + return 0; + + /* prevent setting a screen size which is smaller than font size */ + for (i = first_fb_vc; i <= last_fb_vc; i++) { + vc = vc_cons[i].d; + if (!vc || vc->vc_mode != KD_TEXT || + registered_fb[con2fb_map[i]] != info) + continue; + + if (vc->vc_font.width > FBCON_SWAP(var->rotate, var->xres, var->yres) || + vc->vc_font.height > FBCON_SWAP(var->rotate, var->yres, var->xres)) + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(fbcon_modechange_possible); + int fbcon_mode_deleted(struct fb_info *info, struct fb_videomode *mode) { @@ -3300,6 +3331,9 @@ static void fbcon_register_existing_fbs(struct work_struct *work) console_lock(); + deferred_takeover = false; + logo_shown = FBCON_LOGO_DONTSHOW; + for_each_registered_fb(i) fbcon_fb_registered(registered_fb[i]); @@ -3317,8 +3351,6 @@ static int fbcon_output_notifier(struct notifier_block *nb, pr_info("fbcon: Taking over console\n"); dummycon_unregister_output_notifier(&fbcon_output_nb); - deferred_takeover = false; - logo_shown = FBCON_LOGO_DONTSHOW; /* We may get called in atomic context */ schedule_work(&fbcon_deferred_takeover_work); diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 00939ca2065a..d787a344b3b8 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -513,7 +513,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate, while (n && (n * (logo->width + 8) - 8 > xres)) --n; - image.dx = (xres - n * (logo->width + 8) - 8) / 2; + image.dx = (xres - (n * (logo->width + 8) - 8)) / 2; image.dy = y ?: (yres - logo->height) / 2; } else { image.dx = 0; @@ -1019,6 +1019,16 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) if (ret) return ret; + /* verify that virtual resolution >= physical resolution */ + if (var->xres_virtual < var->xres || + var->yres_virtual < var->yres) { + pr_warn("WARNING: fbcon: Driver '%s' missed to adjust virtual screen size (%ux%u vs. 
%ux%u)\n", + info->fix.id, + var->xres_virtual, var->yres_virtual, + var->xres, var->yres); + return -EINVAL; + } + if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_NOW) return 0; @@ -1109,7 +1119,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, return -EFAULT; console_lock(); lock_fb_info(info); - ret = fb_set_var(info, &var); + ret = fbcon_modechange_possible(info, &var); + if (!ret) + ret = fb_set_var(info, &var); if (!ret) fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); unlock_fb_info(info); diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index 3c309ab20887..40baa79f8046 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -1008,7 +1008,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) struct pci_dev *pdev = NULL; void __iomem *fb_virt; int gen2vm = efi_enabled(EFI_BOOT); - resource_size_t pot_start, pot_end; phys_addr_t paddr; int ret; @@ -1059,23 +1058,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) dio_fb_size = screen_width * screen_height * screen_depth / 8; - if (gen2vm) { - pot_start = 0; - pot_end = -1; - } else { - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || - pci_resource_len(pdev, 0) < screen_fb_size) { - pr_err("Resource not available or (0x%lx < 0x%lx)\n", - (unsigned long) pci_resource_len(pdev, 0), - (unsigned long) screen_fb_size); - goto err1; - } - - pot_end = pci_resource_end(pdev, 0); - pot_start = pot_end - screen_fb_size + 1; - } - - ret = vmbus_allocate_mmio(&par->mem, hdev, pot_start, pot_end, + ret = vmbus_allocate_mmio(&par->mem, hdev, 0, -1, screen_fb_size, 0x100000, true); if (ret != 0) { pr_err("Unable to allocate framebuffer memory\n"); diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c index 52cce0db8bd3..8fb4e01e1943 100644 --- a/drivers/video/fbdev/i740fb.c +++ b/drivers/video/fbdev/i740fb.c @@ -400,7 +400,7 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var, u32 xres, right, hslen, left, xtotal; u32 yres, lower, vslen, upper, ytotal; u32 vxres, xoffset, vyres, yoffset; - u32 bpp, base, dacspeed24, mem; + u32 bpp, base, dacspeed24, mem, freq; u8 r7; int i; @@ -643,7 +643,12 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var, par->atc[VGA_ATC_OVERSCAN] = 0; /* Calculate VCLK that most closely matches the requested dot clock */ - i740_calc_vclk((((u32)1e9) / var->pixclock) * (u32)(1e3), par); + freq = (((u32)1e9) / var->pixclock) * (u32)(1e3); + if (freq < I740_RFREQ_FIX) { + fb_dbg(info, "invalid pixclock\n"); + freq = I740_RFREQ_FIX; + } + i740_calc_vclk(freq, par); /* Since we program the clocks ourselves, always use VCLK2. 
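For context on the i740fb hunk: pixclock is a pixel period in picoseconds, and the driver derives a frequency as (10^9 / pixclock) * 10^3, i.e. 10^12 / pixclock Hz evaluated in two steps to stay within 32 bits; the patch clamps the result so the later PLL math can never see zero. A sketch of the conversion, with a placeholder minimum since the real I740_RFREQ_FIX constant lives in the driver headers:

#include <stdint.h>

#define RFREQ_FIX_MIN 1u        /* placeholder for I740_RFREQ_FIX */

static uint32_t pixclock_ps_to_hz(uint32_t pixclock_ps)
{
        uint32_t freq;

        if (!pixclock_ps)       /* i740fb_check_var() now rejects 0 */
                return RFREQ_FIX_MIN;

        /* (1e9 / ps) * 1e3 == 1e12 / ps, without a 64-bit divide */
        freq = (1000000000u / pixclock_ps) * 1000u;
        if (freq < RFREQ_FIX_MIN)
                freq = RFREQ_FIX_MIN;
        return freq;
}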
*/ par->misc |= 0x0C; @@ -657,6 +662,9 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var, static int i740fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { + if (!var->pixclock) + return -EINVAL; + switch (var->bits_per_pixel) { case 8: var->red.offset = var->green.offset = var->blue.offset = 0; diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c index 0642555289e0..c12d46e28359 100644 --- a/drivers/video/fbdev/pm2fb.c +++ b/drivers/video/fbdev/pm2fb.c @@ -616,6 +616,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) return -EINVAL; } + if (!var->pixclock) { + DPRINTK("pixclock is zero\n"); + return -EINVAL; + } + if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) { DPRINTK("pixclock too high (%ldKHz)\n", PICOS2KHZ(var->pixclock)); diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c index 4279e13a3b58..9e9888e40c57 100644 --- a/drivers/video/fbdev/pxa3xx-gcu.c +++ b/drivers/video/fbdev/pxa3xx-gcu.c @@ -381,7 +381,7 @@ pxa3xx_gcu_write(struct file *file, const char *buff, struct pxa3xx_gcu_batch *buffer; struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file); - int words = count / 4; + size_t words = count / 4; /* Does not need to be atomic. There's a lock in user space, * but anyhow, this is just for statistics. */ @@ -650,6 +650,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev) for (i = 0; i < 8; i++) { ret = pxa3xx_gcu_add_buffer(dev, priv); if (ret) { + pxa3xx_gcu_free_buffers(dev, priv); dev_err(dev, "failed to allocate DMA memory\n"); goto err_disable_clk; } @@ -666,15 +667,15 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev) SHARED_SIZE, irq); return 0; -err_free_dma: - dma_free_coherent(dev, SHARED_SIZE, - priv->shared, priv->shared_phys); +err_disable_clk: + clk_disable_unprepare(priv->clk); err_misc_deregister: misc_deregister(&priv->misc_dev); -err_disable_clk: - clk_disable_unprepare(priv->clk); +err_free_dma: + dma_free_coherent(dev, SHARED_SIZE, + priv->shared, priv->shared_phys); return ret; } @@ -687,6 +688,7 @@ static int pxa3xx_gcu_remove(struct platform_device *pdev) pxa3xx_gcu_wait_idle(priv); misc_deregister(&priv->misc_dev); dma_free_coherent(dev, SHARED_SIZE, priv->shared, priv->shared_phys); + clk_disable_unprepare(priv->clk); pxa3xx_gcu_free_buffers(dev, priv); return 0; diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c index 5c74253e7b2c..a936455a3df2 100644 --- a/drivers/video/fbdev/s3fb.c +++ b/drivers/video/fbdev/s3fb.c @@ -902,6 +902,8 @@ static int s3fb_set_par(struct fb_info *info) value = clamp((htotal + hsstart + 1) / 2 + 2, hsstart + 4, htotal + 1); svga_wcrt_multi(par->state.vgabase, s3_dtpc_regs, value); + if (screen_size > info->screen_size) + screen_size = info->screen_size; memset_io(info->screen_base, 0x00, screen_size); /* Device and screen back on */ svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80); diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c index fde27feae5d0..d6b2ce95a859 100644 --- a/drivers/video/fbdev/sis/init.c +++ b/drivers/video/fbdev/sis/init.c @@ -355,12 +355,12 @@ SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, } break; case 400: - if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDwidth >= 600))) { + if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 800) && (LCDheight >= 600))) { if(VDisplay == 300) ModeIndex = ModeIndex_400x300[Depth]; } break; case 512: - if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && 
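The pxa3xx-gcu fix above is purely about unwind order: error labels must run in the reverse order of what probe acquired, and the buffer-allocation failure path must also free the buffers already allocated. The canonical shape of that ladder, with stubbed-out steps standing in for the driver's DMA, misc-device and clock resources:

#include <errno.h>

static int alloc_dma(void)        { return 0; }
static int register_misc(void)    { return 0; }
static int enable_clk(void)       { return 0; }
static int add_buffers(void)      { return -ENOMEM; } /* force unwind */
static void free_dma(void)        { }
static void deregister_misc(void) { }
static void disable_clk(void)     { }

static int probe_sketch(void)
{
        int ret;

        ret = alloc_dma();
        if (ret)
                return ret;
        ret = register_misc();
        if (ret)
                goto err_free_dma;
        ret = enable_clk();
        if (ret)
                goto err_misc_deregister;
        ret = add_buffers();
        if (ret)
                goto err_disable_clk;   /* undo newest acquisition first */
        return 0;

err_disable_clk:
        disable_clk();
err_misc_deregister:
        deregister_misc();
err_free_dma:
        free_dma();
        return ret;
}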
(LCDwidth >= 768))) { + if((!(VBFlags & CRT1_LCDA)) || ((LCDwidth >= 1024) && (LCDheight >= 768))) { if(VDisplay == 384) ModeIndex = ModeIndex_512x384[Depth]; } break; diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index 28768c272b73..5fa3f1e5dfe8 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c @@ -97,7 +97,6 @@ struct ufx_data { struct kref kref; int fb_count; bool virtualized; /* true when physical usb device not present */ - struct delayed_work free_framebuffer_work; atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */ atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */ u8 *edid; /* null until we read edid from hw or get from sysfs */ @@ -137,6 +136,8 @@ static int ufx_submit_urb(struct ufx_data *dev, struct urb * urb, size_t len); static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size); static void ufx_free_urb_list(struct ufx_data *dev); +static DEFINE_MUTEX(disconnect_mutex); + /* reads a control register */ static int ufx_reg_read(struct ufx_data *dev, u32 index, u32 *data) { @@ -1070,9 +1071,13 @@ static int ufx_ops_open(struct fb_info *info, int user) if (user == 0 && !console) return -EBUSY; + mutex_lock(&disconnect_mutex); + /* If the USB device is gone, we don't accept new opens */ - if (dev->virtualized) + if (dev->virtualized) { + mutex_unlock(&disconnect_mutex); return -ENODEV; + } dev->fb_count++; @@ -1096,6 +1101,8 @@ static int ufx_ops_open(struct fb_info *info, int user) pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d", info->node, user, info, dev->fb_count); + mutex_unlock(&disconnect_mutex); + return 0; } @@ -1108,15 +1115,24 @@ static void ufx_free(struct kref *kref) { struct ufx_data *dev = container_of(kref, struct ufx_data, kref); - /* this function will wait for all in-flight urbs to complete */ - if (dev->urbs.count > 0) - ufx_free_urb_list(dev); - - pr_debug("freeing ufx_data %p", dev); - kfree(dev); } +static void ufx_ops_destory(struct fb_info *info) +{ + struct ufx_data *dev = info->par; + int node = info->node; + + /* Assume info structure is freed after this point */ + framebuffer_release(info); + + pr_debug("fb_info for /dev/fb%d has been freed", node); + + /* release reference taken by kref_init in probe() */ + kref_put(&dev->kref, ufx_free); +} + + static void ufx_release_urb_work(struct work_struct *work) { struct urb_node *unode = container_of(work, struct urb_node, @@ -1125,14 +1141,9 @@ static void ufx_release_urb_work(struct work_struct *work) up(&unode->dev->urbs.limit_sem); } -static void ufx_free_framebuffer_work(struct work_struct *work) +static void ufx_free_framebuffer(struct ufx_data *dev) { - struct ufx_data *dev = container_of(work, struct ufx_data, - free_framebuffer_work.work); struct fb_info *info = dev->info; - int node = info->node; - - unregister_framebuffer(info); if (info->cmap.len != 0) fb_dealloc_cmap(&info->cmap); @@ -1144,11 +1155,6 @@ static void ufx_free_framebuffer_work(struct work_struct *work) dev->info = NULL; - /* Assume info structure is freed after this point */ - framebuffer_release(info); - - pr_debug("fb_info for /dev/fb%d has been freed", node); - /* ref taken in probe() as part of registering framebfufer */ kref_put(&dev->kref, ufx_free); } @@ -1160,11 +1166,13 @@ static int ufx_ops_release(struct fb_info *info, int user) { struct ufx_data *dev = info->par; + mutex_lock(&disconnect_mutex); + dev->fb_count--; /* We can't free fb_info here - fbmem will touch it when we return */ if 
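The smscufx changes serialize open() against USB disconnect with a single disconnect_mutex, so the virtualized ("device unplugged") flag is tested and the open count updated atomically with respect to teardown. The shape of that pattern in isolation, using pthreads in place of the kernel mutex:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t disconnect_mutex = PTHREAD_MUTEX_INITIALIZER;

struct dev_state {
        bool virtualized;       /* set once the device is gone */
        int open_count;
};

static int dev_open(struct dev_state *d)
{
        int ret = 0;

        pthread_mutex_lock(&disconnect_mutex);
        if (d->virtualized)
                ret = -ENODEV;  /* no new opens after unplug */
        else
                d->open_count++;
        pthread_mutex_unlock(&disconnect_mutex);
        return ret;
}

static void dev_disconnect(struct dev_state *d)
{
        pthread_mutex_lock(&disconnect_mutex);
        d->virtualized = true;  /* racing opens now fail fast */
        /* free resources here, or on last close -- never both */
        pthread_mutex_unlock(&disconnect_mutex);
}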
(dev->virtualized && (dev->fb_count == 0)) - schedule_delayed_work(&dev->free_framebuffer_work, HZ); + ufx_free_framebuffer(dev); if ((dev->fb_count == 0) && (info->fbdefio)) { fb_deferred_io_cleanup(info); @@ -1177,6 +1185,8 @@ static int ufx_ops_release(struct fb_info *info, int user) kref_put(&dev->kref, ufx_free); + mutex_unlock(&disconnect_mutex); + return 0; } @@ -1283,6 +1293,7 @@ static const struct fb_ops ufx_ops = { .fb_blank = ufx_ops_blank, .fb_check_var = ufx_ops_check_var, .fb_set_par = ufx_ops_set_par, + .fb_destroy = ufx_ops_destory, }; /* Assumes &info->lock held by caller @@ -1664,9 +1675,6 @@ static int ufx_usb_probe(struct usb_interface *interface, goto destroy_modedb; } - INIT_DELAYED_WORK(&dev->free_framebuffer_work, - ufx_free_framebuffer_work); - retval = ufx_reg_read(dev, 0x3000, &id_rev); check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval); dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev); @@ -1739,8 +1747,12 @@ e_nomem: static void ufx_usb_disconnect(struct usb_interface *interface) { struct ufx_data *dev; + struct fb_info *info; + + mutex_lock(&disconnect_mutex); dev = usb_get_intfdata(interface); + info = dev->info; pr_debug("USB disconnect starting\n"); @@ -1754,12 +1766,17 @@ static void ufx_usb_disconnect(struct usb_interface *interface) /* if clients still have us open, will be freed on last close */ if (dev->fb_count == 0) - schedule_delayed_work(&dev->free_framebuffer_work, 0); + ufx_free_framebuffer(dev); - /* release reference taken by kref_init in probe() */ - kref_put(&dev->kref, ufx_free); + /* this function will wait for all in-flight urbs to complete */ + if (dev->urbs.count > 0) + ufx_free_urb_list(dev); - /* consider ufx_data freed */ + pr_debug("freeing ufx_data %p", dev); + + unregister_framebuffer(info); + + mutex_unlock(&disconnect_mutex); } static struct usb_driver ufx_driver = { diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index 265865610edc..3feb6e40d56d 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -1041,6 +1041,48 @@ stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area) SETUP_FB(fb); } +#define ARTIST_VRAM_SIZE 0x000804 +#define ARTIST_VRAM_SRC 0x000808 +#define ARTIST_VRAM_SIZE_TRIGGER_WINFILL 0x000a04 +#define ARTIST_VRAM_DEST_TRIGGER_BLOCKMOVE 0x000b00 +#define ARTIST_SRC_BM_ACCESS 0x018008 +#define ARTIST_FGCOLOR 0x018010 +#define ARTIST_BGCOLOR 0x018014 +#define ARTIST_BITMAP_OP 0x01801c + +static void +stifb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct stifb_info *fb = container_of(info, struct stifb_info, info); + + if (rect->rop != ROP_COPY || + (fb->id == S9000_ID_HCRX && fb->info.var.bits_per_pixel == 32)) + return cfb_fillrect(info, rect); + + SETUP_HW(fb); + + if (fb->info.var.bits_per_pixel == 32) { + WRITE_WORD(0xBBA0A000, fb, REG_10); + + NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff); + } else { + WRITE_WORD(fb->id == S9000_ID_HCRX ? 
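The reason for the new .fb_destroy hook above: fbmem keeps using the fb_info after the driver's release path runs, so framebuffer_release() may only happen once the core drops its final reference, and the driver's own reference is then dropped from the destroy callback. Reduced to a reference-counting skeleton (C11 atomics standing in for struct kref; names are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct fb_obj {
        atomic_int refs;        /* held by probe, each open, and the core */
};

static struct fb_obj *fb_obj_new(void)
{
        struct fb_obj *o = malloc(sizeof(*o));

        if (o)
                atomic_init(&o->refs, 1);       /* probe's reference */
        return o;
}

static void fb_obj_get(struct fb_obj *o)
{
        atomic_fetch_add(&o->refs, 1);
}

static void fb_obj_put(struct fb_obj *o)
{
        /* Only the final put frees; .fb_destroy is the core's final put. */
        if (atomic_fetch_sub(&o->refs, 1) == 1)
                free(o);
}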
0x13a02000 : 0x13a01000, fb, REG_10); + + NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff); + } + + WRITE_WORD(0x03000300, fb, ARTIST_BITMAP_OP); + WRITE_WORD(0x2ea01000, fb, ARTIST_SRC_BM_ACCESS); + NGLE_QUICK_SET_DST_BM_ACCESS(fb, 0x2ea01000); + NGLE_REALLY_SET_IMAGE_FG_COLOR(fb, rect->color); + WRITE_WORD(0, fb, ARTIST_BGCOLOR); + + NGLE_SET_DSTXY(fb, (rect->dx << 16) | (rect->dy)); + SET_LENXY_START_RECFILL(fb, (rect->width << 16) | (rect->height)); + + SETUP_FB(fb); +} + static void __init stifb_init_display(struct stifb_info *fb) { @@ -1105,7 +1147,7 @@ static const struct fb_ops stifb_ops = { .owner = THIS_MODULE, .fb_setcolreg = stifb_setcolreg, .fb_blank = stifb_blank, - .fb_fillrect = cfb_fillrect, + .fb_fillrect = stifb_fillrect, .fb_copyarea = stifb_copyarea, .fb_imageblit = cfb_imageblit, }; @@ -1257,7 +1299,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) /* limit fbsize to max visible screen size */ if (fix->smem_len > yres*fix->line_length) - fix->smem_len = yres*fix->line_length; + fix->smem_len = ALIGN(yres*fix->line_length, 4*1024*1024); fix->accel = FB_ACCEL_NONE; @@ -1297,7 +1339,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) goto out_err0; } info->screen_size = fix->smem_len; - info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA; + info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; info->pseudo_palette = &fb->pseudo_palette; /* This has to be done !!! */ @@ -1317,11 +1359,11 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) goto out_err3; } + /* save for primary gfx device detection & unregister_framebuffer() */ + sti->info = info; if (register_framebuffer(&fb->info) < 0) goto out_err4; - sti->info = info; /* save for unregister_framebuffer() */ - fb_info(&fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n", fix->id, var->xres, diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 90f48b71fd8f..d9eec1b60e66 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -1649,8 +1649,9 @@ static int dlfb_usb_probe(struct usb_interface *intf, const struct device_attribute *attr; struct dlfb_data *dlfb; struct fb_info *info; - int retval = -ENOMEM; + int retval; struct usb_device *usbdev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *out; /* usb initialization */ dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL); @@ -1664,6 +1665,12 @@ static int dlfb_usb_probe(struct usb_interface *intf, dlfb->udev = usb_get_dev(usbdev); usb_set_intfdata(intf, dlfb); + retval = usb_find_common_endpoints(intf->cur_altsetting, NULL, &out, NULL, NULL); + if (retval) { + dev_err(&intf->dev, "Device should have at lease 1 bulk endpoint!\n"); + goto error; + } + dev_dbg(&intf->dev, "console enable=%d\n", console); dev_dbg(&intf->dev, "fb_defio enable=%d\n", fb_defio); dev_dbg(&intf->dev, "shadow enable=%d\n", shadow); @@ -1673,6 +1680,7 @@ static int dlfb_usb_probe(struct usb_interface *intf, if (!dlfb_parse_vendor_descriptor(dlfb, intf)) { dev_err(&intf->dev, "firmware not recognized, incompatible device?\n"); + retval = -ENODEV; goto error; } @@ -1686,8 +1694,10 @@ static int dlfb_usb_probe(struct usb_interface *intf, /* allocates framebuffer driver structure, not framebuffer memory */ info = framebuffer_alloc(0, &dlfb->udev->dev); - if (!info) + if (!info) { + retval = -ENOMEM; goto error; + } dlfb->info = info; info->par = dlfb; diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c index 
7a959e5ba90b..c274ec5e965c 100644 --- a/drivers/video/fbdev/vt8623fb.c +++ b/drivers/video/fbdev/vt8623fb.c @@ -504,6 +504,8 @@ static int vt8623fb_set_par(struct fb_info *info) (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1, 1, info->node); + if (screen_size > info->screen_size) + screen_size = info->screen_size; memset_io(info->screen_base, 0x00, screen_size); /* Device and screen back on */ diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c index 73eb34849eab..4ccfd30c2a30 100644 --- a/drivers/virt/vboxguest/vboxguest_linux.c +++ b/drivers/virt/vboxguest/vboxguest_linux.c @@ -356,8 +356,8 @@ static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) goto err_vbg_core_exit; } - ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED, - DEVICE_NAME, gdev); + ret = request_irq(pci->irq, vbg_core_isr, IRQF_SHARED, DEVICE_NAME, + gdev); if (ret) { vbg_err("vboxguest: Error requesting irq: %d\n", ret); goto err_vbg_core_exit; @@ -367,7 +367,7 @@ static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) if (ret) { vbg_err("vboxguest: Error misc_register %s failed: %d\n", DEVICE_NAME, ret); - goto err_vbg_core_exit; + goto err_free_irq; } ret = misc_register(&gdev->misc_device_user); @@ -403,6 +403,8 @@ err_unregister_misc_device_user: misc_deregister(&gdev->misc_device_user); err_unregister_misc_device: misc_deregister(&gdev->misc_device); +err_free_irq: + free_irq(pci->irq, gdev); err_vbg_core_exit: vbg_core_exit(gdev); err_disable_pcidev: @@ -419,6 +421,7 @@ static void vbg_pci_remove(struct pci_dev *pci) vbg_gdev = NULL; mutex_unlock(&vbg_gdev_mutex); + free_irq(pci->irq, gdev); device_remove_file(gdev->dev, &dev_attr_host_features); device_remove_file(gdev->dev, &dev_attr_host_version); misc_deregister(&gdev->misc_device_user); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index de73bd63f6b6..e8ef0c66e558 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -62,6 +62,7 @@ #include #include #include +#include #include #include #include @@ -543,6 +544,28 @@ static const struct virtio_config_ops virtio_mmio_config_ops = { .get_shm_region = vm_get_shm_region, }; +#ifdef CONFIG_PM_SLEEP +static int virtio_mmio_freeze(struct device *dev) +{ + struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev); + + return virtio_device_freeze(&vm_dev->vdev); +} + +static int virtio_mmio_restore(struct device *dev) +{ + struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev); + + if (vm_dev->version == 1) + writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); + + return virtio_device_restore(&vm_dev->vdev); +} + +static const struct dev_pm_ops virtio_mmio_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore) +}; +#endif static void virtio_mmio_release_dev(struct device *_d) { @@ -689,6 +712,7 @@ static int vm_cmdline_set(const char *device, if (!vm_cmdline_parent_registered) { err = device_register(&vm_cmdline_parent); if (err) { + put_device(&vm_cmdline_parent); pr_err("Failed to register parent device!\n"); return err; } @@ -763,26 +787,6 @@ static void vm_unregister_cmdline_devices(void) #endif -#ifdef CONFIG_PM_SLEEP -static int virtio_mmio_freeze(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev); - - return virtio_device_freeze(&vm_dev->vdev); -} - -static int virtio_mmio_restore(struct device *dev) -{ - struct platform_device 
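The vboxguest hunks trade devm_request_irq() for manual request_irq()/free_irq() because devres releases a managed IRQ only after ->remove() has returned, by which time vbg_core_exit() has already torn down the state the shared ISR dereferences; manual management lets remove() free the IRQ first. The required ordering, sketched with hypothetical stub types and helpers:

struct fake_pci { int irq; };
struct fake_gdev { int core_state; };

static void free_irq_stub(int irq, struct fake_gdev *gdev)
{
        (void)irq; (void)gdev;  /* after this, the ISR cannot run */
}

static void core_exit_stub(struct fake_gdev *gdev)
{
        gdev->core_state = 0;   /* safe: no ISR can observe this */
}

static void remove_sketch(struct fake_pci *pci, struct fake_gdev *gdev)
{
        free_irq_stub(pci->irq, gdev);  /* 1: stop interrupt delivery */
        core_exit_stub(gdev);           /* 2: then free ISR data */
}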
*pdev = to_platform_device(dev); - struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev); - - return virtio_device_restore(&vm_dev->vdev); -} -#endif - -static SIMPLE_DEV_PM_OPS(virtio_mmio_pm_ops, virtio_mmio_freeze, virtio_mmio_restore); - /* Platform driver */ static const struct of_device_id virtio_mmio_match[] = { @@ -806,7 +810,9 @@ static struct platform_driver virtio_mmio_driver = { .name = "virtio-mmio", .of_match_table = virtio_mmio_match, .acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match), - .pm = &virtio_mmio_pm_ops, +#ifdef CONFIG_PM_SLEEP + .pm = &virtio_mmio_pm_ops, +#endif }, }; diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index b35bb2d57f62..1e890ef17687 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -254,8 +254,7 @@ void vp_del_vqs(struct virtio_device *vdev) if (vp_dev->msix_affinity_masks) { for (i = 0; i < vp_dev->msix_vectors; i++) - if (vp_dev->msix_affinity_masks[i]) - free_cpumask_var(vp_dev->msix_affinity_masks[i]); + free_cpumask_var(vp_dev->msix_affinity_masks[i]); } if (vp_dev->msix_enabled) { diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index 974d02bb3a45..6546d029c7fd 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c @@ -2092,16 +2092,20 @@ static ssize_t w1_seq_show(struct device *device, if (sl->reg_num.id == reg_num->id) seq = i; + if (w1_reset_bus(sl->master)) + goto error; + + /* Put the device into chain DONE state */ + w1_write_8(sl->master, W1_MATCH_ROM); + w1_write_block(sl->master, (u8 *)&rn, 8); w1_write_8(sl->master, W1_42_CHAIN); w1_write_8(sl->master, W1_42_CHAIN_DONE); w1_write_8(sl->master, W1_42_CHAIN_DONE_INV); - w1_read_block(sl->master, &ack, sizeof(ack)); /* check for acknowledgment */ ack = w1_read_8(sl->master); if (ack != W1_42_SUCCESS_CONFIRM_BYTE) goto error; - } /* Exit from CHAIN state */ diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c index e5dcb26d85f0..dcb3ffda3fad 100644 --- a/drivers/watchdog/armada_37xx_wdt.c +++ b/drivers/watchdog/armada_37xx_wdt.c @@ -274,6 +274,8 @@ static int armada_37xx_wdt_probe(struct platform_device *pdev) if (!res) return -ENODEV; dev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!dev->reg) + return -ENOMEM; /* init clock */ dev->clk = devm_clk_get(&pdev->dev, NULL); diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c index ae7f9357bb87..46c2a4bd9ebe 100644 --- a/drivers/watchdog/rti_wdt.c +++ b/drivers/watchdog/rti_wdt.c @@ -227,7 +227,7 @@ static int rti_wdt_probe(struct platform_device *pdev) pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); - if (ret) { + if (ret < 0) { pm_runtime_put_noidle(dev); pm_runtime_disable(&pdev->dev); return dev_err_probe(dev, ret, "runtime pm failed\n"); diff --git a/drivers/watchdog/ts4800_wdt.c b/drivers/watchdog/ts4800_wdt.c index c137ad2bd5c3..0ea554c7cda5 100644 --- a/drivers/watchdog/ts4800_wdt.c +++ b/drivers/watchdog/ts4800_wdt.c @@ -125,13 +125,16 @@ static int ts4800_wdt_probe(struct platform_device *pdev) ret = of_property_read_u32_index(np, "syscon", 1, ®); if (ret < 0) { dev_err(dev, "no offset in syscon\n"); + of_node_put(syscon_np); return ret; } /* allocate memory for watchdog struct */ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); - if (!wdt) + if (!wdt) { + of_node_put(syscon_np); return -ENOMEM; + } /* set regmap and offset to know where to write */ wdt->feed_offset = reg; diff --git a/drivers/watchdog/wdat_wdt.c 
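The rti_wdt one-liner above is easy to misread: pm_runtime_get_sync() returns 0 or a positive value on success (1 means the device was already active), so checking "if (ret)" treated a healthy device as a probe failure. A small model of that contract:

#include <errno.h>

/* Stand-in for pm_runtime_get_sync(): >= 0 means success. */
static int fake_get_sync(int already_active)
{
        return already_active ? 1 : 0;
}

static int probe_step(int already_active)
{
        int ret = fake_get_sync(already_active);

        if (ret < 0) {          /* not "if (ret)" */
                /* the kernel code also calls pm_runtime_put_noidle()
                 * here to rebalance the usage count */
                return ret;
        }
        return 0;
}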
b/drivers/watchdog/wdat_wdt.c index 3065dd670a18..c60723f5ed99 100644 --- a/drivers/watchdog/wdat_wdt.c +++ b/drivers/watchdog/wdat_wdt.c @@ -462,6 +462,7 @@ static int wdat_wdt_probe(struct platform_device *pdev) return ret; watchdog_set_nowayout(&wdat->wdd, nowayout); + watchdog_stop_on_reboot(&wdat->wdd); return devm_watchdog_register_device(dev, &wdat->wdd); }
diff --git a/drivers/xen/features.c b/drivers/xen/features.c index 25c053b09605..2c306de228db 100644 --- a/drivers/xen/features.c +++ b/drivers/xen/features.c @@ -29,6 +29,6 @@ void xen_setup_features(void) if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j = 0; j < 32; j++) - xen_features[i * 32 + j] = !!(fi.submap & 1<<j); } }
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h @@ -16,6 +16,7 @@ #include #include #include +#include struct gntdev_dmabuf_priv; @@ -43,9 +44,10 @@ struct gntdev_unmap_notify { }; struct gntdev_grant_map { + atomic_t in_use; struct mmu_interval_notifier notifier; + bool notifier_init; struct list_head next; - struct vm_area_struct *vma; int index; int count; int flags; @@ -56,6 +58,7 @@ struct gntdev_grant_map { struct gnttab_unmap_grant_ref *unmap_ops; struct gnttab_map_grant_ref *kmap_ops; struct gnttab_unmap_grant_ref *kunmap_ops; + bool *being_removed; struct page **pages; unsigned long pages_vm_start; @@ -73,6 +76,11 @@ struct gntdev_grant_map { /* Needed to avoid allocation in gnttab_dma_free_pages(). */ xen_pfn_t *frames; #endif + + /* Number of live grants */ + atomic_t live_grants; + /* Needed to avoid allocation in __unmap_grant_pages */ + struct gntab_unmap_queue_data unmap_data; }; struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 54778aadf618..16acddaff9ae 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -60,10 +61,11 @@ module_param(limit, uint, 0644); MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by one mapping request"); +/* True in PV mode, false otherwise */ static int use_ptemod; -static int unmap_grant_pages(struct gntdev_grant_map *map, - int offset, int pages); +static void unmap_grant_pages(struct gntdev_grant_map *map, + int offset, int pages); static struct miscdevice gntdev_miscdev; @@ -120,6 +122,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map) kvfree(map->unmap_ops); kvfree(map->kmap_ops); kvfree(map->kunmap_ops); + kvfree(map->being_removed); kfree(map); } @@ -140,12 +143,15 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, add->kunmap_ops = kvcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL); add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); + add->being_removed = + kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL); if (NULL == add->grants || NULL == add->map_ops || NULL == add->unmap_ops || NULL == add->kmap_ops || NULL == add->kunmap_ops || - NULL == add->pages) + NULL == add->pages || + NULL == add->being_removed) goto err; #ifdef CONFIG_XEN_GRANT_DMA_ALLOC @@ -240,9 +246,39 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) if (!refcount_dec_and_test(&map->users)) return; - if (map->pages && !use_ptemod) + if (map->pages && !use_ptemod) { + /* + * Increment the reference count. This ensures that the + * subsequent call to unmap_grant_pages() will not wind up + * re-entering itself.
It *can* wind up calling + * gntdev_put_map() recursively, but such calls will be with a + * reference count greater than 1, so they will return before + * this code is reached. The recursion depth is thus limited to + * 1. Do NOT use refcount_inc() here, as it will detect that + * the reference count is zero and WARN(). + */ + refcount_set(&map->users, 1); + + /* + * Unmap the grants. This may or may not be asynchronous, so it + * is possible that the reference count is 1 on return, but it + * could also be greater than 1. + */ unmap_grant_pages(map, 0, map->count); + /* Check if the memory now needs to be freed */ + if (!refcount_dec_and_test(&map->users)) + return; + + /* + * All pages have been returned to the hypervisor, so free the + * map. + */ + } + + if (use_ptemod && map->notifier_init) + mmu_interval_notifier_remove(&map->notifier); + if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { notify_remote_via_evtchn(map->notify.event); evtchn_put(map->notify.event); @@ -255,21 +291,14 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data) { struct gntdev_grant_map *map = data; - unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; - int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte; + unsigned int pgnr = (addr - map->pages_vm_start) >> PAGE_SHIFT; + int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte | + (1 << _GNTMAP_guest_avail0); u64 pte_maddr; BUG_ON(pgnr >= map->count); pte_maddr = arbitrary_virt_to_machine(pte).maddr; - /* - * Set the PTE as special to force get_user_pages_fast() fall - * back to the slow path. If this is not supported as part of - * the grant map, it will be done afterwards. - */ - if (xen_feature(XENFEAT_gnttab_map_avail_bits)) - flags |= (1 << _GNTMAP_guest_avail0); - gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags, map->grants[pgnr].ref, map->grants[pgnr].domid); @@ -278,16 +307,9 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data) return 0; } -#ifdef CONFIG_X86 -static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data) -{ - set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte)); - return 0; -} -#endif - int gntdev_map_grant_pages(struct gntdev_grant_map *map) { + size_t alloced = 0; int i, err = 0; if (!use_ptemod) { @@ -336,87 +358,130 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) map->pages, map->count); for (i = 0; i < map->count; i++) { - if (map->map_ops[i].status == GNTST_okay) + if (map->map_ops[i].status == GNTST_okay) { map->unmap_ops[i].handle = map->map_ops[i].handle; - else if (!err) + alloced++; + } else if (!err) err = -EINVAL; if (map->flags & GNTMAP_device_map) map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr; if (use_ptemod) { - if (map->kmap_ops[i].status == GNTST_okay) + if (map->kmap_ops[i].status == GNTST_okay) { + alloced++; map->kunmap_ops[i].handle = map->kmap_ops[i].handle; - else if (!err) + } else if (!err) err = -EINVAL; } } + atomic_add(alloced, &map->live_grants); return err; } -static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset, +static void __unmap_grant_pages_done(int result, + struct gntab_unmap_queue_data *data) +{ + unsigned int i; + struct gntdev_grant_map *map = data->data; + unsigned int offset = data->unmap_ops - map->unmap_ops; + int successful_unmaps = 0; + int live_grants; + + for (i = 0; i < data->count; i++) { + if (map->unmap_ops[offset + i].status == GNTST_okay && + 
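The long comment above encodes a neat trick, shown standalone below: when the count hits zero, the last holder re-arms it to 1 before starting teardown, so any put performed by the teardown path returns at the early test instead of re-entering the free logic; recursion depth is therefore bounded at one. A C11-atomic model (the kernel uses refcount_t and an asynchronous unmap, simplified here to a synchronous call):

#include <stdatomic.h>

static atomic_int users = 1;    /* creator's reference */

static void put_map(void);

static void free_map(void) { /* final teardown */ }

static void teardown(void)
{
        atomic_fetch_add(&users, 1);    /* keep map alive during unmap */
        /* ... unmap work ... */
        put_map();      /* nested put: count > 1 here, returns early */
}

static void put_map(void)
{
        if (atomic_fetch_sub(&users, 1) != 1)
                return;                 /* not the last user */

        atomic_store(&users, 1);        /* re-arm: bounds recursion at 1 */
        teardown();

        if (atomic_fetch_sub(&users, 1) != 1)
                return;                 /* teardown still holds a ref */
        free_map();
}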
map->unmap_ops[offset + i].handle != -1) + successful_unmaps++; + + WARN_ON(map->unmap_ops[offset+i].status && + map->unmap_ops[offset+i].handle != -1); + pr_debug("unmap handle=%d st=%d\n", + map->unmap_ops[offset+i].handle, + map->unmap_ops[offset+i].status); + map->unmap_ops[offset+i].handle = -1; + if (use_ptemod) { + if (map->kunmap_ops[offset + i].status == GNTST_okay && + map->kunmap_ops[offset + i].handle != -1) + successful_unmaps++; + + WARN_ON(map->kunmap_ops[offset+i].status && + map->kunmap_ops[offset+i].handle != -1); + pr_debug("kunmap handle=%u st=%d\n", + map->kunmap_ops[offset+i].handle, + map->kunmap_ops[offset+i].status); + map->kunmap_ops[offset+i].handle = -1; + } + } + + /* + * Decrease the live-grant counter. This must happen after the loop to + * prevent premature reuse of the grants by gnttab_mmap(). + */ + live_grants = atomic_sub_return(successful_unmaps, &map->live_grants); + if (WARN_ON(live_grants < 0)) + pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n", + __func__, live_grants, successful_unmaps); + + /* Release reference taken by __unmap_grant_pages */ + gntdev_put_map(NULL, map); +} + +static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset, int pages) { - int i, err = 0; - struct gntab_unmap_queue_data unmap_data; - if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { int pgno = (map->notify.addr >> PAGE_SHIFT); + if (pgno >= offset && pgno < offset + pages) { /* No need for kmap, pages are in lowmem */ uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); + tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; } } - unmap_data.unmap_ops = map->unmap_ops + offset; - unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; - unmap_data.pages = map->pages + offset; - unmap_data.count = pages; + map->unmap_data.unmap_ops = map->unmap_ops + offset; + map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; + map->unmap_data.pages = map->pages + offset; + map->unmap_data.count = pages; + map->unmap_data.done = __unmap_grant_pages_done; + map->unmap_data.data = map; + refcount_inc(&map->users); /* to keep map alive during async call below */ - err = gnttab_unmap_refs_sync(&unmap_data); - if (err) - return err; - - for (i = 0; i < pages; i++) { - if (map->unmap_ops[offset+i].status) - err = -EINVAL; - pr_debug("unmap handle=%d st=%d\n", - map->unmap_ops[offset+i].handle, - map->unmap_ops[offset+i].status); - map->unmap_ops[offset+i].handle = -1; - } - return err; + gnttab_unmap_refs_async(&map->unmap_data); } -static int unmap_grant_pages(struct gntdev_grant_map *map, int offset, - int pages) +static void unmap_grant_pages(struct gntdev_grant_map *map, int offset, + int pages) { - int range, err = 0; + int range; + + if (atomic_read(&map->live_grants) == 0) + return; /* Nothing to do */ pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages); /* It is possible the requested range will have a "hole" where we * already unmapped some of the grants. Only unmap valid ranges. 
*/ - while (pages && !err) { - while (pages && map->unmap_ops[offset].handle == -1) { + while (pages) { + while (pages && map->being_removed[offset]) { offset++; pages--; } range = 0; while (range < pages) { - if (map->unmap_ops[offset+range].handle == -1) + if (map->being_removed[offset + range]) break; + map->being_removed[offset + range] = true; range++; } - err = __unmap_grant_pages(map, offset, range); + if (range) + __unmap_grant_pages(map, offset, range); offset += range; pages -= range; } - - return err; } /* ------------------------------------------------------------------ */ @@ -436,11 +501,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma) struct gntdev_priv *priv = file->private_data; pr_debug("gntdev_vma_close %p\n", vma); - if (use_ptemod) { - WARN_ON(map->vma != vma); - mmu_interval_notifier_remove(&map->notifier); - map->vma = NULL; - } + vma->vm_private_data = NULL; gntdev_put_map(priv, map); } @@ -468,31 +529,30 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn, struct gntdev_grant_map *map = container_of(mn, struct gntdev_grant_map, notifier); unsigned long mstart, mend; - int err; + unsigned long map_start, map_end; if (!mmu_notifier_range_blockable(range)) return false; + map_start = map->pages_vm_start; + map_end = map->pages_vm_start + (map->count << PAGE_SHIFT); + /* * If the VMA is split or otherwise changed the notifier is not * updated, but we don't want to process VA's outside the modified * VMA. FIXME: It would be much more understandable to just prevent * modifying the VMA in the first place. */ - if (map->vma->vm_start >= range->end || - map->vma->vm_end <= range->start) + if (map_start >= range->end || map_end <= range->start) return true; - mstart = max(range->start, map->vma->vm_start); - mend = min(range->end, map->vma->vm_end); + mstart = max(range->start, map_start); + mend = min(range->end, map_end); pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", - map->index, map->count, - map->vma->vm_start, map->vma->vm_end, - range->start, range->end, mstart, mend); - err = unmap_grant_pages(map, - (mstart - map->vma->vm_start) >> PAGE_SHIFT, - (mend - mstart) >> PAGE_SHIFT); - WARN_ON(err); + map->index, map->count, map_start, map_end, + range->start, range->end, mstart, mend); + unmap_grant_pages(map, (mstart - map_start) >> PAGE_SHIFT, + (mend - mstart) >> PAGE_SHIFT); return true; } @@ -972,14 +1032,15 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) return -EINVAL; pr_debug("map %d+%d at %lx (pgoff %lx)\n", - index, count, vma->vm_start, vma->vm_pgoff); + index, count, vma->vm_start, vma->vm_pgoff); mutex_lock(&priv->lock); map = gntdev_find_map_index(priv, index, count); if (!map) goto unlock_out; - if (use_ptemod && map->vma) + if (!atomic_add_unless(&map->in_use, 1, 1)) goto unlock_out; + refcount_inc(&map->users); vma->vm_ops = &gntdev_vmops; @@ -1000,15 +1061,16 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) map->flags |= GNTMAP_readonly; } + map->pages_vm_start = vma->vm_start; + if (use_ptemod) { - map->vma = vma; err = mmu_interval_notifier_insert_locked( &map->notifier, vma->vm_mm, vma->vm_start, vma->vm_end - vma->vm_start, &gntdev_mmu_ops); - if (err) { - map->vma = NULL; + if (err) goto out_unlock_put; - } + + map->notifier_init = true; } mutex_unlock(&priv->lock); @@ -1025,7 +1087,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) */ mmu_interval_read_begin(&map->notifier); - map->pages_vm_start = vma->vm_start; err = 
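The reworked unmap_grant_pages() above walks the requested range in two nested loops: skip entries some earlier call already claimed, then claim and batch the next contiguous run, marking each entry being_removed before it is queued so two racing unmaps can never operate on the same grant. The splitting logic on its own, as a runnable sketch:

#include <stdbool.h>
#include <stdio.h>

static void unmap_ranges(bool *being_removed, int offset, int pages)
{
        while (pages) {
                int range = 0;

                /* skip the "hole": entries already being removed */
                while (pages && being_removed[offset]) {
                        offset++;
                        pages--;
                }
                /* claim the next contiguous run */
                while (range < pages && !being_removed[offset + range]) {
                        being_removed[offset + range] = true;
                        range++;
                }
                if (range)
                        printf("unmap [%d, %d)\n", offset, offset + range);
                offset += range;
                pages -= range;
        }
}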
apply_to_page_range(vma->vm_mm, vma->vm_start, vma->vm_end - vma->vm_start, find_grant_ptes, map); @@ -1043,23 +1104,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) err = vm_map_pages_zero(vma, map->pages, map->count); if (err) goto out_put_map; - } else { -#ifdef CONFIG_X86 - /* - * If the PTEs were not made special by the grant map - * hypercall, do so here. - * - * This is racy since the mapping is already visible - * to userspace but userspace should be well-behaved - * enough to not touch it until the mmap() call - * returns. - */ - if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) { - apply_to_page_range(vma->vm_mm, vma->vm_start, - vma->vm_end - vma->vm_start, - set_grant_ptes_as_special, NULL); - } -#endif } return 0; @@ -1071,13 +1115,8 @@ unlock_out: out_unlock_put: mutex_unlock(&priv->lock); out_put_map: - if (use_ptemod) { + if (use_ptemod) unmap_grant_pages(map, 0, map->count); - if (map->vma) { - mmu_interval_notifier_remove(&map->notifier); - map->vma = NULL; - } - } gntdev_put_map(priv, map); return err; } diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 5c83d41766c8..0a2d24d6ac6f 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -981,6 +981,9 @@ int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args) size_t size; int i, ret; + if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT)) + return -ENOMEM; + size = args->nr_pages << PAGE_SHIFT; if (args->coherent) args->vaddr = dma_alloc_coherent(args->dev, size, diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c index cdc6daa7a9f6..9cf7085a260b 100644 --- a/drivers/xen/pcpu.c +++ b/drivers/xen/pcpu.c @@ -228,7 +228,7 @@ static int register_pcpu(struct pcpu *pcpu) err = device_register(dev); if (err) { - pcpu_release(dev); + put_device(dev); return err; } diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index 9db557b76511..804d8f4d0e73 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c @@ -137,7 +137,7 @@ static int platform_pci_probe(struct pci_dev *pdev, if (ret) { dev_warn(&pdev->dev, "Unable to set the evtchn callback " "err=%d\n", ret); - goto out; + goto irq_out; } } @@ -145,13 +145,16 @@ static int platform_pci_probe(struct pci_dev *pdev, grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); ret = gnttab_setup_auto_xlat_frames(grant_frames); if (ret) - goto out; + goto irq_out; ret = gnttab_init(); if (ret) goto grant_out; return 0; grant_out: gnttab_free_auto_xlat_frames(); +irq_out: + if (!xen_have_vector_callback) + free_irq(pdev->irq, pdev); out: pci_release_region(pdev, 0); mem_out: diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index fe8df32bb612..cd5f2f09468e 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -581,27 +581,30 @@ static int lock_pages( struct privcmd_dm_op_buf kbufs[], unsigned int num, struct page *pages[], unsigned int nr_pages, unsigned int *pinned) { - unsigned int i; + unsigned int i, off = 0; - for (i = 0; i < num; i++) { + for (i = 0; i < num; ) { unsigned int requested; int page_count; requested = DIV_ROUND_UP( offset_in_page(kbufs[i].uptr) + kbufs[i].size, - PAGE_SIZE); + PAGE_SIZE) - off; if (requested > nr_pages) return -ENOSPC; page_count = pin_user_pages_fast( - (unsigned long) kbufs[i].uptr, + (unsigned long)kbufs[i].uptr + off * PAGE_SIZE, requested, FOLL_WRITE, pages); - if (page_count < 0) - return page_count; + if (page_count <= 0) + return page_count ? 
: -EFAULT; *pinned += page_count; nr_pages -= page_count; pages += page_count; + + off = (requested == page_count) ? 0 : off + page_count; + i += !off; } return 0; @@ -677,10 +680,8 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata) } rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned); - if (rc < 0) { - nr_pages = pinned; + if (rc < 0) goto out; - } for (i = 0; i < kdata.num; i++) { set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr); @@ -692,7 +693,7 @@ static long privcmd_ioctl_dm_op(struct file *file, void __user *udata) xen_preemptible_hcall_end(); out: - unlock_pages(pages, nr_pages); + unlock_pages(pages, pinned); kfree(xbufs); kfree(pages); kfree(kbufs); diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c index 5e53b4817f16..097316a74126 100644 --- a/drivers/xen/xen-pciback/conf_space_capability.c +++ b/drivers/xen/xen-pciback/conf_space_capability.c @@ -190,13 +190,16 @@ static const struct config_field caplist_pm[] = { }; static struct msi_msix_field_config { - u16 enable_bit; /* bit for enabling MSI/MSI-X */ - unsigned int int_type; /* interrupt type for exclusiveness check */ + u16 enable_bit; /* bit for enabling MSI/MSI-X */ + u16 allowed_bits; /* bits allowed to be changed */ + unsigned int int_type; /* interrupt type for exclusiveness check */ } msi_field_config = { .enable_bit = PCI_MSI_FLAGS_ENABLE, + .allowed_bits = PCI_MSI_FLAGS_ENABLE, .int_type = INTERRUPT_TYPE_MSI, }, msix_field_config = { .enable_bit = PCI_MSIX_FLAGS_ENABLE, + .allowed_bits = PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL, .int_type = INTERRUPT_TYPE_MSIX, }; @@ -229,7 +232,7 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value, return 0; if (!dev_data->allow_interrupt_control || - (new_value ^ old_value) & ~field_config->enable_bit) + (new_value ^ old_value) & ~field_config->allowed_bits) return PCIBIOS_SET_FAILED; if (new_value & field_config->enable_bit) { diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 597af455a522..0792fda49a15 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -128,7 +128,7 @@ static ssize_t xenbus_file_read(struct file *filp, { struct xenbus_file_priv *u = filp->private_data; struct read_buffer *rb; - unsigned i; + ssize_t i; int ret; mutex_lock(&u->reply_mutex); @@ -148,7 +148,7 @@ again: rb = list_entry(u->read_buffers.next, struct read_buffer, list); i = 0; while (i < len) { - unsigned sz = min((unsigned)len - i, rb->len - rb->cons); + size_t sz = min_t(size_t, len - i, rb->len - rb->cons); ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz); diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c index 34742c6e189e..f17c4c03db30 100644 --- a/drivers/xen/xlate_mmu.c +++ b/drivers/xen/xlate_mmu.c @@ -261,7 +261,6 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, return 0; } -EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages); struct remap_pfn { struct mm_struct *mm; diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index a13ef836fe4e..84f3a6405b55 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -657,14 +657,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode, if (stat->st_result_mask & P9_STATS_NLINK) set_nlink(inode, stat->st_nlink); if (stat->st_result_mask & P9_STATS_MODE) { - inode->i_mode = stat->st_mode; - if ((S_ISBLK(inode->i_mode)) || - (S_ISCHR(inode->i_mode))) - 
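The privcmd lock_pages() rework deserves unpacking: pin_user_pages_fast() may legally pin fewer pages than requested, so the loop keeps an intra-buffer offset "off" and retries the same buffer from where it stopped. "off" is reset to 0 exactly when the buffer completes, which is why "i += !off" advances to the next buffer only then, and a return of zero pages becomes -EFAULT instead of silent success. A generic model with a pluggable pin callback:

#include <errno.h>

/* pin() returns how many pages it managed to pin (may be short). */
static int pin_all(const unsigned int *need, unsigned int nbufs,
                   unsigned int (*pin)(unsigned int idx, unsigned int off,
                                       unsigned int n))
{
        unsigned int off = 0;

        for (unsigned int i = 0; i < nbufs; ) {
                unsigned int requested = need[i] - off;
                unsigned int got = pin(i, off, requested);

                if (got == 0)
                        return -EFAULT;         /* no forward progress */

                off = (requested == got) ? 0 : off + got;
                i += !off;      /* next buffer only once fully pinned */
        }
        return 0;
}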
init_special_inode(inode, inode->i_mode, - inode->i_rdev); + mode = stat->st_mode & S_IALLUGO; + mode |= inode->i_mode & ~S_IALLUGO; + inode->i_mode = mode; } - if (stat->st_result_mask & P9_STATS_RDEV) - inode->i_rdev = new_decode_dev(stat->st_rdev); if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) && stat->st_result_mask & P9_STATS_SIZE) v9fs_i_size_write(inode, stat->st_size); diff --git a/fs/Makefile b/fs/Makefile index 359c63fefa1b..c7851875b668 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -34,8 +34,6 @@ obj-$(CONFIG_TIMERFD) += timerfd.o obj-$(CONFIG_EVENTFD) += eventfd.o obj-$(CONFIG_USERFAULTFD) += userfaultfd.o obj-$(CONFIG_AIO) += aio.o -obj-$(CONFIG_IO_URING) += io_uring.o -obj-$(CONFIG_IO_WQ) += io-wq.o obj-$(CONFIG_FS_DAX) += dax.o obj-$(CONFIG_FS_ENCRYPTION) += crypto/ obj-$(CONFIG_FS_VERITY) += verity/ diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 262c0ae505af..159795059547 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -412,8 +412,11 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode, } /* skip if starts before the current position */ - if (offset < curr) + if (offset < curr) { + if (next > curr) + ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent); continue; + } /* found the next entry */ if (!dir_emit(ctx, dire->u.name, nlen, diff --git a/fs/afs/flock.c b/fs/afs/flock.c index cb3054c7843e..466ad609f205 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -76,7 +76,7 @@ void afs_lock_op_done(struct afs_call *call) if (call->error == 0) { spin_lock(&vnode->lock); trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0); - vnode->locked_at = call->reply_time; + vnode->locked_at = call->issue_time; afs_schedule_lock_extension(vnode); spin_unlock(&vnode->lock); } diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index e7e98ad63a91..04d42e49fc59 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -161,8 +161,8 @@ responded: } } - if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && - rtt_us < server->probe.rtt) { + rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us); + if (rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; server->rtt = rtt_us; alist->preferred = index; diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 1d95ed9dd86e..0048a32cb040 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -130,7 +130,7 @@ bad: static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry) { - return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry; + return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry; } static void xdr_decode_AFSCallBack(const __be32 **_bp, diff --git a/fs/afs/inode.c b/fs/afs/inode.c index f81a972bdd29..826fae22a8cc 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -729,10 +729,23 @@ int afs_getattr(const struct path *path, struct kstat *stat, { struct inode *inode = d_inode(path->dentry); struct afs_vnode *vnode = AFS_FS_I(inode); - int seq = 0; + struct key *key; + int ret, seq = 0; _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation); + if (vnode->volume && + !(query_flags & AT_STATX_DONT_SYNC) && + !test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { + key = afs_request_key(vnode->volume->cell); + if (IS_ERR(key)) + return PTR_ERR(key); + ret = afs_validate(vnode, key); + key_put(key); + if (ret < 0) + return ret; + } + do { read_seqbegin_or_lock(&vnode->cb_lock, &seq); generic_fillattr(inode, stat); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index dc08a3d9b3a8..637cbe549397 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -135,7 +135,6 @@ struct afs_call 
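The v9fs hunk above stops re-deriving the whole i_mode (and the device numbers) from the server on every refresh; it splices only the server's permission bits into the locally known mode, so the file-type bits can never change under a live inode. The bit manipulation in isolation (07777 is the kernel's S_IALLUGO):

#include <sys/stat.h>

static mode_t splice_mode(mode_t server_mode, mode_t cur_mode)
{
        const mode_t perm_bits = 07777;         /* S_IALLUGO */

        return (server_mode & perm_bits) | (cur_mode & ~perm_bits);
}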
{ bool need_attention; /* T if RxRPC poked us */ bool async; /* T if asynchronous */ bool upgrade; /* T to request service upgrade */ - bool have_reply_time; /* T if have got reply_time */ bool intr; /* T if interruptible */ bool unmarshalling_error; /* T if an unmarshalling error occurred */ u16 service_id; /* Actual service ID (after upgrade) */ @@ -149,7 +148,7 @@ struct afs_call { } __attribute__((packed)); __be64 tmp64; }; - ktime_t reply_time; /* Time of first reply packet */ + ktime_t issue_time; /* Time of issue of operation */ }; struct afs_call_type { diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 1d1a8debe472..f1dc2162900a 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c @@ -69,6 +69,7 @@ int afs_abort_to_error(u32 abort_code) /* Unified AFS error table */ case UAEPERM: return -EPERM; case UAENOENT: return -ENOENT; + case UAEAGAIN: return -EAGAIN; case UAEACCES: return -EACCES; case UAEBUSY: return -EBUSY; case UAEEXIST: return -EEXIST; diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 8be709cb8542..535d28b44bca 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -429,6 +429,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) if (call->max_lifespan) rxrpc_kernel_set_max_life(call->net->socket, rxcall, call->max_lifespan); + call->issue_time = ktime_get_real(); /* send the request */ iov[0].iov_base = call->request; @@ -533,12 +534,6 @@ static void afs_deliver_to_call(struct afs_call *call) return; } - if (!call->have_reply_time && - rxrpc_kernel_get_reply_time(call->net->socket, - call->rxcall, - &call->reply_time)) - call->have_reply_time = true; - ret = call->type->deliver(call); state = READ_ONCE(call->state); if (ret == 0 && call->unmarshalling_error) @@ -572,6 +567,8 @@ static void afs_deliver_to_call(struct afs_call *call) case -ENODATA: case -EBADMSG: case -EMSGSIZE: + case -ENOMEM: + case -EFAULT: abort_code = RXGEN_CC_UNMARSHAL; if (state != AFS_CALL_CL_AWAIT_REPLY) abort_code = RXGEN_SS_UNMARSHAL; @@ -579,7 +576,7 @@ static void afs_deliver_to_call(struct afs_call *call) abort_code, ret, "KUM"); goto local_abort; default: - abort_code = RX_USER_ABORT; + abort_code = RX_CALL_DEAD; rxrpc_kernel_abort_call(call->net->socket, call->rxcall, abort_code, ret, "KER"); goto local_abort; @@ -871,7 +868,7 @@ void afs_send_empty_reply(struct afs_call *call) case -ENOMEM: _debug("oom"); rxrpc_kernel_abort_call(net->socket, call->rxcall, - RX_USER_ABORT, -ENOMEM, "KOO"); + RXGEN_SS_MARSHAL, -ENOMEM, "KOO"); fallthrough; default: _leave(" [error]"); @@ -913,7 +910,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) if (n == -ENOMEM) { _debug("oom"); rxrpc_kernel_abort_call(net->socket, call->rxcall, - RX_USER_ABORT, -ENOMEM, "KOO"); + RXGEN_SS_MARSHAL, -ENOMEM, "KOO"); } _leave(" [error]"); } diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index bd787e71a657..5b2ef5ffd716 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -239,8 +239,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp, struct afs_callback *cb = &scb->callback; ktime_t cb_expiry; - cb_expiry = call->reply_time; - cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100); + cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100); cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC); scb->have_cb = true; *_bp += xdr_size(x); diff --git a/fs/attr.c b/fs/attr.c index d8c1f796d0e9..22aa50c96ded 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -134,6 +134,8 @@ EXPORT_SYMBOL_NS(setattr_prepare, 
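A note on the afs reply_time to issue_time conversion in these hunks: anchoring a callback promise at the moment the RPC was issued (recorded in afs_make_call()) rather than at first reply can only shorten the client's view of the promise, so the client never trusts a callback longer than the server granted. A sketch of the expiry arithmetic, assuming, as the "* 100" factor in xdr_decode_YFSCallBack() suggests, that YFS expresses the expiry delta in 100 ns units:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000LL

static int64_t cb_expires_at_sec(int64_t issue_time_ns,
                                 uint64_t expiration_100ns)
{
        return (issue_time_ns + (int64_t)(expiration_100ns * 100))
                / NSEC_PER_SEC;
}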
ANDROID_GKI_VFS_EXPORT_ONLY); */ int inode_newsize_ok(const struct inode *inode, loff_t offset) { + if (offset < 0) + return -EINVAL; if (inode->i_size < offset) { unsigned long limit; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 7d6f2f791fe9..dfb6409f7a81 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -907,7 +907,7 @@ static int load_elf_binary(struct linux_binprm *bprm) interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL); if (!interp_elf_ex) { retval = -ENOMEM; - goto out_free_ph; + goto out_free_file; } /* Get the exec headers */ @@ -1328,6 +1328,7 @@ out: out_free_dentry: kfree(interp_elf_ex); kfree(interp_elf_phdata); +out_free_file: allow_write_access(interpreter); if (interpreter) fput(interpreter); diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index b9c658e0548e..69f4db05191a 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -427,6 +427,30 @@ static void old_reloc(unsigned long rl) /****************************************************************************/ +static inline u32 __user *skip_got_header(u32 __user *rp) +{ + if (IS_ENABLED(CONFIG_RISCV)) { + /* + * RISC-V has a 16 byte GOT PLT header for elf64-riscv + * and 8 byte GOT PLT header for elf32-riscv. + * Skip the whole GOT PLT header, since it is reserved + * for the dynamic linker (ld.so). + */ + u32 rp_val0, rp_val1; + + if (get_user(rp_val0, rp)) + return rp; + if (get_user(rp_val1, rp + 1)) + return rp; + + if (rp_val0 == 0xffffffff && rp_val1 == 0xffffffff) + rp += 4; + else if (rp_val0 == 0xffffffff) + rp += 2; + } + return rp; +} + static int load_flat_file(struct linux_binprm *bprm, struct lib_info *libinfo, int id, unsigned long *extra_stack) { @@ -774,7 +798,8 @@ static int load_flat_file(struct linux_binprm *bprm, * image. */ if (flags & FLAT_FLAG_GOTPIC) { - for (rp = (u32 __user *)datapos; ; rp++) { + rp = skip_got_header((u32 __user *) datapos); + for (; ; rp++) { u32 addr, rp_val; if (get_user(rp_val, rp)) return -EFAULT; diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index baff31a147e7..7208ba22e734 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -137,6 +137,7 @@ struct share_check { u64 root_objectid; u64 inum; int share_count; + bool have_delayed_delete_refs; }; static inline int extent_is_shared(struct share_check *sc) @@ -287,8 +288,10 @@ static void prelim_release(struct preftree *preftree) struct prelim_ref *ref, *next_ref; rbtree_postorder_for_each_entry_safe(ref, next_ref, - &preftree->root.rb_root, rbnode) + &preftree->root.rb_root, rbnode) { + free_inode_elem_list(ref->inode_list); free_pref(ref); + } preftree->root = RB_ROOT_CACHED; preftree->count = 0; @@ -646,6 +649,18 @@ unode_aux_to_inode_list(struct ulist_node *node) return (struct extent_inode_elem *)(uintptr_t)node->aux; } +static void free_leaf_list(struct ulist *ulist) +{ + struct ulist_node *node; + struct ulist_iterator uiter; + + ULIST_ITER_INIT(&uiter); + while ((node = ulist_next(ulist, &uiter))) + free_inode_elem_list(unode_aux_to_inode_list(node)); + + ulist_free(ulist); +} + /* * We maintain three separate rbtrees: one for direct refs, one for * indirect refs which have a key, and one for indirect refs which do not @@ -760,7 +775,11 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info, cond_resched(); } out: - ulist_free(parents); + /* + * We may have inode lists attached to refs in the parents ulist, so we + * must free them before freeing the ulist and its refs. 
+ */ + free_leaf_list(parents); return ret; } @@ -817,16 +836,11 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, struct preftrees *preftrees, struct share_check *sc) { struct btrfs_delayed_ref_node *node; - struct btrfs_delayed_extent_op *extent_op = head->extent_op; struct btrfs_key key; - struct btrfs_key tmp_op_key; struct rb_node *n; int count; int ret = 0; - if (extent_op && extent_op->update_key) - btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key); - spin_lock(&head->lock); for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { node = rb_entry(n, struct btrfs_delayed_ref_node, @@ -852,10 +866,16 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, case BTRFS_TREE_BLOCK_REF_KEY: { /* NORMAL INDIRECT METADATA backref */ struct btrfs_delayed_tree_ref *ref; + struct btrfs_key *key_ptr = NULL; + + if (head->extent_op && head->extent_op->update_key) { + btrfs_disk_key_to_cpu(&key, &head->extent_op->key); + key_ptr = &key; + } ref = btrfs_delayed_node_to_tree_ref(node); ret = add_indirect_ref(fs_info, preftrees, ref->root, - &tmp_op_key, ref->level + 1, + key_ptr, ref->level + 1, node->bytenr, count, sc, GFP_ATOMIC); break; @@ -881,13 +901,22 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, key.offset = ref->offset; /* - * Found a inum that doesn't match our known inum, we - * know it's shared. + * If we have a share check context and a reference for + * another inode, we can't exit immediately. This is + * because even if this is a BTRFS_ADD_DELAYED_REF + * reference we may find next a BTRFS_DROP_DELAYED_REF + * which cancels out this ADD reference. + * + * If this is a DROP reference and there was no previous + * ADD reference, then we need to signal that when we + * process references from the extent tree (through + * add_inline_refs() and add_keyed_refs()), we should + * not exit early if we find a reference for another + * inode, because one of the delayed DROP references + * may cancel that reference in the extent tree. */ - if (sc && sc->inum && ref->objectid != sc->inum) { - ret = BACKREF_FOUND_SHARED; - goto out; - } + if (sc && count < 0) + sc->have_delayed_delete_refs = true; ret = add_indirect_ref(fs_info, preftrees, ref->root, &key, 0, node->bytenr, count, sc, @@ -917,7 +946,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, } if (!ret) ret = extent_is_shared(sc); -out: + spin_unlock(&head->lock); return ret; } @@ -1020,7 +1049,8 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info, key.type = BTRFS_EXTENT_DATA_KEY; key.offset = btrfs_extent_data_ref_offset(leaf, dref); - if (sc && sc->inum && key.objectid != sc->inum) { + if (sc && sc->inum && key.objectid != sc->inum && + !sc->have_delayed_delete_refs) { ret = BACKREF_FOUND_SHARED; break; } @@ -1030,6 +1060,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info, ret = add_indirect_ref(fs_info, preftrees, root, &key, 0, bytenr, count, sc, GFP_NOFS); + break; } default: @@ -1119,7 +1150,8 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info, key.type = BTRFS_EXTENT_DATA_KEY; key.offset = btrfs_extent_data_ref_offset(leaf, dref); - if (sc && sc->inum && key.objectid != sc->inum) { + if (sc && sc->inum && key.objectid != sc->inum && + !sc->have_delayed_delete_refs) { ret = BACKREF_FOUND_SHARED; break; } @@ -1358,6 +1390,12 @@ again: if (ret < 0) goto out; ref->inode_list = eie; + /* + * We transferred the list ownership to the ref, + * so set to NULL to avoid a double free in case + * an error happens after this. 
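The two comments above describe a general ownership-transfer idiom: once a heap-allocated list gains a second owner, the original pointer must be cleared so an error path that releases both owners cannot free the list twice. A minimal userspace sketch of the idiom, with hypothetical types standing in for prelim_ref and its inode list:

#include <assert.h>
#include <stdlib.h>

struct elem { struct elem *next; };
struct owner { struct elem *list; };

static void free_list(struct elem *e)
{
	while (e) {
		struct elem *next = e->next;
		free(e);
		e = next;
	}
}

/* Attach src's list to dst and clear src: from here on only dst may
 * free it, so an error path that releases both owners stays safe. */
static void transfer_list(struct owner *dst, struct owner *src)
{
	dst->list = src->list;
	src->list = NULL;
}

int main(void)
{
	struct owner a = { malloc(sizeof(struct elem)) }, b = { NULL };

	assert(a.list);
	a.list->next = NULL;
	transfer_list(&b, &a);
	free_list(a.list);	/* no-op: ownership moved */
	free_list(b.list);	/* the single, real free */
	return 0;
}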
+ */ + eie = NULL; } ret = ulist_add_merge_ptr(refs, ref->parent, ref->inode_list, @@ -1383,6 +1421,14 @@ again: eie->next = ref->inode_list; } eie = NULL; + /* + * We have transferred the inode list ownership from + * this ref to the ref we added to the 'refs' ulist. + * So set this ref's inode list to NULL to avoid + * use-after-free when our caller uses it or double + * frees in case an error happens before we return. + */ + ref->inode_list = NULL; } cond_resched(); } @@ -1399,24 +1445,6 @@ out: return ret; } -static void free_leaf_list(struct ulist *blocks) -{ - struct ulist_node *node = NULL; - struct extent_inode_elem *eie; - struct ulist_iterator uiter; - - ULIST_ITER_INIT(&uiter); - while ((node = ulist_next(blocks, &uiter))) { - if (!node->aux) - continue; - eie = unode_aux_to_inode_list(node); - free_inode_elem_list(eie); - node->aux = 0; - } - - ulist_free(blocks); -} - /* * Finds all leafs with a reference to the specified combination of bytenr and * offset. key_list_head will point to a list of corresponding keys (caller must @@ -1542,6 +1570,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr, .root_objectid = root->root_key.objectid, .inum = inum, .share_count = 0, + .have_delayed_delete_refs = false, }; ulist_init(roots); @@ -1576,6 +1605,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr, break; bytenr = node->val; shared.share_count = 0; + shared.have_delayed_delete_refs = false; cond_resched(); } @@ -2030,10 +2060,29 @@ out: return ret; } +static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx) +{ + struct btrfs_data_container *inodes = ctx; + const size_t c = 3 * sizeof(u64); + + if (inodes->bytes_left >= c) { + inodes->bytes_left -= c; + inodes->val[inodes->elem_cnt] = inum; + inodes->val[inodes->elem_cnt + 1] = offset; + inodes->val[inodes->elem_cnt + 2] = root; + inodes->elem_cnt += 3; + } else { + inodes->bytes_missing += c - inodes->bytes_left; + inodes->bytes_left = 0; + inodes->elem_missed += 3; + } + + return 0; +} + int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, struct btrfs_path *path, - iterate_extent_inodes_t *iterate, void *ctx, - bool ignore_offset) + void *ctx, bool ignore_offset) { int ret; u64 extent_item_pos; @@ -2051,7 +2100,7 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, extent_item_pos = logical - found_key.objectid; ret = iterate_extent_inodes(fs_info, found_key.objectid, extent_item_pos, search_commit_root, - iterate, ctx, ignore_offset); + build_ino_list, ctx, ignore_offset); return ret; } diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 17abde7f794c..6ed18b807b64 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -35,8 +35,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info, bool ignore_offset); int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, - struct btrfs_path *path, - iterate_extent_inodes_t *iterate, void *ctx, + struct btrfs_path *path, void *ctx, bool ignore_offset); int paths_from_inode(u64 inum, struct inode_fs_paths *ipath); diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index c99e293b50f5..889a598b17f6 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -2570,7 +2570,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) struct btrfs_path *path = NULL; LIST_HEAD(dirty); struct list_head *io = &cur_trans->io_bgs; - int num_started = 0; int loops = 0; spin_lock(&cur_trans->dirty_bgs_lock); @@ -2636,7 +2635,6 @@ 
again: cache->io_ctl.inode = NULL; ret = btrfs_write_out_cache(trans, cache, path); if (ret == 0 && cache->io_ctl.inode) { - num_started++; should_put = 0; /* @@ -2737,7 +2735,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) int should_put; struct btrfs_path *path; struct list_head *io = &cur_trans->io_bgs; - int num_started = 0; path = btrfs_alloc_path(); if (!path) @@ -2795,7 +2792,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) cache->io_ctl.inode = NULL; ret = btrfs_write_out_cache(trans, cache, path); if (ret == 0 && cache->io_ctl.inode) { - num_started++; should_put = 0; list_add_tail(&cache->io_list, io); } else { @@ -3130,6 +3126,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, * attempt. */ wait_for_alloc = true; + force = CHUNK_ALLOC_NO_FORCE; spin_unlock(&space_info->lock); mutex_lock(&fs_info->chunk_mutex); mutex_unlock(&fs_info->chunk_mutex); diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index d29780463182..be6935d19197 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -161,7 +161,7 @@ no_valid_dev_replace_entry_found: if (btrfs_find_device(fs_info->fs_devices, BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) { btrfs_err(fs_info, - "replace devid present without an active replace item"); +"replace without active item, run 'device scan --forget' on the target device"); ret = -EUCLEAN; } else { dev_replace->srcdev = NULL; @@ -954,8 +954,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info) up_write(&dev_replace->rwsem); /* Scrub for replace must not be running in suspended state */ - ret = btrfs_scrub_cancel(fs_info); - ASSERT(ret != -ENOTCONN); + btrfs_scrub_cancel(fs_info); trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a5bcad027883..f2abd8bfd4a0 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1596,9 +1596,10 @@ again: ret = btrfs_insert_fs_root(fs_info, root); if (ret) { - btrfs_put_root(root); - if (ret == -EEXIST) + if (ret == -EEXIST) { + btrfs_put_root(root); goto again; + } goto fail; } return root; @@ -3060,7 +3061,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ~BTRFS_FEATURE_INCOMPAT_SUPP; if (features) { btrfs_err(fs_info, - "cannot mount because of unsupported optional features (%llx)", + "cannot mount because of unsupported optional features (0x%llx)", features); err = -EINVAL; goto fail_alloc; @@ -3098,11 +3099,25 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ~BTRFS_FEATURE_COMPAT_RO_SUPP; if (!sb_rdonly(sb) && features) { btrfs_err(fs_info, - "cannot mount read-write because of unsupported optional features (%llx)", + "cannot mount read-write because of unsupported optional features (0x%llx)", features); err = -EINVAL; goto fail_alloc; } + /* + * We have unsupported RO compat features. Even though we are mounted + * read-only, we should not cause any metadata write, including log + * replay, or we could screw up whatever the new feature requires.
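The new check is easiest to read as a small decision function: unsupported RO-compat features forbid a read-write mount outright, and forbid even a read-only mount from replaying a dirty log unless log replay is disabled. A hedged sketch with hypothetical names; the real code tests the superblock's compat_ro mask, btrfs_super_log_root() and the NOLOGREPLAY mount option:

#include <stdio.h>

static int ro_feature_gate(unsigned long long unknown_ro_features,
			   int readonly, int has_dirty_log, int nologreplay)
{
	/* A read-write mount would write metadata the kernel cannot keep valid. */
	if (unknown_ro_features && !readonly)
		return -1;
	/* Even a read-only mount writes metadata during log replay. */
	if (unknown_ro_features && has_dirty_log && !nologreplay)
		return -1;
	return 0;	/* safe to proceed */
}

int main(void)
{
	printf("%d\n", ro_feature_gate(0x4, 1, 1, 0));	/* -1: must not replay */
	printf("%d\n", ro_feature_gate(0x4, 1, 1, 1));	/* 0: nologreplay set */
	return 0;
}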
+ */ + if (unlikely(features && btrfs_super_log_root(disk_super) && + !btrfs_test_opt(fs_info, NOLOGREPLAY))) { + btrfs_err(fs_info, +"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay", + features); + err = -EINVAL; + goto fail_alloc; + } + ret = btrfs_init_workqueues(fs_info, fs_devices); if (ret) { @@ -4090,6 +4105,31 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) /* clear out the rbtree of defraggable inodes */ btrfs_cleanup_defrag_inodes(fs_info); + /* + * After we parked the cleaner kthread, ordered extents may have + * completed and created new delayed iputs. If one of the async reclaim + * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we + * can hang forever trying to stop it, because if a delayed iput is + * added after it ran btrfs_run_delayed_iputs() and before it called + * btrfs_wait_on_delayed_iputs(), it will hang forever since there is + * no one else to run iputs. + * + * So wait for all ongoing ordered extents to complete and then run + * delayed iputs. This works because once we reach this point no one + * can create new ordered extents or delayed iputs + * through any other means. + * + * Also note that btrfs_wait_ordered_roots() is not safe here, because + * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent, + * but the delayed iput for the respective inode is made only when doing + * the final btrfs_put_ordered_extent() (which must happen at + * btrfs_finish_ordered_io() when we are unmounting). + */ + btrfs_flush_workqueue(fs_info->endio_write_workers); + /* Ordered extents for free space inodes. */ + btrfs_flush_workqueue(fs_info->endio_freespace_worker); + btrfs_run_delayed_iputs(fs_info); + cancel_work_sync(&fs_info->async_reclaim_work); cancel_work_sync(&fs_info->async_data_reclaim_work); diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 1a8d419d9e1f..bfa2bf44529c 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -58,7 +58,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, } struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, - u64 root_objectid, u32 generation, + u64 root_objectid, u64 generation, int check_generation) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h index f32f4113c976..5afb7ca42828 100644 --- a/fs/btrfs/export.h +++ b/fs/btrfs/export.h @@ -19,7 +19,7 @@ struct btrfs_fid { } __attribute__ ((packed)); struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, - u64 root_objectid, u32 generation, + u64 root_objectid, u64 generation, int check_generation); struct dentry *btrfs_get_parent(struct dentry *child); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index f39d02e7f7ef..16f44bc481ab 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -121,7 +121,7 @@ struct extent_buffer { */ struct extent_changeset { /* How many bytes are set/cleared in this operation */ - unsigned int bytes_changed; + u64 bytes_changed; /* Changed ranges */ struct ulist range_changed; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f59ec55e5feb..416a1b753ff6 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2833,8 +2833,9 @@ out: return ret; } -static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) +static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len) { + struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root =
BTRFS_I(inode)->root; struct extent_state *cached_state = NULL; @@ -2866,6 +2867,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) goto out_only_mutex; } + ret = file_modified(file); + if (ret) + goto out_only_mutex; + lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode))); lockend = round_down(offset + len, btrfs_inode_sectorsize(BTRFS_I(inode))) - 1; @@ -3301,7 +3306,7 @@ static long btrfs_fallocate(struct file *file, int mode, return -EOPNOTSUPP; if (mode & FALLOC_FL_PUNCH_HOLE) - return btrfs_punch_hole(inode, offset, len); + return btrfs_punch_hole(file, offset, len); /* * Only trigger disk allocation, don't trigger qgroup reserve @@ -3323,6 +3328,10 @@ static long btrfs_fallocate(struct file *file, int mode, goto out; } + ret = file_modified(file); + if (ret) + goto out; + /* * TODO: Move these two operations after we have checked * accurate reserved space, or fallocate can still fail but diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 1d9262a35473..779b7745cdc4 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -995,7 +995,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode, int ret = 0; if (btrfs_is_free_space_inode(inode)) { - WARN_ON_ONCE(1); ret = -EINVAL; goto out_unlock; } @@ -4023,6 +4022,13 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) dest->root_key.objectid); return -EPERM; } + if (atomic_read(&dest->nr_swapfiles)) { + spin_unlock(&dest->root_item_lock); + btrfs_warn(fs_info, + "attempt to delete subvolume %llu with active swapfile", + root->root_key.objectid); + return -EPERM; + } root_flags = btrfs_root_flags(&dest->root_item); btrfs_set_root_flags(&dest->root_item, root_flags | BTRFS_ROOT_SUBVOL_DEAD); @@ -7474,7 +7480,19 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || em->block_start == EXTENT_MAP_INLINE) { free_extent_map(em); - ret = -ENOTBLK; + /* + * If we are in a NOWAIT context, return -EAGAIN in order to + * fallback to buffered IO. This is not only because we can + * block with buffered IO (no support for NOWAIT semantics at + * the moment) but also to avoid returning short reads to user + * space - this happens if we were able to read some data from + * previous non-compressed extents and then when we fallback to + * buffered IO, at btrfs_file_read_iter() by calling + * filemap_read(), we fail to fault in pages for the read buffer, + * in which case filemap_read() returns a short read (the number + * of bytes previously read is > 0, so it does not return -EFAULT). + */ + ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK; goto unlock_err; } @@ -10215,8 +10233,23 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, * set. We use this counter to prevent snapshots. We must increment it * before walking the extents because we don't want a concurrent * snapshot to run after we've already checked the extents. + * + * It is possible that subvolume is marked for deletion but still not + * removed yet. To prevent this race, we check the root status before + * activating the swapfile. 
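Both sides of this race follow the same pattern: the dead flag and the swapfile counter are only ever tested and changed under root_item_lock, so each side reliably observes the other. A simplified userspace sketch of the pattern, using a pthread mutex and plain ints as hypothetical stand-ins for the root lock, nr_swapfiles and the dead flag:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_swapfiles;
static int dead;

static int activate_swapfile(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (dead)
		ret = -1;	/* subvolume is being deleted: refuse */
	else
		nr_swapfiles++;	/* pins the subvolume */
	pthread_mutex_unlock(&lock);
	return ret;
}

static int delete_subvolume(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (nr_swapfiles)
		ret = -1;	/* active swapfile: refuse */
	else
		dead = 1;	/* from now on, activation fails */
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	activate_swapfile();
	/* Prints -1: the swapfile pins the subvolume against deletion. */
	printf("%d\n", delete_subvolume());
	return 0;
}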
*/ + spin_lock(&root->root_item_lock); + if (btrfs_root_dead(root)) { + spin_unlock(&root->root_item_lock); + + btrfs_exclop_finish(fs_info); + btrfs_warn(fs_info, + "cannot activate swapfile because subvolume %llu is being deleted", + root->root_key.objectid); + return -EPERM; + } atomic_inc(&root->nr_swapfiles); + spin_unlock(&root->root_item_lock); isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index b5e9bfe884c4..a17076a05c4d 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2811,6 +2811,8 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp) } } + btrfs_free_path(path); + path = NULL; if (copy_to_user(argp, subvol_info, sizeof(*subvol_info))) ret = -EFAULT; @@ -2903,6 +2905,8 @@ static int btrfs_ioctl_get_subvol_rootref(struct file *file, void __user *argp) } out: + btrfs_free_path(path); + if (!ret || ret == -EOVERFLOW) { rootrefs->num_items = found; /* update min_treeid for next search */ @@ -2914,7 +2918,6 @@ out: } kfree(rootrefs); - btrfs_free_path(path); return ret; } @@ -3878,6 +3881,8 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg) ipath->fspath->val[i] = rel_ptr; } + btrfs_free_path(path); + path = NULL; ret = copy_to_user((void __user *)(unsigned long)ipa->fspath, ipath->fspath, size); if (ret) { @@ -3893,26 +3898,6 @@ out: return ret; } -static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx) -{ - struct btrfs_data_container *inodes = ctx; - const size_t c = 3 * sizeof(u64); - - if (inodes->bytes_left >= c) { - inodes->bytes_left -= c; - inodes->val[inodes->elem_cnt] = inum; - inodes->val[inodes->elem_cnt + 1] = offset; - inodes->val[inodes->elem_cnt + 2] = root; - inodes->elem_cnt += 3; - } else { - inodes->bytes_missing += c - inodes->bytes_left; - inodes->bytes_left = 0; - inodes->elem_missed += 3; - } - - return 0; -} - static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, void __user *arg, int version) { @@ -3948,21 +3933,20 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, size = min_t(u32, loi->size, SZ_16M); } + inodes = init_data_container(size); + if (IS_ERR(inodes)) { + ret = PTR_ERR(inodes); + goto out_loi; + } + path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } - - inodes = init_data_container(size); - if (IS_ERR(inodes)) { - ret = PTR_ERR(inodes); - inodes = NULL; - goto out; - } - ret = iterate_inodes_from_logical(loi->logical, fs_info, path, - build_ino_list, inodes, ignore_offset); + inodes, ignore_offset); + btrfs_free_path(path); if (ret == -EINVAL) ret = -ENOENT; if (ret < 0) @@ -3974,7 +3958,6 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, ret = -EFAULT; out: - btrfs_free_path(path); kvfree(inodes); out_loi: kfree(loi); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index a02e38fb696c..74cbbb5d8897 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1158,6 +1158,21 @@ out_add_root: fs_info->qgroup_rescan_running = true; btrfs_queue_work(fs_info->qgroup_rescan_workers, &fs_info->qgroup_rescan_work); + } else { + /* + * We have set both BTRFS_FS_QUOTA_ENABLED and + * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with + * -EINPROGRESS. That can happen because someone started the + * rescan worker by calling quota rescan ioctl before we + * attempted to initialize the rescan worker. 
Failure due to + * quotas disabled in the meanwhile is not possible, because + * we are holding a write lock on fs_info->subvol_sem, which + * is also acquired when disabling quotas. + * Ignore such an error; any other error would need to undo + * everything we did in the transaction we just committed. + */ + ASSERT(ret == -EINPROGRESS); + ret = 0; } out_free_path: @@ -2898,14 +2913,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, dstgroup->rsv_rfer = inherit->lim.rsv_rfer; dstgroup->rsv_excl = inherit->lim.rsv_excl; - ret = update_qgroup_limit_item(trans, dstgroup); - if (ret) { - fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - btrfs_info(fs_info, - "unable to update quota limit for %llu", - dstgroup->qgroupid); - goto unlock; - } + qgroup_dirty(fs_info, dstgroup); } if (srcid) { @@ -3275,7 +3283,8 @@ out: static bool rescan_should_stop(struct btrfs_fs_info *fs_info) { return btrfs_fs_closing(fs_info) || - test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); + test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state) || + !test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); } static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) @@ -3305,11 +3314,9 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) err = PTR_ERR(trans); break; } - if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { - err = -EINTR; - } else { - err = qgroup_rescan_leaf(trans, path); - } + + err = qgroup_rescan_leaf(trans, path); + if (err > 0) btrfs_commit_transaction(trans); else @@ -3323,7 +3330,7 @@ out: if (err > 0 && fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) { fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - } else if (err < 0) { + } else if (err < 0 || stopped) { fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; } mutex_unlock(&fs_info->qgroup_rescan_lock); diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 9d33bf0154ab..9eb6a43e5ee6 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -332,6 +332,9 @@ static void merge_rbio(struct btrfs_raid_bio *dest, { bio_list_merge(&dest->bio_list, &victim->bio_list); dest->bio_list_bytes += victim->bio_list_bytes; + /* Also inherit the bitmaps from @victim. */ + bitmap_or(dest->dbitmap, victim->dbitmap, dest->dbitmap, + dest->stripe_npages); dest->generic_bio_cnt += victim->generic_bio_cnt; bio_list_init(&victim->bio_list); } @@ -874,6 +877,12 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err) if (rbio->generic_bio_cnt) btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt); + /* + * Clear the data bitmap, as the rbio may be cached for later usage. + * Do this before unlock_stripe() so there will be no new bio + * for this bio. + */ + bitmap_clear(rbio->dbitmap, 0, rbio->stripe_npages); /* * At this moment, rbio->bio_list is empty, however since rbio does not @@ -1207,6 +1216,9 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) else BUG(); + /* We should have at least one data sector. */ + ASSERT(bitmap_weight(rbio->dbitmap, rbio->stripe_npages)); + /* at this point we either have a full stripe, * or we've read the full stripe from the drive. * recalculate the parity and write the new results. @@ -1280,6 +1292,11 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) for (stripe = 0; stripe < rbio->real_stripes; stripe++) { for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { struct page *page; + + /* This vertical stripe has no data, skip it.
*/ + if (!test_bit(pagenr, rbio->dbitmap)) + continue; + if (stripe < rbio->nr_data) { page = page_in_rbio(rbio, stripe, pagenr, 1); if (!page) @@ -1304,6 +1321,11 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { struct page *page; + + /* This vertical stripe has no data, skip it. */ + if (!test_bit(pagenr, rbio->dbitmap)) + continue; + if (stripe < rbio->nr_data) { page = page_in_rbio(rbio, stripe, pagenr, 1); if (!page) @@ -1728,6 +1750,33 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) run_plug(plug); } +/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */ +static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio) +{ + const struct btrfs_fs_info *fs_info = rbio->fs_info; + const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT; + const u64 full_stripe_start = rbio->bbio->raid_map[0]; + const u32 orig_len = orig_bio->bi_iter.bi_size; + const u32 sectorsize = fs_info->sectorsize; + u64 cur_logical; + + ASSERT(orig_logical >= full_stripe_start && + orig_logical + orig_len <= full_stripe_start + + rbio->nr_data * rbio->stripe_len); + + bio_list_add(&rbio->bio_list, orig_bio); + rbio->bio_list_bytes += orig_bio->bi_iter.bi_size; + + /* Update the dbitmap. */ + for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len; + cur_logical += sectorsize) { + int bit = ((u32)(cur_logical - full_stripe_start) >> + PAGE_SHIFT) % rbio->stripe_npages; + + set_bit(bit, rbio->dbitmap); + } +} + /* * our main entry point for writes from the rest of the FS. */ @@ -1744,9 +1793,8 @@ int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio, btrfs_put_bbio(bbio); return PTR_ERR(rbio); } - bio_list_add(&rbio->bio_list, bio); - rbio->bio_list_bytes = bio->bi_iter.bi_size; rbio->operation = BTRFS_RBIO_WRITE; + rbio_add_bio(rbio, bio); btrfs_bio_counter_inc_noblocked(fs_info); rbio->generic_bio_cnt = 1; @@ -2045,9 +2093,12 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) atomic_set(&rbio->error, 0); /* - * read everything that hasn't failed. Thanks to the - * stripe cache, it is possible that some or all of these - * pages are going to be uptodate. + * Read everything that hasn't failed. However, this time we will + * not trust any cached sector, as it may contain stale data that the + * higher layer is not going to read. + * + * So here we always re-read everything in the recovery path.
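The dbitmap bookkeeping added in rbio_add_bio() reduces to mapping a logical byte range within the full stripe onto per-sector bits. A standalone sketch of that arithmetic, assuming hypothetical values of 4 KiB sectors and 16 sectors per stripe (the kernel derives both from fs_info and the rbio, and shifts by PAGE_SHIFT since pages and sectors coincide here):

#include <stdio.h>

#define SECTORSIZE     4096ULL
#define STRIPE_NPAGES  16U

static void mark_range(unsigned long *dbitmap, unsigned long long stripe_start,
		       unsigned long long logical, unsigned int len)
{
	unsigned long long cur;

	for (cur = logical; cur < logical + len; cur += SECTORSIZE) {
		unsigned int bit = (unsigned int)((cur - stripe_start) / SECTORSIZE)
				   % STRIPE_NPAGES;

		*dbitmap |= 1UL << bit;
	}
}

int main(void)
{
	unsigned long dbitmap = 0;

	/* An 8 KiB write starting one sector into the stripe: bits 1 and 2. */
	mark_range(&dbitmap, 0, SECTORSIZE, 2 * SECTORSIZE);
	printf("dbitmap = %#lx\n", dbitmap);	/* prints 0x6 */
	return 0;
}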
*/ for (stripe = 0; stripe < rbio->real_stripes; stripe++) { if (rbio->faila == stripe || rbio->failb == stripe) { @@ -2056,16 +2107,6 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) } for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { - struct page *p; - - /* - * the rmw code may have already read this - * page in - */ - p = rbio_stripe_page(rbio, stripe, pagenr); - if (PageUptodate(p)) - continue; - ret = rbio_add_io_page(rbio, &bio_list, rbio_stripe_page(rbio, stripe, pagenr), stripe, pagenr, rbio->stripe_len); @@ -2143,8 +2184,7 @@ int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio, } rbio->operation = BTRFS_RBIO_READ_REBUILD; - bio_list_add(&rbio->bio_list, bio); - rbio->bio_list_bytes = bio->bi_iter.bi_size; + rbio_add_bio(rbio, bio); rbio->faila = find_logical_bio_stripe(rbio, bio); if (rbio->faila == -1) { diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index db37a3799649..e9e8ca4e98a7 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -336,9 +336,10 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id, key.offset = ref_id; again: ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); - if (ret < 0) + if (ret < 0) { + err = ret; goto out; - if (ret == 0) { + } else if (ret == 0) { leaf = path->nodes[0]; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 0392c556af60..88b9a5394561 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3811,6 +3811,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, int ret; struct btrfs_device *dev; unsigned int nofs_flag; + bool need_commit = false; if (btrfs_fs_closing(fs_info)) return -EAGAIN; @@ -3924,6 +3925,12 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, */ nofs_flag = memalloc_nofs_save(); if (!is_dev_replace) { + u64 old_super_errors; + + spin_lock(&sctx->stat_lock); + old_super_errors = sctx->stat.super_errors; + spin_unlock(&sctx->stat_lock); + btrfs_info(fs_info, "scrub: started on devid %llu", devid); /* * by holding device list mutex, we can @@ -3932,6 +3939,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, mutex_lock(&fs_info->fs_devices->device_list_mutex); ret = scrub_supers(sctx, dev); mutex_unlock(&fs_info->fs_devices->device_list_mutex); + + spin_lock(&sctx->stat_lock); + /* + * Super block errors were found, but we cannot commit a transaction + * in the current context, since btrfs_commit_transaction() needs + * to pause the currently running scrub (held by ourselves). + */ + if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) + need_commit = true; + spin_unlock(&sctx->stat_lock); } if (!ret) @@ -3958,6 +3975,25 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, scrub_workers_put(fs_info); scrub_put_ctx(sctx); + /* + * We found some super block errors before; now that scrub has + * finished, try to force a transaction commit.
+ */ + if (need_commit) { + struct btrfs_trans_handle *trans; + + trans = btrfs_start_transaction(fs_info->tree_root, 0); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + btrfs_err(fs_info, + "scrub: failed to start transaction to fix super block errors: %d", ret); + return ret; + } + ret = btrfs_commit_transaction(trans); + if (ret < 0) + btrfs_err(fs_info, + "scrub: failed to commit transaction to fix super block errors: %d", ret); + } return ret; out: scrub_workers_put(fs_info); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 6b80dee17f49..4a6ba0997e39 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -5398,6 +5398,7 @@ static int clone_range(struct send_ctx *sctx, u64 ext_len; u64 clone_len; u64 clone_data_offset; + bool crossed_src_i_size = false; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(clone_root->root, path); @@ -5454,8 +5455,10 @@ static int clone_range(struct send_ctx *sctx, if (key.offset >= clone_src_i_size) break; - if (key.offset + ext_len > clone_src_i_size) + if (key.offset + ext_len > clone_src_i_size) { ext_len = clone_src_i_size - key.offset; + crossed_src_i_size = true; + } clone_data_offset = btrfs_file_extent_offset(leaf, ei); if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) { @@ -5515,6 +5518,25 @@ static int clone_range(struct send_ctx *sctx, ret = send_clone(sctx, offset, clone_len, clone_root); } + } else if (crossed_src_i_size && clone_len < len) { + /* + * If we are at i_size of the clone source inode and we + * cannot clone from it, terminate the loop. This is + * to avoid sending two write operations, one with a + * length matching clone_len and the final one after + * this loop with a length of len - clone_len. + * + * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED + * was passed to the send ioctl), this helps avoid + * sending an encoded write for an offset that is not + * sector size aligned, in case the i_size of the source + * inode is not sector size aligned. That will make the + * receiver fall back to decompression of the data and + * writing it using regular buffered IO, therefore while + * not incorrect, it's not optimal due to decompression and + * possible re-compression at the receiver.
+ */ + break; } else { ret = send_extent_data(sctx, offset, clone_len); } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index b5d2005d3415..310f18d5624c 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -652,6 +652,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, compress_force = false; no_compress++; } else { + btrfs_err(info, "unrecognized compression value %s", + args[0].from); ret = -EINVAL; goto out; } @@ -710,8 +712,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, case Opt_thread_pool: ret = match_int(&args[0], &intarg); if (ret) { + btrfs_err(info, "unrecognized thread_pool value %s", + args[0].from); goto out; } else if (intarg == 0) { + btrfs_err(info, "invalid value 0 for thread_pool"); ret = -EINVAL; goto out; } @@ -772,8 +777,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, break; case Opt_ratio: ret = match_int(&args[0], &intarg); - if (ret) + if (ret) { + btrfs_err(info, "unrecognized metadata_ratio value %s", + args[0].from); goto out; + } info->metadata_ratio = intarg; btrfs_info(info, "metadata ratio %u", info->metadata_ratio); @@ -790,6 +798,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, btrfs_set_and_info(info, DISCARD_ASYNC, "turning on async discard"); } else { + btrfs_err(info, "unrecognized discard mode value %s", + args[0].from); ret = -EINVAL; goto out; } @@ -814,6 +824,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, btrfs_set_and_info(info, FREE_SPACE_TREE, "enabling free space tree"); } else { + btrfs_err(info, "unrecognized space_cache value %s", + args[0].from); ret = -EINVAL; goto out; } @@ -889,8 +901,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, break; case Opt_check_integrity_print_mask: ret = match_int(&args[0], &intarg); - if (ret) + if (ret) { + btrfs_err(info, + "unrecognized check_integrity_print_mask value %s", + args[0].from); goto out; + } info->check_integrity_print_mask = intarg; btrfs_info(info, "check_integrity_print_mask 0x%x", info->check_integrity_print_mask); @@ -905,13 +921,15 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, goto out; #endif case Opt_fatal_errors: - if (strcmp(args[0].from, "panic") == 0) + if (strcmp(args[0].from, "panic") == 0) { btrfs_set_opt(info->mount_opt, PANIC_ON_FATAL_ERROR); - else if (strcmp(args[0].from, "bug") == 0) + } else if (strcmp(args[0].from, "bug") == 0) { btrfs_clear_opt(info->mount_opt, PANIC_ON_FATAL_ERROR); - else { + } else { + btrfs_err(info, "unrecognized fatal_errors value %s", + args[0].from); ret = -EINVAL; goto out; } @@ -919,8 +937,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, case Opt_commit_interval: intarg = 0; ret = match_int(&args[0], &intarg); - if (ret) + if (ret) { + btrfs_err(info, "unrecognized commit_interval value %s", + args[0].from); + ret = -EINVAL; goto out; + } if (intarg == 0) { btrfs_info(info, "using default commit interval %us", @@ -934,8 +956,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, break; case Opt_rescue: ret = parse_rescue_options(info, args[0].from); - if (ret < 0) + if (ret < 0) { + btrfs_err(info, "unrecognized rescue value %s", + args[0].from); goto out; + } break; #ifdef CONFIG_BTRFS_DEBUG case Opt_fragment_all: diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 3bb6b688ece5..ecf190286377 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -1767,8 +1767,11 @@ int __init btrfs_init_sysfs(void) #ifdef CONFIG_BTRFS_DEBUG ret = 
sysfs_create_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group); - if (ret) - goto out2; + if (ret) { + sysfs_unmerge_group(&btrfs_kset->kobj, + &btrfs_static_feature_attr_group); + goto out_remove_group; + } #endif return 0; diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 999c14e5d0bd..0599566c66b0 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -192,7 +192,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info) void btrfs_free_dummy_root(struct btrfs_root *root) { - if (!root) + if (IS_ERR_OR_NULL(root)) return; /* Will be freed by btrfs_free_fs_roots */ if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state))) diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index ce1ca8e73c2d..289366c98f5b 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c @@ -230,21 +230,21 @@ static int test_no_shared_qgroup(struct btrfs_root *root, ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); if (ret) { - ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } ret = insert_normal_tree_ref(root, nodesize, nodesize, 0, BTRFS_FS_TREE_OBJECTID); - if (ret) + if (ret) { + ulist_free(old_roots); return ret; + } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); if (ret) { ulist_free(old_roots); - ulist_free(new_roots); test_err("couldn't find old roots: %d", ret); return ret; } @@ -256,31 +256,33 @@ static int test_no_shared_qgroup(struct btrfs_root *root, return ret; } + /* btrfs_qgroup_account_extent() always frees the ulists passed to it. */ + old_roots = NULL; + new_roots = NULL; + if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, nodesize, nodesize)) { test_err("qgroup counts didn't match expected values"); return -EINVAL; } - old_roots = NULL; - new_roots = NULL; ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); if (ret) { - ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } ret = remove_extent_item(root, nodesize, nodesize); - if (ret) + if (ret) { + ulist_free(old_roots); return -EINVAL; + } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); if (ret) { ulist_free(old_roots); - ulist_free(new_roots); test_err("couldn't find old roots: %d", ret); return ret; } @@ -331,21 +333,21 @@ static int test_multiple_refs(struct btrfs_root *root, ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); if (ret) { - ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } ret = insert_normal_tree_ref(root, nodesize, nodesize, 0, BTRFS_FS_TREE_OBJECTID); - if (ret) + if (ret) { + ulist_free(old_roots); return ret; + } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); if (ret) { ulist_free(old_roots); - ulist_free(new_roots); test_err("couldn't find old roots: %d", ret); return ret; } @@ -366,21 +368,21 @@ static int test_multiple_refs(struct btrfs_root *root, ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); if (ret) { - ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } ret = add_tree_ref(root, nodesize, nodesize, 0, BTRFS_FIRST_FREE_OBJECTID); - if (ret) + if (ret) { + ulist_free(old_roots); return ret; + } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); if (ret) { ulist_free(old_roots); - ulist_free(new_roots); test_err("couldn't find old roots: %d", ret); 
return ret; } @@ -407,21 +409,21 @@ static int test_multiple_refs(struct btrfs_root *root, ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); if (ret) { - ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); return ret; } ret = remove_extent_ref(root, nodesize, nodesize, 0, BTRFS_FIRST_FREE_OBJECTID); - if (ret) + if (ret) { + ulist_free(old_roots); return ret; + } ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); if (ret) { ulist_free(old_roots); - ulist_free(new_roots); test_err("couldn't find old roots: %d", ret); return ret; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 735dd85e45ca..372ebbc25422 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1075,7 +1075,9 @@ again: extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen, inode_objectid, parent_objectid, 0, 0); - if (!IS_ERR_OR_NULL(extref)) { + if (IS_ERR(extref)) { + return PTR_ERR(extref); + } else if (extref) { u32 item_size; u32 cur_offset = 0; unsigned long base; @@ -5333,6 +5335,18 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, mutex_lock(&inode->log_mutex); } + /* + * For symlinks, we must always log their content, which is stored in an + * inline extent, otherwise we could end up with an empty symlink after + * log replay, which is invalid on linux (symlink(2) returns -ENOENT if + * one attempts to create an empty symlink). + * We don't need to worry about flushing delalloc, because when we create + * the inline extent when the symlink is created (we never have delalloc + * for symlinks). + */ + if (S_ISLNK(inode->vfs_inode.i_mode)) + inode_only = LOG_INODE_ALL; + /* * a brute force approach to making sure we get the most uptodate * copies of everything. @@ -5723,7 +5737,7 @@ process_leaf: } ctx->log_new_dentries = false; - if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK) + if (type == BTRFS_FT_DIR) log_mode = LOG_INODE_ALL; ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode), log_mode, ctx); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 004950ec3ff9..9191bbb22dfe 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -540,15 +540,47 @@ error: return ret; } -static bool device_path_matched(const char *path, struct btrfs_device *device) +/* + * Check if the device in the path matches the device in the given struct device. + * + * Returns: + * true If it is the same device. + * false If it is not the same device or on error. + */ +static bool device_matched(const struct btrfs_device *device, const char *path) { - int found; + char *device_name; + struct block_device *bdev_old; + struct block_device *bdev_new; + + /* + * If we are looking for a device with the matching dev_t, then skip + * device without a name (a missing device). 
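device_matched() compares device identity rather than path strings, since two different spellings can name the same block device. A userspace analogue of the idea using stat(2) and st_rdev; this is a sketch, not the kernel's lookup_bdev()-based implementation:

#include <stdio.h>
#include <sys/stat.h>

static int same_block_device(const char *a, const char *b)
{
	struct stat sa, sb;

	if (stat(a, &sa) || stat(b, &sb))
		return 0;			/* on error, treat as no match */
	if (!S_ISBLK(sa.st_mode) || !S_ISBLK(sb.st_mode))
		return 0;
	return sa.st_rdev == sb.st_rdev;	/* same major:minor, same device */
}

int main(int argc, char **argv)
{
	if (argc == 3)
		printf("%s\n", same_block_device(argv[1], argv[2]) ?
		       "same" : "different");
	return 0;
}

For example, "/dev/sda1" and a symlink under /dev/disk/by-id/ that resolves to it compare equal here, while a plain strcmp() of the two paths would not.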
+ */ + if (!device->name) + return false; + + device_name = kzalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL); + if (!device_name) + return false; rcu_read_lock(); - found = strcmp(rcu_str_deref(device->name), path); + scnprintf(device_name, BTRFS_PATH_NAME_MAX, "%s", rcu_str_deref(device->name)); rcu_read_unlock(); - return found == 0; + bdev_old = lookup_bdev(device_name); + kfree(device_name); + if (IS_ERR(bdev_old)) + return false; + + bdev_new = lookup_bdev(path); + if (IS_ERR(bdev_new)) + return false; + + if (bdev_old == bdev_new) + return true; + + return false; } /* @@ -581,9 +613,7 @@ static int btrfs_free_stale_devices(const char *path, &fs_devices->devices, dev_list) { if (skip_device && skip_device == device) continue; - if (path && !device->name) - continue; - if (path && !device_path_matched(path, device)) + if (path && !device_matched(device, path)) continue; if (fs_devices->opened) { /* for an already deleted device return 0 */ @@ -4219,10 +4249,12 @@ static int balance_kthread(void *data) struct btrfs_fs_info *fs_info = data; int ret = 0; + sb_start_write(fs_info->sb); mutex_lock(&fs_info->balance_mutex); if (fs_info->balance_ctl) ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); mutex_unlock(&fs_info->balance_mutex); + sb_end_write(fs_info->sb); return ret; } @@ -7188,12 +7220,12 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) * do another round of validation checks. */ if (total_dev != fs_info->fs_devices->total_devices) { - btrfs_err(fs_info, - "super_num_devices %llu mismatch with num_devices %llu found here", + btrfs_warn(fs_info, +"super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit", btrfs_super_num_devices(fs_info->super_copy), total_dev); - ret = -EINVAL; - goto error; + fs_info->fs_devices->total_devices = total_dev; + btrfs_set_super_num_devices(fs_info->super_copy, total_dev); } if (btrfs_super_total_bytes(fs_info->super_copy) < fs_info->fs_devices->total_rw_bytes) { diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 7934a4d2d183..71712bf23bed 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -390,6 +390,9 @@ static int btrfs_xattr_handler_set(const struct xattr_handler *handler, const char *name, const void *buffer, size_t size, int flags) { + if (btrfs_root_readonly(BTRFS_I(inode)->root)) + return -EROFS; + name = xattr_full_name(handler, name); return btrfs_setxattr_trans(inode, name, buffer, size, flags); } diff --git a/fs/buffer.c b/fs/buffer.c index dd992943b270..5f6fd1fe2aba 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2378,7 +2378,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size) { struct address_space *mapping = inode->i_mapping; struct page *page; - void *fsdata; + void *fsdata = NULL; int err; err = inode_newsize_ok(inode, size); @@ -2404,7 +2404,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, struct inode *inode = mapping->host; unsigned int blocksize = i_blocksize(inode); struct page *page; - void *fsdata; + void *fsdata = NULL; pgoff_t index, curidx; loff_t curpos; unsigned zerofrom, offset, len; diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index d3f67271d3c7..51562d36fa83 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2294,6 +2294,7 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid) */ static int unsafe_request_wait(struct inode *inode) { + struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mds_request *req1 = NULL, 
*req2 = NULL; int ret, err = 0; @@ -2313,6 +2314,76 @@ static int unsafe_request_wait(struct inode *inode) } spin_unlock(&ci->i_unsafe_lock); + /* + * Trigger a flush of the journal logs in all the relevant MDSes + * manually; otherwise, in the worst case, we must wait up to 5 seconds + * for the journal logs to be flushed by the MDSes periodically. + */ + if (req1 || req2) { + struct ceph_mds_request *req; + struct ceph_mds_session **sessions; + struct ceph_mds_session *s; + unsigned int max_sessions; + int i; + + mutex_lock(&mdsc->mutex); + max_sessions = mdsc->max_sessions; + + sessions = kcalloc(max_sessions, sizeof(s), GFP_KERNEL); + if (!sessions) { + mutex_unlock(&mdsc->mutex); + err = -ENOMEM; + goto out; + } + + spin_lock(&ci->i_unsafe_lock); + if (req1) { + list_for_each_entry(req, &ci->i_unsafe_dirops, + r_unsafe_dir_item) { + s = req->r_session; + if (!s) + continue; + if (!sessions[s->s_mds]) { + s = ceph_get_mds_session(s); + sessions[s->s_mds] = s; + } + } + } + if (req2) { + list_for_each_entry(req, &ci->i_unsafe_iops, + r_unsafe_target_item) { + s = req->r_session; + if (!s) + continue; + if (!sessions[s->s_mds]) { + s = ceph_get_mds_session(s); + sessions[s->s_mds] = s; + } + } + } + spin_unlock(&ci->i_unsafe_lock); + + /* the auth MDS */ + spin_lock(&ci->i_ceph_lock); + if (ci->i_auth_cap) { + s = ci->i_auth_cap->session; + if (!sessions[s->s_mds]) + sessions[s->s_mds] = ceph_get_mds_session(s); + } + spin_unlock(&ci->i_ceph_lock); + mutex_unlock(&mdsc->mutex); + + /* send flush mdlog request to MDSes */ + for (i = 0; i < max_sessions; i++) { + s = sessions[i]; + if (s) { + send_flush_mdlog(s); + ceph_put_mds_session(s); + } + } + kfree(sessions); + } + dout("unsafe_request_wait %p wait on tid %llu %llu\n", inode, req1 ? req1->r_tid : 0ULL, req2 ?
req2->r_tid : 0ULL); if (req1) { @@ -2320,15 +2391,19 @@ static int unsafe_request_wait(struct inode *inode) ceph_timeout_jiffies(req1->r_timeout)); if (ret) err = -EIO; - ceph_mdsc_put_request(req1); } if (req2) { ret = !wait_for_completion_timeout(&req2->r_safe_completion, ceph_timeout_jiffies(req2->r_timeout)); if (ret) err = -EIO; - ceph_mdsc_put_request(req2); } + +out: + if (req1) + ceph_mdsc_put_request(req1); + if (req2) + ceph_mdsc_put_request(req2); return err; } @@ -3501,24 +3576,23 @@ static void handle_cap_grant(struct inode *inode, fill_inline = true; } - if (ci->i_auth_cap == cap && - le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) { - if (newcaps & ~extra_info->issued) - wake = true; + if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) { + if (ci->i_auth_cap == cap) { + if (newcaps & ~extra_info->issued) + wake = true; - if (ci->i_requested_max_size > max_size || - !(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) { - /* re-request max_size if necessary */ - ci->i_requested_max_size = 0; - wake = true; + if (ci->i_requested_max_size > max_size || + !(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) { + /* re-request max_size if necessary */ + ci->i_requested_max_size = 0; + wake = true; + } + + ceph_kick_flushing_inode_caps(session, ci); } - - ceph_kick_flushing_inode_caps(session, ci); - spin_unlock(&ci->i_ceph_lock); up_read(&session->s_mdsc->snap_rwsem); - } else { - spin_unlock(&ci->i_ceph_lock); } + spin_unlock(&ci->i_ceph_lock); if (fill_inline) ceph_fill_inline_data(inode, NULL, extra_info->inline_data, @@ -4311,33 +4385,9 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s) dout("flush_dirty_caps done\n"); } -static void iterate_sessions(struct ceph_mds_client *mdsc, - void (*cb)(struct ceph_mds_session *)) -{ - int mds; - - mutex_lock(&mdsc->mutex); - for (mds = 0; mds < mdsc->max_sessions; ++mds) { - struct ceph_mds_session *s; - - if (!mdsc->sessions[mds]) - continue; - - s = ceph_get_mds_session(mdsc->sessions[mds]); - if (!s) - continue; - - mutex_unlock(&mdsc->mutex); - cb(s); - ceph_put_mds_session(s); - mutex_lock(&mdsc->mutex); - } - mutex_unlock(&mdsc->mutex); -} - void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) { - iterate_sessions(mdsc, flush_dirty_session_caps); + ceph_mdsc_iterate_sessions(mdsc, flush_dirty_session_caps, true); } void __ceph_touch_fmode(struct ceph_inode_info *ci, diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index f63c1a090139..1fddb9cd3e88 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -478,8 +478,11 @@ more: 2 : (fpos_off(rde->offset) + 1); err = note_last_dentry(dfi, rde->name, rde->name_len, next_offset); - if (err) + if (err) { + ceph_mdsc_put_request(dfi->last_readdir); + dfi->last_readdir = NULL; return err; + } } else if (req->r_reply_info.dir_end) { dfi->next_offset = 2; /* keep last name */ @@ -520,6 +523,12 @@ more: if (!dir_emit(ctx, rde->name, rde->name_len, ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)), le32_to_cpu(rde->inode.in->mode) >> 12)) { + /* + * NOTE: There is no need to put the 'dfi->last_readdir' here, + * because when dir_emit stops us it most likely + * doesn't have enough memory, etc. So for the next readdir + * it will continue.
+ */ dout("filldir stopping us...\n"); return 0; } diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 450050801f3b..943655e36a79 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -592,9 +592,15 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry, iinfo.change_attr = 1; ceph_encode_timespec64(&iinfo.btime, &now); - iinfo.xattr_len = ARRAY_SIZE(xattr_buf); - iinfo.xattr_data = xattr_buf; - memset(iinfo.xattr_data, 0, iinfo.xattr_len); + if (req->r_pagelist) { + iinfo.xattr_len = req->r_pagelist->length; + iinfo.xattr_data = req->r_pagelist->mapped_tail; + } else { + /* fake it */ + iinfo.xattr_len = ARRAY_SIZE(xattr_buf); + iinfo.xattr_data = xattr_buf; + memset(iinfo.xattr_data, 0, iinfo.xattr_len); + } in.ino = cpu_to_le64(vino.ino); in.snapid = cpu_to_le64(CEPH_NOSNAP); @@ -697,6 +703,12 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, if (dentry->d_name.len > NAME_MAX) return -ENAMETOOLONG; + /* + * Do not truncate the file, since atomic_open is called before the + * permission check. The caller will do the truncation afterward. + */ + flags &= ~O_TRUNC; + if (flags & O_CREAT) { if (ceph_quota_is_max_files_exceeded(dir)) return -EDQUOT; @@ -706,6 +718,10 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, err = ceph_security_init_secctx(dentry, mode, &as_ctx); if (err < 0) goto out_ctx; + /* Async create can't handle more than a page of xattrs */ + if (as_ctx.pagelist && + !list_is_singular(&as_ctx.pagelist->head)) + try_async = false; } else if (!d_in_lookup(dentry)) { /* If it's not being looked up, it's negative */ return -ENOENT; @@ -759,9 +775,7 @@ retry: } set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); - err = ceph_mdsc_do_request(mdsc, - (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, - req); + err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? 
dir : NULL, req); err = ceph_handle_snapdir(req, dentry, err); if (err) goto out_req; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 981a91590631..fa51872ff850 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -809,6 +809,33 @@ static void put_request_session(struct ceph_mds_request *req) } } +void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc, + void (*cb)(struct ceph_mds_session *), + bool check_state) +{ + int mds; + + mutex_lock(&mdsc->mutex); + for (mds = 0; mds < mdsc->max_sessions; ++mds) { + struct ceph_mds_session *s; + + s = __ceph_lookup_mds_session(mdsc, mds); + if (!s) + continue; + + if (check_state && !check_session_state(s)) { + ceph_put_mds_session(s); + continue; + } + + mutex_unlock(&mdsc->mutex); + cb(s); + ceph_put_mds_session(s); + mutex_lock(&mdsc->mutex); + } + mutex_unlock(&mdsc->mutex); +} + void ceph_mdsc_release_request(struct kref *kref) { struct ceph_mds_request *req = container_of(kref, @@ -1157,7 +1184,7 @@ random: /* * session messages */ -static struct ceph_msg *create_session_msg(u32 op, u64 seq) +struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq) { struct ceph_msg *msg; struct ceph_mds_session_head *h; @@ -1165,7 +1192,8 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq) msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS, false); if (!msg) { - pr_err("create_session_msg ENOMEM creating msg\n"); + pr_err("ENOMEM creating session %s msg\n", + ceph_session_op_name(op)); return NULL; } h = msg->front.iov_base; @@ -1184,14 +1212,17 @@ static int encode_supported_features(void **p, void *end) if (count > 0) { size_t i; size_t size = FEATURE_BYTES(count); + unsigned long bit; if (WARN_ON_ONCE(*p + 4 + size > end)) return -ERANGE; ceph_encode_32(p, size); memset(*p, 0, size); - for (i = 0; i < count; i++) - ((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8); + for (i = 0; i < count; i++) { + bit = feature_bits[i]; + ((unsigned char *)(*p))[bit / 8] |= BIT(bit % 8); + } *p += size; } else { if (WARN_ON_ONCE(*p + 4 > end)) @@ -1296,7 +1327,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes, GFP_NOFS, false); if (!msg) { - pr_err("create_session_msg ENOMEM creating msg\n"); + pr_err("ENOMEM creating session open msg\n"); return ERR_PTR(-ENOMEM); } p = msg->front.iov_base; @@ -1830,8 +1861,8 @@ static int send_renew_caps(struct ceph_mds_client *mdsc, dout("send_renew_caps to mds%d (%s)\n", session->s_mds, ceph_mds_state_name(state)); - msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, - ++session->s_renew_seq); + msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, + ++session->s_renew_seq); if (!msg) return -ENOMEM; ceph_con_send(&session->s_con, msg); @@ -1845,7 +1876,7 @@ static int send_flushmsg_ack(struct ceph_mds_client *mdsc, dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n", session->s_mds, ceph_session_state_name(session->s_state), seq); - msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq); + msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq); if (!msg) return -ENOMEM; ceph_con_send(&session->s_con, msg); @@ -1897,7 +1928,8 @@ static int request_close_session(struct ceph_mds_session *session) dout("request_close_session mds%d state %s seq %lld\n", session->s_mds, ceph_session_state_name(session->s_state), session->s_seq); - msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq); + msg = 
ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE, + session->s_seq); if (!msg) return -ENOMEM; ceph_con_send(&session->s_con, msg); @@ -4372,24 +4404,12 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, } /* - * lock unlock sessions, to wait ongoing session activities + * lock unlock the session, to wait ongoing session activities */ -static void lock_unlock_sessions(struct ceph_mds_client *mdsc) +static void lock_unlock_session(struct ceph_mds_session *s) { - int i; - - mutex_lock(&mdsc->mutex); - for (i = 0; i < mdsc->max_sessions; i++) { - struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); - if (!s) - continue; - mutex_unlock(&mdsc->mutex); - mutex_lock(&s->s_mutex); - mutex_unlock(&s->s_mutex); - ceph_put_mds_session(s); - mutex_lock(&mdsc->mutex); - } - mutex_unlock(&mdsc->mutex); + mutex_lock(&s->s_mutex); + mutex_unlock(&s->s_mutex); } static void maybe_recover_session(struct ceph_mds_client *mdsc) @@ -4644,6 +4664,30 @@ static void wait_requests(struct ceph_mds_client *mdsc) dout("wait_requests done\n"); } +void send_flush_mdlog(struct ceph_mds_session *s) +{ + struct ceph_msg *msg; + + /* + * Pre-luminous MDS crashes when it sees an unknown session request + */ + if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS)) + return; + + mutex_lock(&s->s_mutex); + dout("request mdlog flush to mds%d (%s)s seq %lld\n", s->s_mds, + ceph_session_state_name(s->s_state), s->s_seq); + msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG, + s->s_seq); + if (!msg) { + pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n", + s->s_mds, ceph_session_state_name(s->s_state), s->s_seq); + } else { + ceph_con_send(&s->s_con, msg); + } + mutex_unlock(&s->s_mutex); +} + /* * called before mount is ro, and before dentries are torn down. * (hmm, does this still race with new lookups?) @@ -4653,7 +4697,8 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) dout("pre_umount\n"); mdsc->stopping = 1; - lock_unlock_sessions(mdsc); + ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true); + ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false); ceph_flush_dirty_caps(mdsc); wait_requests(mdsc); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index f5adbebcb38e..a92e42e8a9f8 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -33,10 +33,6 @@ enum ceph_feature_type { CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_METRIC_COLLECT, }; -/* - * This will always have the highest feature bit value - * as the last element of the array. 
- */ #define CEPHFS_FEATURES_CLIENT_SUPPORTED { \ 0, 1, 2, 3, 4, 5, 6, 7, \ CEPHFS_FEATURE_MIMIC, \ @@ -45,8 +41,6 @@ enum ceph_feature_type { CEPHFS_FEATURE_MULTI_RECONNECT, \ CEPHFS_FEATURE_DELEG_INO, \ CEPHFS_FEATURE_METRIC_COLLECT, \ - \ - CEPHFS_FEATURE_MAX, \ } #define CEPHFS_FEATURES_CLIENT_REQUIRED {} @@ -524,6 +518,11 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req) { kref_put(&req->r_kref, ceph_mdsc_release_request); } +extern void send_flush_mdlog(struct ceph_mds_session *s); +extern void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc, + void (*cb)(struct ceph_mds_session *), + bool check_state); +extern struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq); extern void __ceph_queue_cap_release(struct ceph_mds_session *session, struct ceph_cap *cap); extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc, diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 0369f672a76f..734873be56a7 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -697,9 +697,10 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc, struct ceph_mds_snap_realm *ri; /* encoded */ __le64 *snaps; /* encoded */ __le64 *prior_parent_snaps; /* encoded */ - struct ceph_snap_realm *realm = NULL; + struct ceph_snap_realm *realm; struct ceph_snap_realm *first_realm = NULL; - int invalidate = 0; + struct ceph_snap_realm *realm_to_rebuild = NULL; + int rebuild_snapcs; int err = -ENOMEM; LIST_HEAD(dirty_realms); @@ -707,6 +708,8 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc, dout("update_snap_trace deletion=%d\n", deletion); more: + realm = NULL; + rebuild_snapcs = 0; ceph_decode_need(&p, e, sizeof(*ri), bad); ri = p; p += sizeof(*ri); @@ -730,7 +733,7 @@ more: err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent)); if (err < 0) goto fail; - invalidate += err; + rebuild_snapcs += err; if (le64_to_cpu(ri->seq) > realm->seq) { dout("update_snap_trace updating %llx %p %lld -> %lld\n", @@ -755,22 +758,30 @@ more: if (realm->seq > mdsc->last_snap_seq) mdsc->last_snap_seq = realm->seq; - invalidate = 1; + rebuild_snapcs = 1; } else if (!realm->cached_context) { dout("update_snap_trace %llx %p seq %lld new\n", realm->ino, realm, realm->seq); - invalidate = 1; + rebuild_snapcs = 1; } else { dout("update_snap_trace %llx %p seq %lld unchanged\n", realm->ino, realm, realm->seq); } - dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino, - realm, invalidate, p, e); + dout("done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino, - realm, rebuild_snapcs, p, e); - /* invalidate when we reach the _end_ (root) of the trace */ - if (invalidate && p >= e) - rebuild_snap_realms(realm, &dirty_realms); + /* + * this will always track the upmost parent realm from which + * we need to rebuild the snapshot contexts _downward_ in + * hierarchy.
+ */ + if (rebuild_snapcs) + realm_to_rebuild = realm; + + /* rebuild_snapcs when we reach the _end_ (root) of the trace */ + if (realm_to_rebuild && p >= e) + rebuild_snap_realms(realm_to_rebuild, &dirty_realms); if (!first_realm) first_realm = realm; diff --git a/fs/ceph/strings.c b/fs/ceph/strings.c index 4a79f3632260..573bb9556fb5 100644 --- a/fs/ceph/strings.c +++ b/fs/ceph/strings.c @@ -46,6 +46,7 @@ const char *ceph_session_op_name(int op) case CEPH_SESSION_FLUSHMSG_ACK: return "flushmsg_ack"; case CEPH_SESSION_FORCE_RO: return "force_ro"; case CEPH_SESSION_REJECT: return "reject"; + case CEPH_SESSION_REQUEST_FLUSH_MDLOG: return "flush_mdlog"; } return "???"; } diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 2a3981337077..e4ba1ef15490 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -317,6 +317,14 @@ static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val, } #define XATTR_RSTAT_FIELD(_type, _name) \ XATTR_NAME_CEPH(_type, _name, VXATTR_FLAG_RSTAT) +#define XATTR_RSTAT_FIELD_UPDATABLE(_type, _name) \ + { \ + .name = CEPH_XATTR_NAME(_type, _name), \ + .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \ + .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \ + .exists_cb = NULL, \ + .flags = VXATTR_FLAG_RSTAT, \ + } #define XATTR_LAYOUT_FIELD(_type, _name, _field) \ { \ .name = CEPH_XATTR_NAME2(_type, _name, _field), \ @@ -354,7 +362,7 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = { XATTR_RSTAT_FIELD(dir, rfiles), XATTR_RSTAT_FIELD(dir, rsubdirs), XATTR_RSTAT_FIELD(dir, rbytes), - XATTR_RSTAT_FIELD(dir, rctime), + XATTR_RSTAT_FIELD_UPDATABLE(dir, rctime), { .name = "ceph.dir.pin", .name_size = sizeof("ceph.dir.pin"), diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8563e7b78c02..b582dbf919e4 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -898,7 +898,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter) ssize_t rc; struct inode *inode = file_inode(iocb->ki_filp); - if (iocb->ki_filp->f_flags & O_DIRECT) + if (iocb->ki_flags & IOCB_DIRECT) return cifs_user_readv(iocb, iter); rc = cifs_revalidate_mapping(inode); @@ -1033,7 +1033,7 @@ struct file_system_type cifs_fs_type = { }; MODULE_ALIAS_FS("cifs"); -static struct file_system_type smb3_fs_type = { +struct file_system_type smb3_fs_type = { .owner = THIS_MODULE, .name = "smb3", .mount = smb3_do_mount, @@ -1221,8 +1221,11 @@ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off, ssize_t rc; struct cifsFileInfo *cfile = dst_file->private_data; - if (cfile->swapfile) - return -EOPNOTSUPP; + if (cfile->swapfile) { + rc = -EOPNOTSUPP; + free_xid(xid); + return rc; + } rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff, len, flags); diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 905d03863721..e996f0bef414 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -51,7 +51,7 @@ static inline unsigned long cifs_get_time(struct dentry *dentry) return (unsigned long) dentry->d_fsdata; } -extern struct file_system_type cifs_fs_type; +extern struct file_system_type cifs_fs_type, smb3_fs_type; extern const struct address_space_operations cifs_addr_ops; extern const struct address_space_operations cifs_addr_ops_smallbuf; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 6599069be690..196285b0fe46 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1982,11 +1982,13 @@ extern mempool_t *cifs_mid_poolp; /* Operations for different SMB versions */ #define SMB1_VERSION_STRING "1.0" +#define SMB20_VERSION_STRING "2.0" +#ifdef 
CONFIG_CIFS_ALLOW_INSECURE_LEGACY extern struct smb_version_operations smb1_operations; extern struct smb_version_values smb1_values; -#define SMB20_VERSION_STRING "2.0" extern struct smb_version_operations smb20_operations; extern struct smb_version_values smb20_values; +#endif /* CIFS_ALLOW_INSECURE_LEGACY */ #define SMB21_VERSION_STRING "2.1" extern struct smb_version_operations smb21_operations; extern struct smb_version_values smb21_values; diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 24c6f36177ba..a6ca4eda9a5a 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -230,6 +230,8 @@ extern unsigned int setup_special_user_owner_ACE(struct cifs_ace *pace); extern void dequeue_mid(struct mid_q_entry *mid, bool malformed); extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, unsigned int to_read); +extern ssize_t cifs_discard_from_socket(struct TCP_Server_Info *server, + size_t to_read); extern int cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, unsigned int page_offset, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 0496934feecb..c279527aae92 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1451,9 +1451,9 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server) while (remaining > 0) { int length; - length = cifs_read_from_socket(server, server->bigbuf, - min_t(unsigned int, remaining, - CIFSMaxBufSize + MAX_HEADER_SIZE(server))); + length = cifs_discard_from_socket(server, + min_t(size_t, remaining, + CIFSMaxBufSize + MAX_HEADER_SIZE(server))); if (length < 0) return length; server->total_read += length; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 7f5d173760cf..d1c3086d7ddd 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -695,9 +695,6 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) int length = 0; int total_read; - smb_msg->msg_control = NULL; - smb_msg->msg_controllen = 0; - for (total_read = 0; msg_data_left(smb_msg); total_read += length) { try_to_freeze(); @@ -748,18 +745,33 @@ int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, unsigned int to_read) { - struct msghdr smb_msg; + struct msghdr smb_msg = {}; struct kvec iov = {.iov_base = buf, .iov_len = to_read}; iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read); return cifs_readv_from_socket(server, &smb_msg); } +ssize_t +cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read) +{ + struct msghdr smb_msg = {}; + + /* + * iov_iter_discard already sets smb_msg.type and count and iov_offset + * and cifs_readv_from_socket sets msg_control and msg_controllen + * so little to initialize in struct msghdr + */ + iov_iter_discard(&smb_msg.msg_iter, READ, to_read); + + return cifs_readv_from_socket(server, &smb_msg); +} + int cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, unsigned int page_offset, unsigned int to_read) { - struct msghdr smb_msg; + struct msghdr smb_msg = {}; struct bio_vec bv = { .bv_page = page, .bv_len = to_read, .bv_offset = page_offset}; iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 6c06870f9018..144064dc0d38 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1735,11 +1735,13 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl) struct cifsFileInfo *cfile; __u32 type; - rc = -EACCES; xid = get_xid(); - if (!(fl->fl_flags & FL_FLOCK)) - return -ENOLCK; + if (!(fl->fl_flags & FL_FLOCK)) { + rc = -ENOLCK; + free_xid(xid); + 
return rc; + } cfile = (struct cifsFileInfo *)file->private_data; tcon = tlink_tcon(cfile->tlink); @@ -1758,8 +1760,9 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl) * if no lock or unlock then nothing to do since we do not * know what it is */ + rc = -EOPNOTSUPP; free_xid(xid); - return -EOPNOTSUPP; + return rc; } rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock, @@ -3244,6 +3247,9 @@ static ssize_t __cifs_writev( ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from) { + struct file *file = iocb->ki_filp; + + cifs_revalidate_mapping(file->f_inode); return __cifs_writev(iocb, from, true); } @@ -3933,6 +3939,15 @@ static ssize_t __cifs_readv( len = ctx->len; } + if (direct) { + rc = filemap_write_and_wait_range(file->f_inode->i_mapping, + offset, offset + len - 1); + if (rc) { + kref_put(&ctx->refcount, cifs_aio_ctx_release); + return -EAGAIN; + } + } + /* grab a lock here due to read response handlers can access ctx */ mutex_lock(&ctx->aio_mutex); diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index dcde44ff6cf9..e45598b62242 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -193,7 +193,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) rc = put_user(ExtAttrBits & FS_FL_USER_VISIBLE, (int __user *)arg); - if (rc != EOPNOTSUPP) + if (rc != -EOPNOTSUPP) break; } #endif /* CONFIG_CIFS_POSIX */ @@ -222,7 +222,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) * pSMBFile->fid.netfid, * extAttrBits, * &ExtAttrMask); - * if (rc != EOPNOTSUPP) + * if (rc != -EOPNOTSUPP) * break; */ diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 94dab4309fbb..85d30fef98a2 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -97,6 +97,9 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len, if (rc != 1) return -EINVAL; + if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN) + return -EINVAL; + rc = symlink_hash(link_len, link_str, md5_hash); if (rc) { cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc); diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 1c14cf01dbef..9d740916a8ee 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -1053,18 +1053,23 @@ static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void .data = data, .sb = NULL, }; + struct file_system_type **fs_type = (struct file_system_type *[]) { + &cifs_fs_type, &smb3_fs_type, NULL, + }; - iterate_supers_type(&cifs_fs_type, f, &sd); - - if (!sd.sb) - return ERR_PTR(-EINVAL); - /* - * Grab an active reference in order to prevent automounts (DFS links) - * of expiring and then freeing up our cifs superblock pointer while - * we're doing failover. - */ - cifs_sb_active(sd.sb); - return sd.sb; + for (; *fs_type; fs_type++) { + iterate_supers_type(*fs_type, f, &sd); + if (sd.sb) { + /* + * Grab an active reference in order to prevent automounts (DFS links) + * of expiring and then freeing up our cifs superblock pointer while + * we're doing failover. 
+ */ + cifs_sb_active(sd.sb); + return sd.sb; + } + } + return ERR_PTR(-EINVAL); } static void __cifs_put_super(struct super_block *sb) diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index d58c5ffeca0d..cf6fd138d8d5 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -306,6 +306,7 @@ out: cifs_put_tcp_session(chan->server, 0); unload_nls(vol.local_nls); + free_xid(xid); return rc; } diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 2fa3ba354cc9..001c26daacba 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -74,7 +74,6 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, nr_ioctl_req.Reserved = 0; rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, - true /* is_fsctl */, (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), CIFSMaxBufSize, NULL, NULL /* no return info */); if (rc == -EOPNOTSUPP) { diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index a718dc77e604..97cd4df04060 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -371,8 +371,6 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, num_rqst++; if (cfile) { - cifsFileInfo_put(cfile); - cfile = NULL; rc = compound_send_recv(xid, ses, server, flags, num_rqst - 2, &rqst[1], &resp_buftype[1], diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 0e8f484031da..72368b656b33 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -587,7 +587,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) struct cifs_ses *ses = tcon->ses; rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, - FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, + FSCTL_QUERY_NETWORK_INTERFACE_INFO, NULL /* no data input */, 0 /* no data input */, CIFSMaxBufSize, (char **)&out_buf, &ret_data_len); if (rc == -EOPNOTSUPP) { @@ -1000,9 +1000,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size, size_t name_len, value_len, user_name_len; while (src_size > 0) { - name = &src->ea_data[0]; name_len = (size_t)src->ea_name_length; - value = &src->ea_data[src->ea_name_length + 1]; value_len = (size_t)le16_to_cpu(src->ea_value_length); if (name_len == 0) @@ -1014,6 +1012,9 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size, goto out; } + name = &src->ea_data[0]; + value = &src->ea_data[src->ea_name_length + 1]; + if (ea_name) { if (ea_name_len == name_len && memcmp(ea_name, name, name_len) == 0) { @@ -1255,6 +1256,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, COMPOUND_FID, current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0, data, size); + if (rc) + goto sea_exit; smb2_set_next_command(tcon, &rqst[1]); smb2_set_related(&rqst[1]); @@ -1265,6 +1268,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, rqst[2].rq_nvec = 1; rc = SMB2_close_init(tcon, server, &rqst[2], COMPOUND_FID, COMPOUND_FID, false); + if (rc) + goto sea_exit; smb2_set_related(&rqst[2]); rc = compound_send_recv(xid, ses, server, @@ -1469,9 +1474,8 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon, struct resume_key_req *res_key; rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, - FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, - NULL, 0 /* no input */, CIFSMaxBufSize, - (char **)&res_key, &ret_data_len); + FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */, + CIFSMaxBufSize, (char **)&res_key, &ret_data_len); if (rc) { cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc); @@ -1610,7 +1614,7 @@ smb2_ioctl_query_info(const unsigned int xid, rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; rc = 
SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, - qi.info_type, true, buffer, qi.output_buffer_length, + qi.info_type, buffer, qi.output_buffer_length, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); free_req1_func = SMB2_ioctl_free; @@ -1744,9 +1748,17 @@ smb2_copychunk_range(const unsigned int xid, int chunks_copied = 0; bool chunk_sizes_updated = false; ssize_t bytes_written, total_bytes_written = 0; + struct inode *inode; pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL); + /* + * We need to flush all unwritten data before we can send the + * copychunk ioctl to the server. + */ + inode = d_inode(trgtfile->dentry); + filemap_write_and_wait(inode->i_mapping); + if (pcchunk == NULL) return -ENOMEM; @@ -1778,9 +1790,8 @@ smb2_copychunk_range(const unsigned int xid, retbuf = NULL; rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, - true /* is_fsctl */, (char *)pcchunk, - sizeof(struct copychunk_ioctl), CIFSMaxBufSize, - (char **)&retbuf, &ret_data_len); + (char *)pcchunk, sizeof(struct copychunk_ioctl), + CIFSMaxBufSize, (char **)&retbuf, &ret_data_len); if (rc == 0) { if (ret_data_len != sizeof(struct copychunk_ioctl_rsp)) { @@ -1940,7 +1951,6 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_SPARSE, - true /* is_fctl */, &setsparse, 1, CIFSMaxBufSize, NULL, NULL); if (rc) { tcon->broken_sparse_sup = true; @@ -2023,7 +2033,6 @@ smb2_duplicate_extents(const unsigned int xid, rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_DUPLICATE_EXTENTS_TO_FILE, - true /* is_fsctl */, (char *)&dup_ext_buf, sizeof(struct duplicate_extents_to_file), CIFSMaxBufSize, NULL, @@ -2058,7 +2067,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_INTEGRITY_INFORMATION, - true /* is_fsctl */, (char *)&integr_info, sizeof(struct fsctl_set_integrity_information_req), CIFSMaxBufSize, NULL, @@ -2111,7 +2119,6 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SRV_ENUMERATE_SNAPSHOTS, - true /* is_fsctl */, NULL, 0 /* no input data */, max_response_size, (char **)&retbuf, &ret_data_len); @@ -2753,7 +2760,6 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, do { rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_DFS_GET_REFERRALS, - true /* is_fsctl */, (char *)dfs_req, dfs_req_size, CIFSMaxBufSize, (char **)&dfs_rsp, &dfs_rsp_size); } while (rc == -EAGAIN); @@ -2955,8 +2961,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl_init(tcon, server, &rqst[1], fid.persistent_fid, - fid.volatile_fid, FSCTL_GET_REPARSE_POINT, - true /* is_fctl */, NULL, 0, + fid.volatile_fid, FSCTL_GET_REPARSE_POINT, NULL, 0, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); @@ -3136,8 +3141,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, - COMPOUND_FID, FSCTL_GET_REPARSE_POINT, - true /* is_fctl */, NULL, 0, + COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); @@ -3400,7 +3404,7 @@ static long 
smb3_zero_range(struct file *file, struct cifs_tcon *tcon, fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len); rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true, + cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, (char *)&fsctl_buf, sizeof(struct file_zero_data_information), 0, NULL, NULL); @@ -3430,7 +3434,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, loff_t offset, loff_t len) { - struct inode *inode; + struct inode *inode = file_inode(file); struct cifsFileInfo *cfile = file->private_data; struct file_zero_data_information fsctl_buf; long rc; @@ -3439,14 +3443,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, xid = get_xid(); - inode = d_inode(cfile->dentry); - + inode_lock(inode); /* Need to make file sparse, if not already, before freeing range. */ /* Consider adding equivalent for compressed since it could also work */ if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) { rc = -EOPNOTSUPP; - free_xid(xid); - return rc; + goto out; } /* @@ -3462,9 +3464,11 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, - true /* is_fctl */, (char *)&fsctl_buf, + (char *)&fsctl_buf, sizeof(struct file_zero_data_information), CIFSMaxBufSize, NULL, NULL); +out: + inode_unlock(inode); free_xid(xid); return rc; } @@ -3521,7 +3525,7 @@ static int smb3_simple_fallocate_range(unsigned int xid, in_data.length = cpu_to_le64(len); rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, - FSCTL_QUERY_ALLOCATED_RANGES, true, + FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), 1024 * sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); @@ -3624,7 +3628,7 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon, if (rc) goto out; - if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) + if (cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) smb2_set_sparse(xid, tcon, cfile, inode, false); eof = cpu_to_le64(off + len); @@ -3762,7 +3766,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, - FSCTL_QUERY_ALLOCATED_RANGES, true, + FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); @@ -3822,7 +3826,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, - FSCTL_QUERY_ALLOCATED_RANGES, true, + FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), 1024 * sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); @@ -4024,11 +4028,13 @@ smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, } } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static bool smb2_is_read_op(__u32 oplock) { return oplock == SMB2_OPLOCK_LEVEL_II; } +#endif /* CIFS_ALLOW_INSECURE_LEGACY */ static bool smb21_is_read_op(__u32 oplock) @@ -5114,7 +5120,7 @@ out: return rc; } - +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct smb_version_operations smb20_operations = { .compare_fids = smb2_compare_fids, .setup_request = smb2_setup_request, @@ -5212,6 +5218,7 @@ struct smb_version_operations smb20_operations = { .llseek = smb3_llseek, 
.is_status_io_timeout = smb2_is_status_io_timeout, }; +#endif /* CIFS_ALLOW_INSECURE_LEGACY */ struct smb_version_operations smb21_operations = { .compare_fids = smb2_compare_fids, @@ -5540,6 +5547,7 @@ struct smb_version_operations smb311_operations = { .is_status_io_timeout = smb2_is_status_io_timeout, }; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct smb_version_values smb20_values = { .version_string = SMB20_VERSION_STRING, .protocol_id = SMB20_PROT_ID, @@ -5560,6 +5568,7 @@ struct smb_version_values smb20_values = { .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, .create_lease_size = sizeof(struct create_lease), }; +#endif /* ALLOW_INSECURE_LEGACY */ struct smb_version_values smb21_values = { .version_string = SMB21_VERSION_STRING, diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 88554b640b0d..0c4a2474e75b 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -281,6 +281,9 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, ses->binding_chan = NULL; mutex_unlock(&tcon->ses->session_mutex); goto failed; + } else if (rc) { + mutex_unlock(&ses->session_mutex); + goto out; } } /* @@ -1072,13 +1075,13 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) pneg_inbuf->Dialects[0] = cpu_to_le16(server->vals->protocol_id); pneg_inbuf->DialectCount = cpu_to_le16(1); - /* structure is big enough for 3 dialects, sending only 1 */ + /* structure is big enough for 4 dialects, sending only 1 */ inbuflen = sizeof(*pneg_inbuf) - - sizeof(pneg_inbuf->Dialects[0]) * 2; + sizeof(pneg_inbuf->Dialects[0]) * 3; } rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, - FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, + FSCTL_VALIDATE_NEGOTIATE_INFO, (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize, (char **)&pneg_rsp, &rsplen); if (rc == -EOPNOTSUPP) { @@ -2291,7 +2294,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) unsigned int acelen, acl_size, ace_count; unsigned int owner_offset = 0; unsigned int group_offset = 0; - struct smb3_acl acl; + struct smb3_acl acl = {}; *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8); @@ -2364,6 +2367,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */ acl.AclSize = cpu_to_le16(acl_size); acl.AceCount = cpu_to_le16(ace_count); + /* acl.Sbz1 and Sbz2 MBZ so are not set here, but initialized above */ memcpy(aclptr, &acl, sizeof(struct smb3_acl)); buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); @@ -2919,7 +2923,7 @@ int SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, + char *in_data, u32 indatalen, __u32 max_response_size) { struct smb2_ioctl_req *req; @@ -2994,10 +2998,8 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, req->sync_hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), SMB2_MAX_BUFFER_SIZE)); - if (is_fsctl) - req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); - else - req->Flags = 0; + /* always an FSCTL (for now) */ + req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) @@ -3024,9 +3026,9 @@ SMB2_ioctl_free(struct smb_rqst *rqst) */ int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, - u64 volatile_fid, u32 opcode, bool is_fsctl, - char 
*in_data, u32 indatalen, u32 max_out_data_len, - char **out_data, u32 *plen /* returned data len */) + u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen, + u32 max_out_data_len, char **out_data, + u32 *plen /* returned data len */) { struct smb_rqst rqst; struct smb2_ioctl_rsp *rsp = NULL; @@ -3068,7 +3070,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, rc = SMB2_ioctl_init(tcon, server, &rqst, persistent_fid, volatile_fid, opcode, - is_fsctl, in_data, indatalen, max_out_data_len); + in_data, indatalen, max_out_data_len); if (rc) goto ioctl_exit; @@ -3150,7 +3152,7 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, - FSCTL_SET_COMPRESSION, true /* is_fsctl */, + FSCTL_SET_COMPRESSION, (char *)&fsctl_input /* data input */, 2 /* in data len */, CIFSMaxBufSize /* max out data */, &ret_data /* out data */, NULL); diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 4eb0ca84355a..ed2b4fb012a4 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -155,13 +155,13 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, extern void SMB2_open_free(struct smb_rqst *rqst); extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen, + char *in_data, u32 indatalen, u32 maxoutlen, char **out_data, u32 *plen /* returned data len */); extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, + char *in_data, u32 indatalen, __u32 max_response_size); extern void SMB2_ioctl_free(struct smb_rqst *rqst); extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 503a0056b60f..b137006f0fd2 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -209,10 +209,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, *sent = 0; - smb_msg->msg_name = (struct sockaddr *) &server->dstaddr; - smb_msg->msg_namelen = sizeof(struct sockaddr); - smb_msg->msg_control = NULL; - smb_msg->msg_controllen = 0; if (server->noblocksnd) smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; else @@ -324,7 +320,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, sigset_t mask, oldmask; size_t total_len = 0, sent, size; struct socket *ssocket = server->ssocket; - struct msghdr smb_msg; + struct msghdr smb_msg = {}; __be32 rfc1002_marker; if (cifs_rdma_enabled(server)) { diff --git a/fs/coredump.c b/fs/coredump.c index b1b74dbfa22a..7c5edadf5208 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -520,7 +520,7 @@ static bool dump_interrupted(void) * but then we need to teach dump_write() to restart and clear * TIF_SIGPENDING. */ - return signal_pending(current); + return fatal_signal_pending(current) || freezing(current); } static void wait_for_dump_helpers(struct file *file) diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 9322a1c984fb..f5f056739cb6 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -222,7 +222,16 @@ struct fscrypt_info { * will be NULL if the master key was found in a process-subscribed * keyring rather than in the filesystem-level keyring. 
*/ +#ifdef __GENKSYMS__ + /* + * Android ABI CRC preservation due to commit 391cceee6d43 ("fscrypt: + * stop using keyrings subsystem for fscrypt_master_key") changing this + * type. Size is the same, this is a private field. + */ struct key *ci_master_key; +#else + struct fscrypt_master_key *ci_master_key; +#endif /* * Link in list of inodes that were unlocked with the master key. @@ -461,6 +470,40 @@ struct fscrypt_master_key_secret { */ struct fscrypt_master_key { + /* + * Back-pointer to the super_block of the filesystem to which this + * master key has been added. Only valid if ->mk_active_refs > 0. + */ + struct super_block *mk_sb; + + /* + * Link in ->mk_sb->s_master_keys->key_hashtable. + * Only valid if ->mk_active_refs > 0. + */ + struct hlist_node mk_node; + + /* Semaphore that protects ->mk_secret and ->mk_users */ + struct rw_semaphore mk_sem; + + /* + * Active and structural reference counts. An active ref guarantees + * that the struct continues to exist, continues to be in the keyring + * ->mk_sb->s_master_keys, and that any embedded subkeys (e.g. + * ->mk_direct_keys) that have been prepared continue to exist. + * A structural ref only guarantees that the struct continues to exist. + * + * There is one active ref associated with ->mk_secret being present, + * and one active ref for each inode in ->mk_decrypted_inodes. + * + * There is one structural ref associated with the active refcount being + * nonzero. Finding a key in the keyring also takes a structural ref, + * which is then held temporarily while the key is operated on. + */ + refcount_t mk_active_refs; + refcount_t mk_struct_refs; + + struct rcu_head mk_rcu_head; + /* * The secret key material. After FS_IOC_REMOVE_ENCRYPTION_KEY is * executed, this is wiped and no new inodes can be unlocked with this @@ -469,7 +512,10 @@ struct fscrypt_master_key { * FS_IOC_REMOVE_ENCRYPTION_KEY can be retried, or * FS_IOC_ADD_ENCRYPTION_KEY can add the secret again. * - * Locking: protected by this master key's key->sem. + * While ->mk_secret is present, one ref in ->mk_active_refs is held. + * + * Locking: protected by ->mk_sem. The manipulation of ->mk_active_refs + * associated with this field is protected by ->mk_sem as well. */ struct fscrypt_master_key_secret mk_secret; @@ -490,22 +536,12 @@ struct fscrypt_master_key { * * This is NULL for v1 policy keys; those can only be added by root. * - * Locking: in addition to this keyring's own semaphore, this is - * protected by this master key's key->sem, so we can do atomic - * search+insert. It can also be searched without taking any locks, but - * in that case the returned key may have already been removed. + * Locking: protected by ->mk_sem. (We don't just rely on the keyrings + * subsystem semaphore ->mk_users->sem, as we need support for atomic + * search+insert along with proper synchronization with ->mk_secret.) */ struct key *mk_users; - /* - * Length of ->mk_decrypted_inodes, plus one if mk_secret is present. - * Once this goes to 0, the master key is removed from ->s_master_keys. - * The 'struct fscrypt_master_key' will continue to live as long as the - * 'struct key' whose payload it is, but we won't let this reference - * count rise again. - */ - refcount_t mk_refcount; - /* * List of inodes that were unlocked using this key. This allows the * inodes to be evicted efficiently if the key is removed. 
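(Aside: a minimal sketch of the two-level reference-counting scheme that the mk_active_refs/mk_struct_refs comments above describe. The names obj, obj_put_active() and obj_put_struct() are hypothetical, not part of this patch; only the pattern is mirrored here: "active" refs keep the object usable, while "structural" refs keep the memory allocated until an RCU grace period has elapsed.)

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t active_refs;		/* object is usable; one ref per user */
	refcount_t struct_refs;		/* memory stays allocated; >= active */
	struct rcu_head rcu;
};

static void obj_put_struct(struct obj *o)
{
	/*
	 * Last structural ref: free only after an RCU grace period, since
	 * lockless readers may still hold a pointer to this object.
	 */
	if (refcount_dec_and_test(&o->struct_refs))
		kfree_rcu(o, rcu);
}

static void obj_put_active(struct obj *o)
{
	if (refcount_dec_and_test(&o->active_refs)) {
		/* ... unlink from the hash table, wipe embedded secrets ... */
		obj_put_struct(o);	/* ref held by the nonzero active count */
	}
}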
@@ -531,10 +567,10 @@ static inline bool is_master_key_secret_present(const struct fscrypt_master_key_secret *secret) { /* - * The READ_ONCE() is only necessary for fscrypt_drop_inode() and - * fscrypt_key_describe(). These run in atomic context, so they can't - * take the key semaphore and thus 'secret' can change concurrently - * which would be a data race. But they only need to know whether the + * The READ_ONCE() is only necessary for fscrypt_drop_inode(). + * fscrypt_drop_inode() runs in atomic context, so it can't take the key + * semaphore and thus 'secret' can change concurrently which would be a + * data race. But fscrypt_drop_inode() only needs to know whether the * secret *was* present at the time of check, so READ_ONCE() suffices. */ return READ_ONCE(secret->size) != 0; @@ -563,7 +599,11 @@ static inline int master_key_spec_len(const struct fscrypt_key_specifier *spec) return 0; } -struct key * +void fscrypt_put_master_key(struct fscrypt_master_key *mk); + +void fscrypt_put_master_key_activeref(struct fscrypt_master_key *mk); + +struct fscrypt_master_key * fscrypt_find_master_key(struct super_block *sb, const struct fscrypt_key_specifier *mk_spec); diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c index af74599ae1cf..be5c650e4957 100644 --- a/fs/crypto/hooks.c +++ b/fs/crypto/hooks.c @@ -5,8 +5,6 @@ * Encryption hooks for higher-level filesystem operations. */ -#include - #include "fscrypt_private.h" /** @@ -142,7 +140,6 @@ int fscrypt_prepare_setflags(struct inode *inode, unsigned int oldflags, unsigned int flags) { struct fscrypt_info *ci; - struct key *key; struct fscrypt_master_key *mk; int err; @@ -158,14 +155,13 @@ int fscrypt_prepare_setflags(struct inode *inode, ci = inode->i_crypt_info; if (ci->ci_policy.version != FSCRYPT_POLICY_V2) return -EINVAL; - key = ci->ci_master_key; - mk = key->payload.data[0]; - down_read(&key->sem); + mk = ci->ci_master_key; + down_read(&mk->mk_sem); if (is_master_key_secret_present(&mk->mk_secret)) err = fscrypt_derive_dirhash_key(ci, mk); else err = -ENOKEY; - up_read(&key->sem); + up_read(&mk->mk_sem); return err; } return 0; diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index c492deaed9d6..eb7b9040d267 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -18,6 +18,7 @@ * information about these ioctls. */ +#include #include #include #include @@ -25,6 +26,18 @@ #include "fscrypt_private.h" +/* The master encryption keys for a filesystem (->s_master_keys) */ +struct fscrypt_keyring { + /* + * Lock that protects ->key_hashtable. It does *not* protect the + * fscrypt_master_key structs themselves. + */ + spinlock_t lock; + + /* Hash table that maps fscrypt_key_specifier to fscrypt_master_key */ + struct hlist_head key_hashtable[128]; +}; + static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret) { fscrypt_destroy_hkdf(&secret->hkdf); @@ -38,20 +51,70 @@ static void move_master_key_secret(struct fscrypt_master_key_secret *dst, memzero_explicit(src, sizeof(*src)); } -static void free_master_key(struct fscrypt_master_key *mk) +static void fscrypt_free_master_key(struct rcu_head *head) { + struct fscrypt_master_key *mk = + container_of(head, struct fscrypt_master_key, mk_rcu_head); + /* + * The master key secret and any embedded subkeys should have already + * been wiped when the last active reference to the fscrypt_master_key + * struct was dropped; doing it here would be unnecessarily late. + * Nevertheless, use kfree_sensitive() in case anything was missed.
+ */ + kfree_sensitive(mk); +} + +void fscrypt_put_master_key(struct fscrypt_master_key *mk) +{ + if (!refcount_dec_and_test(&mk->mk_struct_refs)) + return; + /* + * No structural references left, so free ->mk_users, and also free the + * fscrypt_master_key struct itself after an RCU grace period ensures + * that concurrent keyring lookups can no longer find it. + */ + WARN_ON(refcount_read(&mk->mk_active_refs) != 0); + key_put(mk->mk_users); + mk->mk_users = NULL; + call_rcu(&mk->mk_rcu_head, fscrypt_free_master_key); +} + +void fscrypt_put_master_key_activeref(struct fscrypt_master_key *mk) +{ + struct super_block *sb = mk->mk_sb; + struct fscrypt_keyring *keyring = sb->s_master_keys; size_t i; - wipe_master_key_secret(&mk->mk_secret); + if (!refcount_dec_and_test(&mk->mk_active_refs)) + return; + /* + * No active references left, so complete the full removal of this + * fscrypt_master_key struct by removing it from the keyring and + * destroying any subkeys embedded in it. + */ + + spin_lock(&keyring->lock); + hlist_del_rcu(&mk->mk_node); + spin_unlock(&keyring->lock); + + /* + * ->mk_active_refs == 0 implies that ->mk_secret is not present and + * that ->mk_decrypted_inodes is empty. + */ + WARN_ON(is_master_key_secret_present(&mk->mk_secret)); + WARN_ON(!list_empty(&mk->mk_decrypted_inodes)); for (i = 0; i <= FSCRYPT_MODE_MAX; i++) { fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]); fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]); fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_32_keys[i]); } + memzero_explicit(&mk->mk_ino_hash_key, + sizeof(mk->mk_ino_hash_key)); + mk->mk_ino_hash_key_initialized = false; - key_put(mk->mk_users); - kfree_sensitive(mk); + /* Drop the structural ref associated with the active refs. */ + fscrypt_put_master_key(mk); } static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec) @@ -61,44 +124,6 @@ static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec) return master_key_spec_len(spec) != 0; } -static int fscrypt_key_instantiate(struct key *key, - struct key_preparsed_payload *prep) -{ - key->payload.data[0] = (struct fscrypt_master_key *)prep->data; - return 0; -} - -static void fscrypt_key_destroy(struct key *key) -{ - free_master_key(key->payload.data[0]); -} - -static void fscrypt_key_describe(const struct key *key, struct seq_file *m) -{ - seq_puts(m, key->description); - - if (key_is_positive(key)) { - const struct fscrypt_master_key *mk = key->payload.data[0]; - - if (!is_master_key_secret_present(&mk->mk_secret)) - seq_puts(m, ": secret removed"); - } -} - -/* - * Type of key in ->s_master_keys. Each key of this type represents a master - * key which has been added to the filesystem. Its payload is a - * 'struct fscrypt_master_key'. The "." prefix in the key type name prevents - * users from adding keys of this type via the keyrings syscalls rather than via - * the intended method of FS_IOC_ADD_ENCRYPTION_KEY. 
- */ -static struct key_type key_type_fscrypt = { - .name = "._fscrypt", - .instantiate = fscrypt_key_instantiate, - .destroy = fscrypt_key_destroy, - .describe = fscrypt_key_describe, -}; - static int fscrypt_user_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { @@ -131,32 +156,6 @@ static struct key_type key_type_fscrypt_user = { .describe = fscrypt_user_key_describe, }; -/* Search ->s_master_keys or ->mk_users */ -static struct key *search_fscrypt_keyring(struct key *keyring, - struct key_type *type, - const char *description) -{ - /* - * We need to mark the keyring reference as "possessed" so that we - * acquire permission to search it, via the KEY_POS_SEARCH permission. - */ - key_ref_t keyref = make_key_ref(keyring, true /* possessed */); - - keyref = keyring_search(keyref, type, description, false); - if (IS_ERR(keyref)) { - if (PTR_ERR(keyref) == -EAGAIN || /* not found */ - PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */ - keyref = ERR_PTR(-ENOKEY); - return ERR_CAST(keyref); - } - return key_ref_to_ptr(keyref); -} - -#define FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE \ - (CONST_STRLEN("fscrypt-") + sizeof_field(struct super_block, s_id)) - -#define FSCRYPT_MK_DESCRIPTION_SIZE (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + 1) - #define FSCRYPT_MK_USERS_DESCRIPTION_SIZE \ (CONST_STRLEN("fscrypt-") + 2 * FSCRYPT_KEY_IDENTIFIER_SIZE + \ CONST_STRLEN("-users") + 1) @@ -164,21 +163,6 @@ static struct key *search_fscrypt_keyring(struct key *keyring, #define FSCRYPT_MK_USER_DESCRIPTION_SIZE \ (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + CONST_STRLEN(".uid.") + 10 + 1) -static void format_fs_keyring_description( - char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE], - const struct super_block *sb) -{ - sprintf(description, "fscrypt-%s", sb->s_id); -} - -static void format_mk_description( - char description[FSCRYPT_MK_DESCRIPTION_SIZE], - const struct fscrypt_key_specifier *mk_spec) -{ - sprintf(description, "%*phN", - master_key_spec_len(mk_spec), (u8 *)&mk_spec->u); -} - static void format_mk_users_keyring_description( char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE], const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) @@ -199,20 +183,15 @@ static void format_mk_user_description( /* Create ->s_master_keys if needed. Synchronized by fscrypt_add_key_mutex. */ static int allocate_filesystem_keyring(struct super_block *sb) { - char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE]; - struct key *keyring; + struct fscrypt_keyring *keyring; if (sb->s_master_keys) return 0; - format_fs_keyring_description(description, sb); - keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, - current_cred(), KEY_POS_SEARCH | - KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW, - KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); - if (IS_ERR(keyring)) - return PTR_ERR(keyring); - + keyring = kzalloc(sizeof(*keyring), GFP_KERNEL); + if (!keyring) + return -ENOMEM; + spin_lock_init(&keyring->lock); /* * Pairs with the smp_load_acquire() in fscrypt_find_master_key(). * I.e., here we publish ->s_master_keys with a RELEASE barrier so that @@ -222,21 +201,80 @@ static int allocate_filesystem_keyring(struct super_block *sb) return 0; } -void fscrypt_sb_free(struct super_block *sb) +/* + * Release all encryption keys that have been added to the filesystem, along + * with the keyring that contains them. + * + * This is called at unmount time. 
The filesystem's underlying block device(s) + * are still available at this time; this is important because after user file + * accesses have been allowed, this function may need to evict keys from the + * keyslots of an inline crypto engine, which requires the block device(s). + * + * This is also called when the super_block is being freed. This is needed to + * avoid a memory leak if mounting fails after the "test_dummy_encryption" + * option was processed, as in that case the unmount-time call isn't made. + */ +void fscrypt_destroy_keyring(struct super_block *sb) { - key_put(sb->s_master_keys); + struct fscrypt_keyring *keyring = sb->s_master_keys; + size_t i; + + if (!keyring) + return; + + for (i = 0; i < ARRAY_SIZE(keyring->key_hashtable); i++) { + struct hlist_head *bucket = &keyring->key_hashtable[i]; + struct fscrypt_master_key *mk; + struct hlist_node *tmp; + + hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) { + /* + * Since all inodes were already evicted, every key + * remaining in the keyring should have an empty inode + * list, and should only still be in the keyring due to + * the single active ref associated with ->mk_secret. + * There should be no structural refs beyond the one + * associated with the active ref. + */ + WARN_ON(refcount_read(&mk->mk_active_refs) != 1); + WARN_ON(refcount_read(&mk->mk_struct_refs) != 1); + WARN_ON(!is_master_key_secret_present(&mk->mk_secret)); + wipe_master_key_secret(&mk->mk_secret); + fscrypt_put_master_key_activeref(mk); + } + } + kfree_sensitive(keyring); sb->s_master_keys = NULL; } -/* - * Find the specified master key in ->s_master_keys. - * Returns ERR_PTR(-ENOKEY) if not found. - */ -struct key *fscrypt_find_master_key(struct super_block *sb, - const struct fscrypt_key_specifier *mk_spec) +static struct hlist_head * +fscrypt_mk_hash_bucket(struct fscrypt_keyring *keyring, + const struct fscrypt_key_specifier *mk_spec) { - struct key *keyring; - char description[FSCRYPT_MK_DESCRIPTION_SIZE]; + /* + * Since key specifiers should be "random" values, it is sufficient to + * use a trivial hash function that just takes the first several bits of + * the key specifier. + */ + unsigned long i = get_unaligned((unsigned long *)&mk_spec->u); + + return &keyring->key_hashtable[i % ARRAY_SIZE(keyring->key_hashtable)]; +} + +/* + * Find the specified master key struct in ->s_master_keys and take a structural + * ref to it. The structural ref guarantees that the key struct continues to + * exist, but it does *not* guarantee that ->s_master_keys continues to contain + * the key struct. The structural ref needs to be dropped by + * fscrypt_put_master_key(). Returns NULL if the key struct is not found. + */ +struct fscrypt_master_key * +fscrypt_find_master_key(struct super_block *sb, + const struct fscrypt_key_specifier *mk_spec) +{ + struct fscrypt_keyring *keyring; + struct hlist_head *bucket; + struct fscrypt_master_key *mk; /* * Pairs with the smp_store_release() in allocate_filesystem_keyring(). @@ -246,10 +284,38 @@ struct key *fscrypt_find_master_key(struct super_block *sb, */ keyring = smp_load_acquire(&sb->s_master_keys); if (keyring == NULL) - return ERR_PTR(-ENOKEY); /* No keyring yet, so no keys yet. */ + return NULL; /* No keyring yet, so no keys yet. 
*/ - format_mk_description(description, mk_spec); - return search_fscrypt_keyring(keyring, &key_type_fscrypt, description); + bucket = fscrypt_mk_hash_bucket(keyring, mk_spec); + rcu_read_lock(); + switch (mk_spec->type) { + case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: + hlist_for_each_entry_rcu(mk, bucket, mk_node) { + if (mk->mk_spec.type == + FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && + memcmp(mk->mk_spec.u.descriptor, + mk_spec->u.descriptor, + FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 && + refcount_inc_not_zero(&mk->mk_struct_refs)) + goto out; + } + break; + case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: + hlist_for_each_entry_rcu(mk, bucket, mk_node) { + if (mk->mk_spec.type == + FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER && + memcmp(mk->mk_spec.u.identifier, + mk_spec->u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE) == 0 && + refcount_inc_not_zero(&mk->mk_struct_refs)) + goto out; + } + break; + } + mk = NULL; +out: + rcu_read_unlock(); + return mk; } static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk) @@ -277,17 +343,30 @@ static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk) static struct key *find_master_key_user(struct fscrypt_master_key *mk) { char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE]; + key_ref_t keyref; format_mk_user_description(description, mk->mk_spec.u.identifier); - return search_fscrypt_keyring(mk->mk_users, &key_type_fscrypt_user, - description); + + /* + * We need to mark the keyring reference as "possessed" so that we + * acquire permission to search it, via the KEY_POS_SEARCH permission. + */ + keyref = keyring_search(make_key_ref(mk->mk_users, true /*possessed*/), + &key_type_fscrypt_user, description, false); + if (IS_ERR(keyref)) { + if (PTR_ERR(keyref) == -EAGAIN || /* not found */ + PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */ + keyref = ERR_PTR(-ENOKEY); + return ERR_CAST(keyref); + } + return key_ref_to_ptr(keyref); } /* * Give the current user a "key" in ->mk_users. This charges the user's quota * and marks the master key as added by the current user, so that it cannot be - * removed by another user with the key. Either the master key's key->sem must - * be held for write, or the master key must be still undergoing initialization. + * removed by another user with the key. Either ->mk_sem must be held for + * write, or the master key must be still undergoing initialization. */ static int add_master_key_user(struct fscrypt_master_key *mk) { @@ -309,7 +388,7 @@ static int add_master_key_user(struct fscrypt_master_key *mk) /* * Remove the current user's "key" from ->mk_users. - * The master key's key->sem must be held for write. + * ->mk_sem must be held for write. * * Returns 0 if removed, -ENOKEY if not found, or another -errno code. */ @@ -327,63 +406,49 @@ static int remove_master_key_user(struct fscrypt_master_key *mk) } /* - * Allocate a new fscrypt_master_key which contains the given secret, set it as - * the payload of a new 'struct key' of type fscrypt, and link the 'struct key' - * into the given keyring. Synchronized by fscrypt_add_key_mutex. + * Allocate a new fscrypt_master_key, transfer the given secret over to it, and + * insert it into sb->s_master_keys. 
*/ -static int add_new_master_key(struct fscrypt_master_key_secret *secret, - const struct fscrypt_key_specifier *mk_spec, - struct key *keyring) +static int add_new_master_key(struct super_block *sb, + struct fscrypt_master_key_secret *secret, + const struct fscrypt_key_specifier *mk_spec) { + struct fscrypt_keyring *keyring = sb->s_master_keys; struct fscrypt_master_key *mk; - char description[FSCRYPT_MK_DESCRIPTION_SIZE]; - struct key *key; int err; mk = kzalloc(sizeof(*mk), GFP_KERNEL); if (!mk) return -ENOMEM; + mk->mk_sb = sb; + init_rwsem(&mk->mk_sem); + refcount_set(&mk->mk_struct_refs, 1); mk->mk_spec = *mk_spec; - move_master_key_secret(&mk->mk_secret, secret); - - refcount_set(&mk->mk_refcount, 1); /* secret is present */ INIT_LIST_HEAD(&mk->mk_decrypted_inodes); spin_lock_init(&mk->mk_decrypted_inodes_lock); if (mk_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { err = allocate_master_key_users_keyring(mk); if (err) - goto out_free_mk; + goto out_put; err = add_master_key_user(mk); if (err) - goto out_free_mk; + goto out_put; } - /* - * Note that we don't charge this key to anyone's quota, since when - * ->mk_users is in use those keys are charged instead, and otherwise - * (when ->mk_users isn't in use) only root can add these keys. - */ - format_mk_description(description, mk_spec); - key = key_alloc(&key_type_fscrypt, description, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(), - KEY_POS_SEARCH | KEY_USR_SEARCH | KEY_USR_VIEW, - KEY_ALLOC_NOT_IN_QUOTA, NULL); - if (IS_ERR(key)) { - err = PTR_ERR(key); - goto out_free_mk; - } - err = key_instantiate_and_link(key, mk, sizeof(*mk), keyring, NULL); - key_put(key); - if (err) - goto out_free_mk; + move_master_key_secret(&mk->mk_secret, secret); + refcount_set(&mk->mk_active_refs, 1); /* ->mk_secret is present */ + spin_lock(&keyring->lock); + hlist_add_head_rcu(&mk->mk_node, + fscrypt_mk_hash_bucket(keyring, mk_spec)); + spin_unlock(&keyring->lock); return 0; -out_free_mk: - free_master_key(mk); +out_put: + fscrypt_put_master_key(mk); return err; } @@ -392,42 +457,34 @@ out_free_mk: static int add_existing_master_key(struct fscrypt_master_key *mk, struct fscrypt_master_key_secret *secret) { - struct key *mk_user; - bool rekey; int err; /* * If the current user is already in ->mk_users, then there's nothing to - * do. (Not applicable for v1 policy keys, which have NULL ->mk_users.) + * do. Otherwise, we need to add the user to ->mk_users. (Neither is + * applicable for v1 policy keys, which have NULL ->mk_users.) */ if (mk->mk_users) { - mk_user = find_master_key_user(mk); + struct key *mk_user = find_master_key_user(mk); + if (mk_user != ERR_PTR(-ENOKEY)) { if (IS_ERR(mk_user)) return PTR_ERR(mk_user); key_put(mk_user); return 0; } - } - - /* If we'll be re-adding ->mk_secret, try to take the reference. */ - rekey = !is_master_key_secret_present(&mk->mk_secret); - if (rekey && !refcount_inc_not_zero(&mk->mk_refcount)) - return KEY_DEAD; - - /* Add the current user to ->mk_users, if applicable. */ - if (mk->mk_users) { err = add_master_key_user(mk); - if (err) { - if (rekey && refcount_dec_and_test(&mk->mk_refcount)) - return KEY_DEAD; + if (err) return err; - } } /* Re-add the secret if needed. 
*/ - if (rekey) + if (!is_master_key_secret_present(&mk->mk_secret)) { + if (!refcount_inc_not_zero(&mk->mk_active_refs)) + return KEY_DEAD; move_master_key_secret(&mk->mk_secret, secret); + } + return 0; } @@ -436,38 +493,36 @@ static int do_add_master_key(struct super_block *sb, const struct fscrypt_key_specifier *mk_spec) { static DEFINE_MUTEX(fscrypt_add_key_mutex); - struct key *key; + struct fscrypt_master_key *mk; int err; mutex_lock(&fscrypt_add_key_mutex); /* serialize find + link */ -retry: - key = fscrypt_find_master_key(sb, mk_spec); - if (IS_ERR(key)) { - err = PTR_ERR(key); - if (err != -ENOKEY) - goto out_unlock; + + mk = fscrypt_find_master_key(sb, mk_spec); + if (!mk) { /* Didn't find the key in ->s_master_keys. Add it. */ err = allocate_filesystem_keyring(sb); - if (err) - goto out_unlock; - err = add_new_master_key(secret, mk_spec, sb->s_master_keys); + if (!err) + err = add_new_master_key(sb, secret, mk_spec); } else { /* * Found the key in ->s_master_keys. Re-add the secret if * needed, and add the user to ->mk_users if needed. */ - down_write(&key->sem); - err = add_existing_master_key(key->payload.data[0], secret); - up_write(&key->sem); + down_write(&mk->mk_sem); + err = add_existing_master_key(mk, secret); + up_write(&mk->mk_sem); if (err == KEY_DEAD) { - /* Key being removed or needs to be removed */ - key_invalidate(key); - key_put(key); - goto retry; + /* + * We found a key struct, but it's already been fully + * removed. Ignore the old struct and add a new one. + * fscrypt_add_key_mutex means we don't need to worry + * about concurrent adds. + */ + err = add_new_master_key(sb, secret, mk_spec); } - key_put(key); + fscrypt_put_master_key(mk); } -out_unlock: mutex_unlock(&fscrypt_add_key_mutex); return err; } @@ -761,19 +816,19 @@ int fscrypt_verify_key_added(struct super_block *sb, const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) { struct fscrypt_key_specifier mk_spec; - struct key *key, *mk_user; struct fscrypt_master_key *mk; + struct key *mk_user; int err; mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER; memcpy(mk_spec.u.identifier, identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); - key = fscrypt_find_master_key(sb, &mk_spec); - if (IS_ERR(key)) { - err = PTR_ERR(key); + mk = fscrypt_find_master_key(sb, &mk_spec); + if (!mk) { + err = -ENOKEY; goto out; } - mk = key->payload.data[0]; + down_read(&mk->mk_sem); mk_user = find_master_key_user(mk); if (IS_ERR(mk_user)) { err = PTR_ERR(mk_user); @@ -781,7 +836,8 @@ int fscrypt_verify_key_added(struct super_block *sb, key_put(mk_user); err = 0; } - key_put(key); + up_read(&mk->mk_sem); + fscrypt_put_master_key(mk); out: if (err == -ENOKEY && capable(CAP_FOWNER)) err = 0; @@ -943,11 +999,10 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_remove_key_arg __user *uarg = _uarg; struct fscrypt_remove_key_arg arg; - struct key *key; struct fscrypt_master_key *mk; u32 status_flags = 0; int err; - bool dead; + bool inodes_remain; if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; @@ -967,12 +1022,10 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) return -EACCES; /* Find the key being removed. 
*/ - key = fscrypt_find_master_key(sb, &arg.key_spec); - if (IS_ERR(key)) - return PTR_ERR(key); - mk = key->payload.data[0]; - - down_write(&key->sem); + mk = fscrypt_find_master_key(sb, &arg.key_spec); + if (!mk) + return -ENOKEY; + down_write(&mk->mk_sem); /* If relevant, remove current user's (or all users) claim to the key */ if (mk->mk_users && mk->mk_users->keys.nr_leaves_on_tree != 0) { @@ -981,7 +1034,7 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) else err = remove_master_key_user(mk); if (err) { - up_write(&key->sem); + up_write(&mk->mk_sem); goto out_put_key; } if (mk->mk_users->keys.nr_leaves_on_tree != 0) { @@ -993,26 +1046,22 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) status_flags |= FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS; err = 0; - up_write(&key->sem); + up_write(&mk->mk_sem); goto out_put_key; } } /* No user claims remaining. Go ahead and wipe the secret. */ - dead = false; + err = -ENOKEY; if (is_master_key_secret_present(&mk->mk_secret)) { wipe_master_key_secret(&mk->mk_secret); - dead = refcount_dec_and_test(&mk->mk_refcount); - } - up_write(&key->sem); - if (dead) { - /* - * No inodes reference the key, and we wiped the secret, so the - * key object is free to be removed from the keyring. - */ - key_invalidate(key); + fscrypt_put_master_key_activeref(mk); err = 0; - } else { + } + inodes_remain = refcount_read(&mk->mk_active_refs) > 0; + up_write(&mk->mk_sem); + + if (inodes_remain) { /* Some inodes still reference this key; try to evict them. */ err = try_to_lock_encrypted_files(sb, mk); if (err == -EBUSY) { @@ -1028,7 +1077,7 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) * has been fully removed including all files locked. */ out_put_key: - key_put(key); + fscrypt_put_master_key(mk); if (err == 0) err = put_user(status_flags, &uarg->removal_status_flags); return err; @@ -1075,7 +1124,6 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) { struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_get_key_status_arg arg; - struct key *key; struct fscrypt_master_key *mk; int err; @@ -1092,19 +1140,18 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) arg.user_count = 0; memset(arg.__out_reserved, 0, sizeof(arg.__out_reserved)); - key = fscrypt_find_master_key(sb, &arg.key_spec); - if (IS_ERR(key)) { - if (key != ERR_PTR(-ENOKEY)) - return PTR_ERR(key); + mk = fscrypt_find_master_key(sb, &arg.key_spec); + if (!mk) { arg.status = FSCRYPT_KEY_STATUS_ABSENT; err = 0; goto out; } - mk = key->payload.data[0]; - down_read(&key->sem); + down_read(&mk->mk_sem); if (!is_master_key_secret_present(&mk->mk_secret)) { - arg.status = FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED; + arg.status = refcount_read(&mk->mk_active_refs) > 0 ? 
+ FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED : + FSCRYPT_KEY_STATUS_ABSENT /* raced with full removal */; err = 0; goto out_release_key; } @@ -1126,8 +1173,8 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) } err = 0; out_release_key: - up_read(&key->sem); - key_put(key); + up_read(&mk->mk_sem); + fscrypt_put_master_key(mk); out: if (!err && copy_to_user(uarg, &arg, sizeof(arg))) err = -EFAULT; @@ -1139,13 +1186,9 @@ int __init fscrypt_init_keyring(void) { int err; - err = register_key_type(&key_type_fscrypt); - if (err) - return err; - err = register_key_type(&key_type_fscrypt_user); if (err) - goto err_unregister_fscrypt; + return err; err = register_key_type(&key_type_fscrypt_provisioning); if (err) @@ -1155,7 +1198,5 @@ int __init fscrypt_init_keyring(void) err_unregister_fscrypt_user: unregister_key_type(&key_type_fscrypt_user); -err_unregister_fscrypt: - unregister_key_type(&key_type_fscrypt); return err; } diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 05f4693f7e15..a12feee13fd7 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -9,7 +9,6 @@ */ #include -#include #include #include "fscrypt_private.h" @@ -156,6 +155,7 @@ void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key) { crypto_free_skcipher(prep_key->tfm); fscrypt_destroy_inline_crypt_key(prep_key); + memzero_explicit(prep_key, sizeof(*prep_key)); } /* Given a per-file encryption key, set up the file's crypto transform object */ @@ -443,20 +443,18 @@ static bool fscrypt_valid_master_key_size(const struct fscrypt_master_key *mk, /* * Find the master key, then set up the inode's actual encryption key. * - * If the master key is found in the filesystem-level keyring, then the - * corresponding 'struct key' is returned in *master_key_ret with its semaphore - * read-locked. This is needed to ensure that only one task links the - * fscrypt_info into ->mk_decrypted_inodes (as multiple tasks may race to create - * an fscrypt_info for the same inode), and to synchronize the master key being - * removed with a new inode starting to use it. + * If the master key is found in the filesystem-level keyring, then it is + * returned in *mk_ret with its semaphore read-locked. This is needed to ensure + * that only one task links the fscrypt_info into ->mk_decrypted_inodes (as + * multiple tasks may race to create an fscrypt_info for the same inode), and to + * synchronize the master key being removed with a new inode starting to use it. 
*/ static int setup_file_encryption_key(struct fscrypt_info *ci, bool need_dirhash_key, - struct key **master_key_ret) + struct fscrypt_master_key **mk_ret) { - struct key *key; - struct fscrypt_master_key *mk = NULL; struct fscrypt_key_specifier mk_spec; + struct fscrypt_master_key *mk; int err; switch (ci->ci_policy.version) { @@ -477,11 +475,10 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, return -EINVAL; } - key = fscrypt_find_master_key(ci->ci_inode->i_sb, &mk_spec); - if (IS_ERR(key)) { - if (key != ERR_PTR(-ENOKEY) || - ci->ci_policy.version != FSCRYPT_POLICY_V1) - return PTR_ERR(key); + mk = fscrypt_find_master_key(ci->ci_inode->i_sb, &mk_spec); + if (!mk) { + if (ci->ci_policy.version != FSCRYPT_POLICY_V1) + return -ENOKEY; err = fscrypt_select_encryption_impl(ci, false); if (err) @@ -495,9 +492,7 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, */ return fscrypt_setup_v1_file_key_via_subscribed_keyrings(ci); } - - mk = key->payload.data[0]; - down_read(&key->sem); + down_read(&mk->mk_sem); /* Has the secret been removed (via FS_IOC_REMOVE_ENCRYPTION_KEY)? */ if (!is_master_key_secret_present(&mk->mk_secret)) { @@ -529,18 +524,18 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, if (err) goto out_release_key; - *master_key_ret = key; + *mk_ret = mk; return 0; out_release_key: - up_read(&key->sem); - key_put(key); + up_read(&mk->mk_sem); + fscrypt_put_master_key(mk); return err; } static void put_crypt_info(struct fscrypt_info *ci) { - struct key *key; + struct fscrypt_master_key *mk; if (!ci) return; @@ -550,24 +545,18 @@ static void put_crypt_info(struct fscrypt_info *ci) else if (ci->ci_owns_key) fscrypt_destroy_prepared_key(&ci->ci_enc_key); - key = ci->ci_master_key; - if (key) { - struct fscrypt_master_key *mk = key->payload.data[0]; - + mk = ci->ci_master_key; + if (mk) { /* * Remove this inode from the list of inodes that were unlocked - * with the master key. - * - * In addition, if we're removing the last inode from a key that - * already had its secret removed, invalidate the key so that it - * gets removed from ->s_master_keys. + * with the master key. In addition, if we're removing the last + * inode from a master key struct that already had its secret + * removed, then complete the full removal of the struct. */ spin_lock(&mk->mk_decrypted_inodes_lock); list_del(&ci->ci_master_key_link); spin_unlock(&mk->mk_decrypted_inodes_lock); - if (refcount_dec_and_test(&mk->mk_refcount)) - key_invalidate(key); - key_put(key); + fscrypt_put_master_key_activeref(mk); } memzero_explicit(ci, sizeof(*ci)); kmem_cache_free(fscrypt_info_cachep, ci); @@ -581,7 +570,7 @@ fscrypt_setup_encryption_info(struct inode *inode, { struct fscrypt_info *crypt_info; struct fscrypt_mode *mode; - struct key *master_key = NULL; + struct fscrypt_master_key *mk = NULL; int res; res = fscrypt_initialize(inode->i_sb->s_cop->flags); @@ -604,8 +593,7 @@ fscrypt_setup_encryption_info(struct inode *inode, WARN_ON(mode->ivsize > FSCRYPT_MAX_IV_SIZE); crypt_info->ci_mode = mode; - res = setup_file_encryption_key(crypt_info, need_dirhash_key, - &master_key); + res = setup_file_encryption_key(crypt_info, need_dirhash_key, &mk); if (res) goto out; @@ -620,12 +608,9 @@ fscrypt_setup_encryption_info(struct inode *inode, * We won the race and set ->i_crypt_info to our crypt_info. * Now link it into the master key's inode list. 
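setup_file_encryption_key() above depends on the mk_sem discipline: each task setting up a file's key holds the semaphore shared, and key removal takes it exclusive before wiping the secret, so a setup in flight either completes first or observes the secret gone. A rough pthread analogue of that discipline; use_key, remove_key and secret_present are invented for the sketch:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mk_sem = PTHREAD_RWLOCK_INITIALIZER;
static bool secret_present = true;

/* setup path: hold the lock shared so removal cannot race with us */
static int use_key(void)
{
	int err = 0;

	pthread_rwlock_rdlock(&mk_sem);
	if (!secret_present)
		err = -1;	/* the key was removed before we got here */
	else
		puts("setting up file key under the shared lock");
	pthread_rwlock_unlock(&mk_sem);
	return err;
}

/* removal path: the exclusive lock waits out all in-flight setups */
static void remove_key(void)
{
	pthread_rwlock_wrlock(&mk_sem);
	secret_present = false;
	pthread_rwlock_unlock(&mk_sem);
}

int main(void)
{
	use_key();
	remove_key();
	printf("after removal: %d\n", use_key());
	return 0;
}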
*/ - if (master_key) { - struct fscrypt_master_key *mk = - master_key->payload.data[0]; - - refcount_inc(&mk->mk_refcount); - crypt_info->ci_master_key = key_get(master_key); + if (mk) { + crypt_info->ci_master_key = mk; + refcount_inc(&mk->mk_active_refs); spin_lock(&mk->mk_decrypted_inodes_lock); list_add(&crypt_info->ci_master_key_link, &mk->mk_decrypted_inodes); @@ -635,9 +620,9 @@ fscrypt_setup_encryption_info(struct inode *inode, } res = 0; out: - if (master_key) { - up_read(&master_key->sem); - key_put(master_key); + if (mk) { + up_read(&mk->mk_sem); + fscrypt_put_master_key(mk); } put_crypt_info(crypt_info); return res; @@ -802,7 +787,6 @@ EXPORT_SYMBOL(fscrypt_free_inode); int fscrypt_drop_inode(struct inode *inode) { const struct fscrypt_info *ci = fscrypt_get_info(inode); - const struct fscrypt_master_key *mk; /* * If ci is NULL, then the inode doesn't have an encryption key set up @@ -812,7 +796,6 @@ int fscrypt_drop_inode(struct inode *inode) */ if (!ci || !ci->ci_master_key) return 0; - mk = ci->ci_master_key->payload.data[0]; /* * With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes @@ -831,6 +814,6 @@ int fscrypt_drop_inode(struct inode *inode) * then the thread removing the key will either evict the inode itself * or will correctly detect that it wasn't evicted due to the race. */ - return !is_master_key_secret_present(&mk->mk_secret); + return !is_master_key_secret_present(&ci->ci_master_key->mk_secret); } EXPORT_SYMBOL_GPL(fscrypt_drop_inode); diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index a51cef6bd27f..41bd3e70fd20 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -692,12 +692,8 @@ int fscrypt_set_context(struct inode *inode, void *fs_data) * delayed key setup that requires the inode number. */ if (ci->ci_policy.version == FSCRYPT_POLICY_V2 && - (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) { - const struct fscrypt_master_key *mk = - ci->ci_master_key->payload.data[0]; - - fscrypt_hash_inode_number(ci, mk); - } + (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) + fscrypt_hash_inode_number(ci, ci->ci_master_key); return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, fs_data); } diff --git a/fs/dax.c b/fs/dax.c index 61e73b6a2314..c0f181ea1005 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -851,7 +851,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index, if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp)) goto unlock_pmd; - flush_cache_page(vma, address, pfn); + flush_cache_range(vma, address, + address + HPAGE_PMD_SIZE); pmd = pmdp_invalidate(vma, address, pmdp); pmd = pmd_wrprotect(pmd); pmd = pmd_mkclean(pmd); diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 848e0aaa8da5..f47f0a7d2c3b 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -730,6 +730,28 @@ void debugfs_remove(struct dentry *dentry) } EXPORT_SYMBOL_GPL(debugfs_remove); +/** + * debugfs_lookup_and_remove - lookup a directory or file and recursively remove it + * @name: a pointer to a string containing the name of the item to look up. + * @parent: a pointer to the parent dentry of the item. + * + * This is the equivalent of doing something like + * debugfs_remove(debugfs_lookup(..)) but with the proper reference counting + * handled for the directory being looked up.
+ */ +void debugfs_lookup_and_remove(const char *name, struct dentry *parent) +{ + struct dentry *dentry; + + dentry = debugfs_lookup(name, parent); + if (!dentry) + return; + + debugfs_remove(dentry); + dput(dentry); +} +EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove); + /** * debugfs_rename - rename a file/directory in the debugfs filesystem * @old_dir: a pointer to the parent dentry for the renamed object. This diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index 283c7b94edda..ca06069e95c8 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -198,13 +198,13 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, if (!prev_seq) { kref_get(&lkb->lkb_ref); + mutex_lock(&ls->ls_cb_mutex); if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) { - mutex_lock(&ls->ls_cb_mutex); list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay); - mutex_unlock(&ls->ls_cb_mutex); } else { queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work); } + mutex_unlock(&ls->ls_cb_mutex); } out: mutex_unlock(&lkb->lkb_cb_mutex); @@ -284,7 +284,9 @@ void dlm_callback_stop(struct dlm_ls *ls) void dlm_callback_suspend(struct dlm_ls *ls) { + mutex_lock(&ls->ls_cb_mutex); set_bit(LSFL_CB_DELAY, &ls->ls_flags); + mutex_unlock(&ls->ls_cb_mutex); if (ls->ls_callback_wq) flush_workqueue(ls->ls_callback_wq); diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 1e9d8999b939..dde9afb6747b 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -1551,6 +1551,7 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype, lkb->lkb_wait_type = 0; lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; lkb->lkb_wait_count--; + unhold_lkb(lkb); goto out_del; } @@ -1577,6 +1578,7 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype, log_error(ls, "remwait error %x reply %d wait_type %d overlap", lkb->lkb_id, mstype, lkb->lkb_wait_type); lkb->lkb_wait_count--; + unhold_lkb(lkb); lkb->lkb_wait_type = 0; } @@ -2886,17 +2888,9 @@ static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args) static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_args *args) { - int rv = -EINVAL; + int rv = -EBUSY; if (args->flags & DLM_LKF_CONVERT) { - if (lkb->lkb_flags & DLM_IFL_MSTCPY) - goto out; - - if (args->flags & DLM_LKF_QUECVT && - !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1]) - goto out; - - rv = -EBUSY; if (lkb->lkb_status != DLM_LKSTS_GRANTED) goto out; @@ -2905,6 +2899,14 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, if (is_overlap(lkb)) goto out; + + rv = -EINVAL; + if (lkb->lkb_flags & DLM_IFL_MSTCPY) + goto out; + + if (args->flags & DLM_LKF_QUECVT && + !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1]) + goto out; } lkb->lkb_exflags = args->flags; @@ -4065,13 +4067,14 @@ static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len) rv = _create_message(ls, sizeof(struct dlm_message) + len, dir_nodeid, DLM_MSG_REMOVE, &ms, &mh); if (rv) - return; + goto out; memcpy(ms->m_extra, name, len); ms->m_hash = hash; send_message(mh, ms); +out: spin_lock(&ls->ls_remove_spin); ls->ls_remove_len = 0; memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN); @@ -5312,11 +5315,16 @@ int dlm_recover_waiters_post(struct dlm_ls *ls) lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; lkb->lkb_wait_type = 0; - lkb->lkb_wait_count = 0; + /* drop all wait_count references; we still + * hold a reference for this iteration.
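The dlm_add_cb() change above works because the LSFL_CB_DELAY test and the action chosen by it now sit in a single ls_cb_mutex critical section, and dlm_callback_suspend() flips the flag under the same mutex, so a callback can no longer slip into the workqueue after a suspend has started. A small pthread sketch of the closed race; all names are invented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cb_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool cb_delay;	/* stands in for LSFL_CB_DELAY */
static int queued, delayed;

static void add_callback(void)
{
	/* test the flag and act on the result in one critical section */
	pthread_mutex_lock(&cb_mutex);
	if (cb_delay)
		delayed++;	/* list_add() on the delay list */
	else
		queued++;	/* queue_work() on the callback workqueue */
	pthread_mutex_unlock(&cb_mutex);
}

static void suspend_callbacks(void)
{
	/* setting the flag under the mutex serializes against add_callback */
	pthread_mutex_lock(&cb_mutex);
	cb_delay = true;
	pthread_mutex_unlock(&cb_mutex);
}

int main(void)
{
	add_callback();
	suspend_callbacks();
	add_callback();
	printf("queued=%d delayed=%d\n", queued, delayed);
	return 0;
}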
+ */ + while (lkb->lkb_wait_count) { + lkb->lkb_wait_count--; + unhold_lkb(lkb); + } mutex_lock(&ls->ls_waiters_mutex); list_del_init(&lkb->lkb_wait_reply); mutex_unlock(&ls->ls_waiters_mutex); - unhold_lkb(lkb); /* for waiters list */ if (oc || ou) { /* do an unlock or cancel instead of resending */ diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index c38b2b8ffd1d..a10d2bcfe75a 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -23,11 +23,11 @@ struct plock_op { struct list_head list; int done; struct dlm_plock_info info; + int (*callback)(struct file_lock *fl, int result); }; struct plock_xop { struct plock_op xop; - int (*callback)(struct file_lock *fl, int result); void *fl; void *file; struct file_lock flc; @@ -129,19 +129,18 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, /* fl_owner is lockd which doesn't distinguish processes on the nfs client */ op->info.owner = (__u64) fl->fl_pid; - xop->callback = fl->fl_lmops->lm_grant; + op->callback = fl->fl_lmops->lm_grant; locks_init_lock(&xop->flc); locks_copy_lock(&xop->flc, fl); xop->fl = fl; xop->file = file; } else { op->info.owner = (__u64)(long) fl->fl_owner; - xop->callback = NULL; } send_op(op); - if (xop->callback == NULL) { + if (!op->callback) { rv = wait_event_interruptible(recv_wq, (op->done != 0)); if (rv == -ERESTARTSYS) { log_debug(ls, "dlm_posix_lock: wait killed %llx", @@ -203,7 +202,7 @@ static int dlm_plock_callback(struct plock_op *op) file = xop->file; flc = &xop->flc; fl = xop->fl; - notify = xop->callback; + notify = op->callback; if (op->info.rv) { notify(fl, op->info.rv); @@ -436,10 +435,9 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count, if (op->info.fsid == info.fsid && op->info.number == info.number && op->info.owner == info.owner) { - struct plock_xop *xop = (struct plock_xop *)op; list_del_init(&op->list); memcpy(&op->info, &info, sizeof(info)); - if (xop->callback) + if (op->callback) do_callback = 1; else op->done = 1; diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 0b2e465d275e..6f6e3d0af2fd 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -94,14 +94,18 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, if (page) { __clear_bit(j, bounced); - if (kaddr) { - if (kaddr + PAGE_SIZE == page_address(page)) + if (!PageHighMem(page)) { + if (!i) { + kaddr = page_address(page); + continue; + } + if (kaddr && + kaddr + PAGE_SIZE == page_address(page)) { kaddr += PAGE_SIZE; - else - kaddr = NULL; - } else if (!i) { - kaddr = page_address(page); + continue; + } } + kaddr = NULL; continue; } kaddr = NULL; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 7fc79c47798a..7596db389201 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -789,12 +789,9 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, /* wake up the caller thread for sync decompression */ if (sync) { - unsigned long flags; - - spin_lock_irqsave(&io->u.wait.lock, flags); if (!atomic_add_return(bios, &io->pending_bios)) - wake_up_locked(&io->u.wait); - spin_unlock_irqrestore(&io->u.wait.lock, flags); + complete(&io->u.done); + return; } @@ -1214,7 +1211,7 @@ jobqueue_init(struct super_block *sb, } else { fg_out: q = fgq; - init_waitqueue_head(&fgq->u.wait); + init_completion(&fgq->u.done); atomic_set(&fgq->pending_bios, 0); } q->sb = sb; @@ -1377,8 +1374,7 @@ static void z_erofs_runqueue(struct super_block *sb, return; /* wait until all bios are completed */ - 
io_wait_event(io[JQ_SUBMIT].u.wait, - !atomic_read(&io[JQ_SUBMIT].pending_bios)); + wait_for_completion_io(&io[JQ_SUBMIT].u.done); /* handle synchronous decompress queue in the caller context */ z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool); diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h index 942ee69dff6a..60bf396164ec 100644 --- a/fs/erofs/zdata.h +++ b/fs/erofs/zdata.h @@ -90,7 +90,7 @@ struct z_erofs_decompressqueue { z_erofs_next_pcluster_t head; union { - wait_queue_head_t wait; + struct completion done; struct work_struct work; } u; }; diff --git a/fs/eventfd.c b/fs/eventfd.c index df466ef81ddd..4a14295cffe0 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -45,21 +45,7 @@ struct eventfd_ctx { int id; }; -/** - * eventfd_signal - Adds @n to the eventfd counter. - * @ctx: [in] Pointer to the eventfd context. - * @n: [in] Value of the counter to be added to the eventfd internal counter. - * The value cannot be negative. - * - * This function is supposed to be called by the kernel in paths that do not - * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX - * value, and we signal this as overflow condition by returning a EPOLLERR - * to poll(2). - * - * Returns the amount by which the counter was incremented. This will be less - * than @n if the counter has overflowed. - */ -__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n) +__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask) { unsigned long flags; @@ -80,12 +66,31 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n) n = ULLONG_MAX - ctx->count; ctx->count += n; if (waitqueue_active(&ctx->wqh)) - wake_up_locked_poll(&ctx->wqh, EPOLLIN); + wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask); this_cpu_dec(eventfd_wake_count); spin_unlock_irqrestore(&ctx->wqh.lock, flags); return n; } + +/** + * eventfd_signal - Adds @n to the eventfd counter. + * @ctx: [in] Pointer to the eventfd context. + * @n: [in] Value of the counter to be added to the eventfd internal counter. + * The value cannot be negative. + * + * This function is supposed to be called by the kernel in paths that do not + * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX + * value, and we signal this as overflow condition by returning a EPOLLERR + * to poll(2). + * + * Returns the amount by which the counter was incremented. This will be less + * than @n if the counter has overflowed. 
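Switching the erofs submit queue from an open-coded waitqueue to a completion removes the hand-rolled locking around the wakeup: the submitter blocks once and the final bio completes it. A user-space approximation built from a mutex and a condition variable; this only mirrors the shape of the kernel's struct completion, it is not the real thing:

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion io_done;

static void *bio_end_io(void *arg)
{
	complete(&io_done);	/* the last pending bio finished */
	return NULL;
}

int main(void)
{
	pthread_t t;

	init_completion(&io_done);
	pthread_create(&t, NULL, bio_end_io, NULL);
	wait_for_completion(&io_done);	/* the z_erofs_runqueue() side */
	pthread_join(t, NULL);
	puts("all bios completed");
	return 0;
}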
+ */ +__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n) +{ + return eventfd_signal_mask(ctx, n, 0); +} EXPORT_SYMBOL_GPL(eventfd_signal); static void eventfd_free_ctx(struct eventfd_ctx *ctx) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index aeffff8debed..2c67c59814e1 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -551,7 +551,8 @@ out_unlock: */ #ifdef CONFIG_DEBUG_LOCK_ALLOC -static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi) +static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi, + unsigned pollflags) { struct eventpoll *ep_src; unsigned long flags; @@ -582,16 +583,17 @@ static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi) } spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests); ep->nests = nests + 1; - wake_up_locked_poll(&ep->poll_wait, EPOLLIN); + wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags); ep->nests = 0; spin_unlock_irqrestore(&ep->poll_wait.lock, flags); } #else -static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi) +static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi, + unsigned pollflags) { - wake_up_poll(&ep->poll_wait, EPOLLIN); + wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags); } #endif @@ -818,7 +820,7 @@ static void ep_free(struct eventpoll *ep) /* We need to release all tasks waiting for these file */ if (waitqueue_active(&ep->poll_wait)) - ep_poll_safewake(ep, NULL); + ep_poll_safewake(ep, NULL, 0); /* * We need to lock this because we could be hit by @@ -1287,7 +1289,7 @@ out_unlock: /* We have to call this outside the lock */ if (pwake) - ep_poll_safewake(ep, epi); + ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE); if (!(epi->event.events & EPOLLEXCLUSIVE)) ewake = 1; @@ -1597,7 +1599,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, /* We have to call this outside the lock */ if (pwake) - ep_poll_safewake(ep, NULL); + ep_poll_safewake(ep, NULL, 0); return 0; @@ -1700,7 +1702,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, /* We have to call this outside the lock */ if (pwake) - ep_poll_safewake(ep, NULL); + ep_poll_safewake(ep, NULL, 0); return 0; } @@ -1812,6 +1814,21 @@ static inline struct timespec64 ep_set_mstimeout(long ms) return timespec64_add_safe(now, ts); } +/* + * autoremove_wake_function, but remove even on failure to wake up, because we + * know that default_wake_function/ttwu will only fail if the thread is already + * woken, and in that case the ep_poll loop will remove the entry anyways, not + * try to reuse it. + */ +static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry, + unsigned int mode, int sync, void *key) +{ + int ret = default_wake_function(wq_entry, mode, sync, key); + + list_del_init(&wq_entry->entry); + return ret; +} + /** * ep_poll - Retrieves ready events, and delivers them to the caller supplied * event buffer. @@ -1889,8 +1906,15 @@ fetch_events: * normal wakeup path no need to call __remove_wait_queue() * explicitly, thus ep->lock is not taken, which halts the * event delivery. + * + * In fact, we now use an even more aggressive function that + * unconditionally removes, because we don't reuse the wait + * entry between loop iterations. This lets us also avoid the + * performance issue if a process is killed, causing all of its + * threads to wake up without being removed normally. 
*/ init_wait(&wait); + wait.func = ep_autoremove_wake_function; write_lock_irq(&ep->lock); /* diff --git a/fs/exec.c b/fs/exec.c index b750bd9a0d56..b798885e30ce 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1200,11 +1200,11 @@ static int unshare_sighand(struct task_struct *me) return -ENOMEM; refcount_set(&newsighand->count, 1); - memcpy(newsighand->action, oldsighand->action, - sizeof(newsighand->action)); write_lock_irq(&tasklist_lock); spin_lock(&oldsighand->siglock); + memcpy(newsighand->action, oldsighand->action, + sizeof(newsighand->action)); rcu_assign_pointer(me->sighand, newsighand); spin_unlock(&oldsighand->siglock); write_unlock_irq(&tasklist_lock); @@ -1288,7 +1288,10 @@ int begin_new_exec(struct linux_binprm * bprm) bprm->mm = NULL; #ifdef CONFIG_POSIX_TIMERS - exit_itimers(me->signal); + spin_lock_irq(&me->sighand->siglock); + posix_cpu_timers_exit(me); + spin_unlock_irq(&me->sighand->siglock); + exit_itimers(me); flush_itimer_signals(); #endif diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c index 75ab43905f73..da2c94c652b0 100644 --- a/fs/exfat/balloc.c +++ b/fs/exfat/balloc.c @@ -148,7 +148,9 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync) struct super_block *sb = inode->i_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); - WARN_ON(clu < EXFAT_FIRST_CLUSTER); + if (!is_valid_cluster(sbi, clu)) + return -EINVAL; + ent_idx = CLUSTER_TO_BITMAP_ENT(clu); i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx); b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx); @@ -158,7 +160,7 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync) return 0; } -void exfat_clear_bitmap(struct inode *inode, unsigned int clu) +void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) { int i, b; unsigned int ent_idx; @@ -166,13 +168,15 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu) struct exfat_sb_info *sbi = EXFAT_SB(sb); struct exfat_mount_options *opts = &sbi->options; - WARN_ON(clu < EXFAT_FIRST_CLUSTER); + if (!is_valid_cluster(sbi, clu)) + return; + ent_idx = CLUSTER_TO_BITMAP_ENT(clu); i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx); b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx); clear_bit_le(b, sbi->vol_amap[i]->b_data); - exfat_update_bh(sbi->vol_amap[i], IS_DIRSYNC(inode)); + exfat_update_bh(sbi->vol_amap[i], sync); if (opts->discard) { int ret_discard; diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index c5d370990d39..2aa02a7c643a 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -380,6 +380,14 @@ static inline int exfat_sector_to_cluster(struct exfat_sb_info *sbi, EXFAT_RESERVED_CLUSTERS; } +static inline bool is_valid_cluster(struct exfat_sb_info *sbi, + unsigned int clus) +{ + if (clus < EXFAT_FIRST_CLUSTER || sbi->num_clusters <= clus) + return false; + return true; +} + /* super.c */ int exfat_set_volume_dirty(struct super_block *sb); int exfat_clear_volume_dirty(struct super_block *sb); @@ -408,7 +416,7 @@ int exfat_count_num_clusters(struct super_block *sb, int exfat_load_bitmap(struct super_block *sb); void exfat_free_bitmap(struct exfat_sb_info *sbi); int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync); -void exfat_clear_bitmap(struct inode *inode, unsigned int clu); +void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync); unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu); int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count); diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c index 6078cbfdfe8a..78aa02e2d2c1 
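The unshare_sighand() fix above moves the memcpy() of the handler table inside siglock, following the usual rule that a snapshot of shared state must be taken under the same lock its writers hold, or a concurrent update can be half-copied. A pthread rendition of the idea, with invented names and a plain int array standing in for the signal handler table:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NSIG 64

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static int shared_actions[NSIG];	/* written by other threads under siglock */

/* writer side: every update happens under siglock */
static void set_action(int sig, int act)
{
	pthread_mutex_lock(&siglock);
	shared_actions[sig] = act;
	pthread_mutex_unlock(&siglock);
}

/* copy under the same lock, so the snapshot is never torn */
static void snapshot_actions(int out[NSIG])
{
	pthread_mutex_lock(&siglock);
	memcpy(out, shared_actions, sizeof(shared_actions));
	pthread_mutex_unlock(&siglock);
}

int main(void)
{
	int copy[NSIG];

	set_action(2, 1);
	snapshot_actions(copy);
	printf("action[2]=%d\n", copy[2]);
	return 0;
}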
100644 --- a/fs/exfat/fatent.c +++ b/fs/exfat/fatent.c @@ -81,14 +81,6 @@ int exfat_ent_set(struct super_block *sb, unsigned int loc, return 0; } -static inline bool is_valid_cluster(struct exfat_sb_info *sbi, - unsigned int clus) -{ - if (clus < EXFAT_FIRST_CLUSTER || sbi->num_clusters <= clus) - return false; - return true; -} - int exfat_ent_get(struct super_block *sb, unsigned int loc, unsigned int *content) { @@ -157,6 +149,7 @@ int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain) unsigned int clu; struct super_block *sb = inode->i_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); + int cur_cmap_i, next_cmap_i; /* invalid cluster number */ if (p_chain->dir == EXFAT_FREE_CLUSTER || @@ -176,21 +169,53 @@ int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain) clu = p_chain->dir; - if (p_chain->flags == ALLOC_NO_FAT_CHAIN) { - do { - exfat_clear_bitmap(inode, clu); - clu++; + cur_cmap_i = next_cmap_i = + BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(clu)); + if (p_chain->flags == ALLOC_NO_FAT_CHAIN) { + unsigned int last_cluster = p_chain->dir + p_chain->size - 1; + + do { + bool sync = false; + + if (clu < last_cluster) + next_cmap_i = + BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(clu+1)); + + /* flush bitmap only if index would be changed or for last cluster */ + if (clu == last_cluster || cur_cmap_i != next_cmap_i) { + sync = true; + cur_cmap_i = next_cmap_i; + } + + exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))); + clu++; num_clusters++; } while (num_clusters < p_chain->size); } else { do { - exfat_clear_bitmap(inode, clu); + bool sync = false; + unsigned int n_clu = clu; + int err = exfat_get_next_cluster(sb, &n_clu); - if (exfat_get_next_cluster(sb, &clu)) - goto dec_used_clus; + if (err || n_clu == EXFAT_EOF_CLUSTER) + sync = true; + else + next_cmap_i = + BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(n_clu)); + + if (cur_cmap_i != next_cmap_i) { + sync = true; + cur_cmap_i = next_cmap_i; + } + + exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))); + clu = n_clu; num_clusters++; + + if (err) + goto dec_used_clus; } while (clu != EXFAT_EOF_CLUSTER); } diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 2588d3ae1a4c..0345f8e6280b 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -1060,9 +1060,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) sbi->s_frags_per_group); goto failed_mount; } - if (sbi->s_inodes_per_group > sb->s_blocksize * 8) { + if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || + sbi->s_inodes_per_group > sb->s_blocksize * 8) { ext2_msg(sb, KERN_ERR, - "error: #inodes per group too big: %lu", + "error: invalid #inodes per group: %lu", sbi->s_inodes_per_group); goto failed_mount; } @@ -1072,6 +1073,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) - le32_to_cpu(es->s_first_data_block) - 1) / EXT2_BLOCKS_PER_GROUP(sb)) + 1; + if ((u64)sbi->s_groups_count * sbi->s_inodes_per_group != + le32_to_cpu(es->s_inodes_count)) { + ext2_msg(sb, KERN_ERR, "error: invalid #inodes: %u vs computed %llu", + le32_to_cpu(es->s_inodes_count), + (u64)sbi->s_groups_count * sbi->s_inodes_per_group); + goto failed_mount; + } db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) / EXT2_DESC_PER_BLOCK(sb); sbi->s_group_desc = kmalloc_array (db_count, diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index e03510622973..666ea80b3cf3 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ 
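The new ext2 mount-time check multiplies two 32-bit superblock fields in 64-bit arithmetic before comparing them against s_inodes_count, so a crafted image cannot make the product wrap and pass. A self-contained sketch of that validation, with the field names shortened:

#include <stdint.h>
#include <stdio.h>

/* reject geometry where groups * inodes_per_group != inodes_count */
static int validate_geometry(uint32_t groups_count,
			     uint32_t inodes_per_group,
			     uint32_t inodes_count)
{
	/* widen before multiplying: the product cannot wrap in 64 bits */
	if ((uint64_t)groups_count * inodes_per_group != inodes_count)
		return -1;
	return 0;
}

int main(void)
{
	/* plausible image: 16 groups of 2048 inodes */
	printf("%d\n", validate_geometry(16, 2048, 32768));
	/* crafted image whose 32-bit product would have wrapped to 0 */
	printf("%d\n", validate_geometry(1u << 16, 1u << 16, 0));
	return 0;
}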
-1419,12 +1419,6 @@ struct ext4_super_block { #ifdef __KERNEL__ -#ifdef CONFIG_FS_ENCRYPTION -#define DUMMY_ENCRYPTION_ENABLED(sbi) ((sbi)->s_dummy_enc_policy.policy != NULL) -#else -#define DUMMY_ENCRYPTION_ENABLED(sbi) (0) -#endif - /* Number of quota types we support */ #define EXT4_MAXQUOTAS 3 @@ -2159,6 +2153,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi) * Structure of a directory entry */ #define EXT4_NAME_LEN 255 +/* + * Base length of the ext4 directory entry excluding the name length + */ +#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN) struct ext4_dir_entry { __le32 inode; /* Inode number */ @@ -2915,7 +2913,7 @@ extern int ext4_inode_attach_jinode(struct inode *inode); extern int ext4_can_truncate(struct inode *inode); extern int ext4_truncate(struct inode *); extern int ext4_break_layouts(struct inode *); -extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length); +extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length); extern void ext4_set_inode_flags(struct inode *, bool init); extern int ext4_alloc_da_blocks(struct inode *inode); extern void ext4_set_aops(struct inode *inode); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 0fda3051760d..54750b7c162d 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -371,7 +371,7 @@ static int ext4_valid_extent_entries(struct inode *inode, { unsigned short entries; ext4_lblk_t lblock = 0; - ext4_lblk_t prev = 0; + ext4_lblk_t cur = 0; if (eh->eh_entries == 0) return 1; @@ -395,11 +395,11 @@ static int ext4_valid_extent_entries(struct inode *inode, /* Check for overlapping extents */ lblock = le32_to_cpu(ext->ee_block); - if ((lblock <= prev) && prev) { + if (lblock < cur) { *pblk = ext4_ext_pblock(ext); return 0; } - prev = lblock + ext4_ext_get_actual_len(ext) - 1; + cur = lblock + ext4_ext_get_actual_len(ext); ext++; entries--; } @@ -419,13 +419,13 @@ static int ext4_valid_extent_entries(struct inode *inode, /* Check for overlapping index extents */ lblock = le32_to_cpu(ext_idx->ei_block); - if ((lblock <= prev) && prev) { + if (lblock < cur) { *pblk = ext4_idx_pblock(ext_idx); return 0; } ext_idx++; entries--; - prev = lblock; + cur = lblock + 1; } } return 1; @@ -459,6 +459,10 @@ static int __ext4_ext_check(const char *function, unsigned int line, error_msg = "invalid eh_entries"; goto corrupted; } + if (unlikely((eh->eh_entries == 0) && (depth > 0))) { + error_msg = "eh_entries is 0 but eh_depth is > 0"; + goto corrupted; + } if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) { error_msg = "invalid extent entries"; goto corrupted; @@ -4498,9 +4502,9 @@ retry: return ret > 0 ? 
ret2 : ret; } -static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len); +static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len); -static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len); +static int ext4_insert_range(struct file *file, loff_t offset, loff_t len); static long ext4_zero_range(struct file *file, loff_t offset, loff_t len, int mode) @@ -4571,6 +4575,10 @@ static long ext4_zero_range(struct file *file, loff_t offset, /* Wait all existing dio workers, newcomers will block on i_mutex */ inode_dio_wait(inode); + ret = file_modified(file); + if (ret) + goto out_mutex; + /* Preallocate the range including the unaligned edges */ if (partial_begin || partial_end) { ret = ext4_alloc_file_blocks(file, @@ -4687,23 +4695,24 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) return -EOPNOTSUPP; ext4_fc_start_update(inode); - - if (mode & FALLOC_FL_PUNCH_HOLE) { - ret = ext4_punch_hole(inode, offset, len); - goto exit; - } - + inode_lock(inode); ret = ext4_convert_inline_data(inode); + inode_unlock(inode); if (ret) goto exit; + if (mode & FALLOC_FL_PUNCH_HOLE) { + ret = ext4_punch_hole(file, offset, len); + goto exit; + } + if (mode & FALLOC_FL_COLLAPSE_RANGE) { - ret = ext4_collapse_range(inode, offset, len); + ret = ext4_collapse_range(file, offset, len); goto exit; } if (mode & FALLOC_FL_INSERT_RANGE) { - ret = ext4_insert_range(inode, offset, len); + ret = ext4_insert_range(file, offset, len); goto exit; } @@ -4739,6 +4748,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) /* Wait all existing dio workers, newcomers will block on i_mutex */ inode_dio_wait(inode); + ret = file_modified(file); + if (ret) + goto out; + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags); if (ret) goto out; @@ -5169,6 +5182,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, * and it is decreased till we reach start. */ again: + ret = 0; if (SHIFT == SHIFT_LEFT) iterator = &start; else @@ -5212,14 +5226,21 @@ again: ext4_ext_get_actual_len(extent); } else { extent = EXT_FIRST_EXTENT(path[depth].p_hdr); - if (le32_to_cpu(extent->ee_block) > 0) + if (le32_to_cpu(extent->ee_block) > start) *iterator = le32_to_cpu(extent->ee_block) - 1; - else - /* Beginning is reached, end of the loop */ + else if (le32_to_cpu(extent->ee_block) == start) iterator = NULL; - /* Update path extent in case we need to stop */ - while (le32_to_cpu(extent->ee_block) < start) + else { + extent = EXT_LAST_EXTENT(path[depth].p_hdr); + while (le32_to_cpu(extent->ee_block) >= start) + extent--; + + if (extent == EXT_LAST_EXTENT(path[depth].p_hdr)) + break; + extent++; + iterator = NULL; + } path[depth].p_ext = extent; } ret = ext4_ext_shift_path_extents(path, shift, inode, @@ -5241,8 +5262,9 @@ out: * This implements the fallocate's collapse range functionality for ext4 * Returns: 0 and non-zero on error. 
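The reworked ext4_valid_extent_entries() above tracks cur, the first logical block not yet covered, instead of the last covered block; among other things this closes the old blind spot where an extent ending at block 0 disabled the overlap check for its successor. The invariant in isolation, with an invented extent type:

#include <stdint.h>
#include <stdio.h>

struct extent {
	uint32_t lblk;	/* first logical block covered */
	uint32_t len;	/* number of blocks covered */
};

/* entries must be sorted and non-overlapping: each extent must start
 * at or after the first block not covered by its predecessors */
static int entries_valid(const struct extent *ext, int n)
{
	uint32_t cur = 0;	/* lowest block still allowed */
	int i;

	for (i = 0; i < n; i++) {
		if (ext[i].lblk < cur)
			return 0;
		cur = ext[i].lblk + ext[i].len;
	}
	return 1;
}

int main(void)
{
	struct extent ok[] = { { 0, 4 }, { 4, 2 }, { 10, 1 } };
	struct extent bad[] = { { 0, 4 }, { 3, 2 } };	/* reuses block 3 */

	printf("%d %d\n", entries_valid(ok, 3), entries_valid(bad, 2));
	return 0;
}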
*/ -static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) +static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len) { + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; ext4_lblk_t punch_start, punch_stop; handle_t *handle; @@ -5293,6 +5315,10 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) /* Wait for existing dio to complete */ inode_dio_wait(inode); + ret = file_modified(file); + if (ret) + goto out_mutex; + /* * Prevent page faults from reinstantiating pages we have released from * page cache. @@ -5387,8 +5413,9 @@ out_mutex: * by len bytes. * Returns 0 on success, error otherwise. */ -static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) +static int ext4_insert_range(struct file *file, loff_t offset, loff_t len) { + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; handle_t *handle; struct ext4_ext_path *path; @@ -5444,6 +5471,10 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) /* Wait for existing dio to complete */ inode_dio_wait(inode); + ret = file_modified(file); + if (ret) + goto out_mutex; + /* * Prevent page faults from reinstantiating pages we have released from * page cache. diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index 501e60713010..41dcf21558c4 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -766,22 +766,25 @@ static int ext4_fc_write_inode(struct inode *inode, u32 *crc) tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_INODE); tl.fc_len = cpu_to_le16(inode_len + sizeof(fc_inode.fc_ino)); + ret = -ECANCELED; dst = ext4_fc_reserve_space(inode->i_sb, sizeof(tl) + inode_len + sizeof(fc_inode.fc_ino), crc); if (!dst) - return -ECANCELED; + goto err; if (!ext4_fc_memcpy(inode->i_sb, dst, &tl, sizeof(tl), crc)) - return -ECANCELED; + goto err; dst += sizeof(tl); if (!ext4_fc_memcpy(inode->i_sb, dst, &fc_inode, sizeof(fc_inode), crc)) - return -ECANCELED; + goto err; dst += sizeof(fc_inode); if (!ext4_fc_memcpy(inode->i_sb, dst, (u8 *)ext4_raw_inode(&iloc), inode_len, crc)) - return -ECANCELED; - - return 0; + goto err; + ret = 0; +err: + brelse(iloc.bh); + return ret; } /* @@ -1388,13 +1391,15 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino) if (state->fc_modified_inodes[i] == ino) return 0; if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) { - state->fc_modified_inodes = krealloc( - state->fc_modified_inodes, + int *fc_modified_inodes; + + fc_modified_inodes = krealloc(state->fc_modified_inodes, sizeof(int) * (state->fc_modified_inodes_size + EXT4_FC_REPLAY_REALLOC_INCREMENT), GFP_KERNEL); - if (!state->fc_modified_inodes) + if (!fc_modified_inodes) return -ENOMEM; + state->fc_modified_inodes = fc_modified_inodes; state->fc_modified_inodes_size += EXT4_FC_REPLAY_REALLOC_INCREMENT; } @@ -1579,15 +1584,18 @@ int ext4_fc_record_regions(struct super_block *sb, int ino, if (replay && state->fc_regions_used != state->fc_regions_valid) state->fc_regions_used = state->fc_regions_valid; if (state->fc_regions_used == state->fc_regions_size) { + struct ext4_fc_alloc_region *fc_regions; + + fc_regions = krealloc(state->fc_regions, + sizeof(struct ext4_fc_alloc_region) * + (state->fc_regions_size + + EXT4_FC_REPLAY_REALLOC_INCREMENT), + GFP_KERNEL); + if (!fc_regions) + return -ENOMEM; state->fc_regions_size += EXT4_FC_REPLAY_REALLOC_INCREMENT; - state->fc_regions = krealloc( - state->fc_regions, - state->fc_regions_size * - 
sizeof(struct ext4_fc_alloc_region), - GFP_KERNEL); - if (!state->fc_regions) - return -ENOMEM; + state->fc_regions = fc_regions; } region = &state->fc_regions[state->fc_regions_used++]; region->ino = ino; diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 98c13bf3228e..b41472c9697a 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -531,6 +531,12 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from) ret = -EAGAIN; goto out; } + /* + * Make sure inline data cannot be created anymore since we are going + * to allocate blocks for DIO. We know the inode does not have any + * inline data now because ext4_dio_supported() checked for that. + */ + ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); offset = iocb->ki_pos; count = ret; diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 650091bf4945..4dac989c5ae3 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -511,7 +511,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, goto fallback; } - max_dirs = ndirs / ngroups + inodes_per_group / 16; + max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16; min_inodes = avefreei - inodes_per_group*flex_size / 4; if (min_inodes < 1) min_inodes = 1; diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 58ce2b0c90bd..e0c639b73bca 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -35,6 +35,9 @@ static int get_max_inline_xattr_value_size(struct inode *inode, struct ext4_inode *raw_inode; int free, min_offs; + if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) + return 0; + min_offs = EXT4_SB(inode->i_sb)->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE - EXT4_I(inode)->i_extra_isize - @@ -1993,6 +1996,18 @@ int ext4_convert_inline_data(struct inode *inode) if (!ext4_has_inline_data(inode)) { ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); return 0; + } else if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { + /* + * Inode has inline data but EXT4_STATE_MAY_INLINE_DATA is + * cleared. This means we are in the middle of moving inline + * data to a delay-allocated block. Just force writeout + * here to finish conversion. + */ + error = filemap_flush(inode->i_mapping); + if (error) + return error; + if (!ext4_has_inline_data(inode)) + return 0; } needed_blocks = ext4_writepage_trans_blocks(inode); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index a6b3a5553fde..3c8859b33326 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1186,6 +1186,13 @@ retry_grab: page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; + /* + * As with page allocation, we preallocate buffer heads before + * starting the handle.
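Both fast-commit fixes above apply the standard rule for growing a buffer with krealloc(): keep the result in a temporary, because writing a NULL return over the only pointer to the old buffer leaks it and loses the data. The same pattern with plain realloc(); grow() and the sizes are invented:

#include <stdio.h>
#include <stdlib.h>

static int grow(int **arr, size_t *cap, size_t extra)
{
	/* never assign the realloc result straight to *arr */
	int *tmp = realloc(*arr, (*cap + extra) * sizeof(**arr));

	if (!tmp)
		return -1;	/* *arr is still valid and can be freed later */
	*arr = tmp;
	*cap += extra;
	return 0;
}

int main(void)
{
	int *regions = NULL;
	size_t cap = 0;

	if (grow(&regions, &cap, 16) == 0)
		printf("capacity now %zu\n", cap);
	free(regions);
	return 0;
}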
+ */ + if (!page_has_buffers(page)) + create_empty_buffers(page, inode->i_sb->s_blocksize, 0); + unlock_page(page); retry_journal: @@ -1590,7 +1597,14 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd, ext4_lblk_t start, last; start = index << (PAGE_SHIFT - inode->i_blkbits); last = end << (PAGE_SHIFT - inode->i_blkbits); + + /* + * avoid racing with extent status tree scans made by + * ext4_insert_delayed_block() + */ + down_write(&EXT4_I(inode)->i_data_sem); ext4_es_remove_extent(inode, start, last - start + 1); + up_write(&EXT4_I(inode)->i_data_sem); } pagevec_init(&pvec); @@ -3243,13 +3257,15 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block) { struct inode *inode = mapping->host; journal_t *journal; + sector_t ret = 0; int err; + inode_lock_shared(inode); /* * We can get here for an inline file via the FIBMAP ioctl */ if (ext4_has_inline_data(inode)) - return 0; + goto out; if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && test_opt(inode->i_sb, DELALLOC)) { @@ -3288,10 +3304,14 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block) jbd2_journal_unlock_updates(journal); if (err) - return 0; + goto out; } - return iomap_bmap(mapping, block, &ext4_iomap_ops); + ret = iomap_bmap(mapping, block, &ext4_iomap_ops); + +out: + inode_unlock_shared(inode); + return ret; } static int ext4_readpage(struct file *file, struct page *page) @@ -4060,27 +4080,20 @@ int ext4_break_layouts(struct inode *inode) * Returns: 0 on success or negative on failure */ -int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) +int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) { + struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; ext4_lblk_t first_block, stop_block; struct address_space *mapping = inode->i_mapping; - loff_t first_block_offset, last_block_offset; + loff_t first_block_offset, last_block_offset, max_length; + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); handle_t *handle; unsigned int credits; int ret = 0, ret2 = 0; trace_ext4_punch_hole(inode, offset, length, 0); - ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); - if (ext4_has_inline_data(inode)) { - down_write(&EXT4_I(inode)->i_mmap_sem); - ret = ext4_convert_inline_data(inode); - up_write(&EXT4_I(inode)->i_mmap_sem); - if (ret) - return ret; - } - /* * Write out all dirty pages to avoid race conditions * Then release them. @@ -4108,6 +4121,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) offset; } + /* + * For punch hole the length + offset needs to be within one block + * before last range. Adjust the length if it goes beyond that limit. + */ + max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize; + if (offset + length > max_length) + length = max_length - offset; + if (offset & (sb->s_blocksize - 1) || (offset + length) & (sb->s_blocksize - 1)) { /* @@ -4123,6 +4144,10 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) /* Wait all existing dio workers, newcomers will block on i_mutex */ inode_dio_wait(inode); + ret = file_modified(file); + if (ret) + goto out_mutex; + /* * Prevent page faults from reinstantiating pages we have released from * page cache. 
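The max_length clamp added to ext4_punch_hole() above is plain saturation arithmetic: the hole may not extend past one block short of the bitmap-addressable limit, so an oversized request is shortened rather than rejected. A tiny illustration with made-up sizes:

#include <stdio.h>

/* clamp an (offset, length) punch request to the filesystem limit */
static long long clamp_punch_len(long long offset, long long length,
				 long long bitmap_maxbytes, long long blocksize)
{
	long long max_length = bitmap_maxbytes - blocksize;

	if (offset + length > max_length)
		length = max_length - offset;
	return length;
}

int main(void)
{
	/* 4096-byte blocks and a 1 MiB limit, purely for the example */
	printf("%lld\n", clamp_punch_len(0, 1 << 20, 1 << 20, 4096));
	return 0;
}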
@@ -4627,8 +4652,7 @@ static inline int ext4_iget_extra_inode(struct inode *inode, __le32 *magic = (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; - if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <= - EXT4_INODE_SIZE(inode->i_sb) && + if (EXT4_INODE_HAS_XATTR_SPACE(inode) && *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { ext4_set_inode_state(inode, EXT4_STATE_XATTR); return ext4_find_inline_data_nolock(inode); @@ -5462,6 +5486,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) if (attr->ia_valid & ATTR_SIZE) { handle_t *handle; loff_t oldsize = inode->i_size; + loff_t old_disksize; int shrink = (attr->ia_size < inode->i_size); if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { @@ -5535,6 +5560,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) inode->i_sb->s_blocksize_bits); down_write(&EXT4_I(inode)->i_data_sem); + old_disksize = EXT4_I(inode)->i_disksize; EXT4_I(inode)->i_disksize = attr->ia_size; rc = ext4_mark_inode_dirty(handle, inode); if (!error) @@ -5546,6 +5572,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) */ if (!error) i_size_write(inode, attr->ia_size); + else + EXT4_I(inode)->i_disksize = old_disksize; up_write(&EXT4_I(inode)->i_data_sem); ext4_journal_stop(handle); if (error) @@ -5780,7 +5808,12 @@ int ext4_mark_iloc_dirty(handle_t *handle, } ext4_fc_track_inode(handle, inode); - if (IS_I_VERSION(inode)) + /* + * ea_inodes are using i_version for storing reference count, don't + * mess with it + */ + if (IS_I_VERSION(inode) && + !(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) inode_inc_iversion(inode); /* the do_update_inode consumes one bh->b_count */ diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 15223b5a3af9..d5ca02a7766e 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3520,6 +3520,15 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, size = size >> bsbits; start = start_off >> bsbits; + /* + * For tiny groups (smaller than 8MB) the chosen allocation + * alignment may be larger than group size. Make sure the + * alignment does not move allocation to a different group which + * makes mballoc fail assertions later. + */ + start = max(start, rounddown(ac->ac_o_ex.fe_logical, + (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); + /* don't cover already allocated blocks in selected range */ if (ar->pleft && start <= ar->lleft) { size -= ar->lleft + 1 - start; @@ -4950,6 +4959,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, ext4_fsblk_t block = 0; unsigned int inquota = 0; unsigned int reserv_clstrs = 0; + int retries = 0; u64 seq; might_sleep(); @@ -5052,7 +5062,8 @@ repeat: ar->len = ac->ac_b_ex.fe_len; } } else { - if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) + if (++retries < 3 && + ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) goto repeat; /* * If block allocation fails then the pa allocated above diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 49912814f3d8..4bfe2252d9a4 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -417,7 +417,7 @@ int ext4_ext_migrate(struct inode *inode) struct inode *tmp_inode = NULL; struct migrate_struct lb; unsigned long max_entries; - __u32 goal; + __u32 goal, tmp_csum_seed; uid_t owner[2]; /* @@ -425,7 +425,8 @@ int ext4_ext_migrate(struct inode *inode) * already is extent-based, error out. 
*/ if (!ext4_has_feature_extents(inode->i_sb) || - (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) + ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) || + ext4_has_inline_data(inode)) return -EINVAL; if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0) @@ -465,6 +466,7 @@ int ext4_ext_migrate(struct inode *inode) * the migration. */ ei = EXT4_I(inode); + tmp_csum_seed = EXT4_I(tmp_inode)->i_csum_seed; EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed; i_size_write(tmp_inode, i_size_read(inode)); /* @@ -575,6 +577,7 @@ err_out: * the inode is not visible to user space. */ tmp_inode->i_blocks = 0; + EXT4_I(tmp_inode)->i_csum_seed = tmp_csum_seed; /* Reset the extent details */ ext4_ext_tree_init(handle, tmp_inode); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 49fd86cac7b9..6d221fafc5b9 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -54,6 +54,7 @@ static struct buffer_head *ext4_append(handle_t *handle, struct inode *inode, ext4_lblk_t *block) { + struct ext4_map_blocks map; struct buffer_head *bh; int err; @@ -63,6 +64,21 @@ static struct buffer_head *ext4_append(handle_t *handle, return ERR_PTR(-ENOSPC); *block = inode->i_size >> inode->i_sb->s_blocksize_bits; + map.m_lblk = *block; + map.m_len = 1; + + /* + * We're appending new directory block. Make sure the block is not + * allocated yet, otherwise we will end up corrupting the + * directory. + */ + err = ext4_map_blocks(NULL, inode, &map, 0); + if (err < 0) + return ERR_PTR(err); + if (err) { + EXT4_ERROR_INODE(inode, "Logical block already allocated"); + return ERR_PTR(-EFSCORRUPTED); + } bh = ext4_bread(handle, inode, *block, EXT4_GET_BLOCKS_CREATE); if (IS_ERR(bh)) @@ -109,6 +125,13 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode, struct ext4_dir_entry *dirent; int is_dx_block = 0; + if (block >= inode->i_size >> inode->i_blkbits) { + ext4_error_inode(inode, func, line, block, + "Attempting to read directory block (%u) that is past i_size (%llu)", + block, inode->i_size); + return ERR_PTR(-EFSCORRUPTED); + } + if (ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_EIO)) bh = ERR_PTR(-EIO); else @@ -757,12 +780,14 @@ static struct dx_frame * dx_probe(struct ext4_filename *fname, struct inode *dir, struct dx_hash_info *hinfo, struct dx_frame *frame_in) { - unsigned count, indirect; + unsigned count, indirect, level, i; struct dx_entry *at, *entries, *p, *q, *m; struct dx_root *root; struct dx_frame *frame = frame_in; struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR); u32 hash; + ext4_lblk_t block; + ext4_lblk_t blocks[EXT4_HTREE_LEVEL]; memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0])); frame->bh = ext4_read_dirblock(dir, 0, INDEX); @@ -834,6 +859,8 @@ dx_probe(struct ext4_filename *fname, struct inode *dir, } dxtrace(printk("Look up %x", hash)); + level = 0; + blocks[0] = 0; while (1) { count = dx_get_count(entries); if (!count || count > dx_get_limit(entries)) { @@ -875,15 +902,27 @@ dx_probe(struct ext4_filename *fname, struct inode *dir, dx_get_block(at))); frame->entries = entries; frame->at = at; - if (!indirect--) + + block = dx_get_block(at); + for (i = 0; i <= level; i++) { + if (blocks[i] == block) { + ext4_warning_inode(dir, + "dx entry: tree cycle block %u points back to block %u", + blocks[level], block); + goto fail; + } + } + if (++level > indirect) return frame; + blocks[level] = block; frame++; - frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX); + frame->bh = ext4_read_dirblock(dir, block, INDEX); if (IS_ERR(frame->bh)) { ret_err = (struct dx_frame *) 
frame->bh; frame->bh = NULL; goto fail; } + entries = ((struct dx_node *) frame->bh->b_data)->entries; if (dx_get_limit(entries) != dx_node_limit(dir)) { @@ -1461,10 +1500,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, de = (struct ext4_dir_entry_2 *)search_buf; dlimit = search_buf + buf_size; - while ((char *) de < dlimit) { + while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) { /* this code is executed quadratically often */ /* do minimal checking `by hand' */ - if ((char *) de + de->name_len <= dlimit && + if (de->name + de->name_len <= dlimit && ext4_match(dir, fname, de)) { /* found a match - just to be sure, do * a full check */ @@ -1901,7 +1940,8 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, struct dx_hash_info *hinfo, ext4_lblk_t *newblock) { unsigned blocksize = dir->i_sb->s_blocksize; - unsigned count, continued; + unsigned continued; + int count; struct buffer_head *bh2; u32 hash2; struct dx_map_entry *map; @@ -2195,8 +2235,16 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname, memcpy(data2, de, len); de = (struct ext4_dir_entry_2 *) data2; top = data2 + len; - while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) + while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) { + if (ext4_check_dir_entry(dir, NULL, de, bh2, data2, len, block, + (data2 + (blocksize - csum_size) - + (char *) de))) { + brelse(bh2); + brelse(bh); + return -EFSCORRUPTED; + } de = de2; + } de->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) - (char *) de, blocksize); @@ -3018,11 +3066,8 @@ bool ext4_empty_dir(struct inode *inode) de = (struct ext4_dir_entry_2 *) (bh->b_data + (offset & (sb->s_blocksize - 1))); if (ext4_check_dir_entry(inode, NULL, de, bh, - bh->b_data, bh->b_size, 0, offset)) { - offset = (offset | (sb->s_blocksize - 1)) + 1; - continue; - } - if (le32_to_cpu(de->inode)) { + bh->b_data, bh->b_size, 0, offset) || + le32_to_cpu(de->inode)) { brelse(bh); return false; } @@ -3610,6 +3655,9 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle, struct buffer_head *bh; if (!ext4_has_inline_data(inode)) { + struct ext4_dir_entry_2 *de; + unsigned int offset; + /* The first directory block must not be a hole, so * treat it as DIRENT_HTREE */ @@ -3618,9 +3666,30 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle, *retval = PTR_ERR(bh); return NULL; } - *parent_de = ext4_next_entry( - (struct ext4_dir_entry_2 *)bh->b_data, - inode->i_sb->s_blocksize); + + de = (struct ext4_dir_entry_2 *) bh->b_data; + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, + bh->b_size, 0, 0) || + le32_to_cpu(de->inode) != inode->i_ino || + strcmp(".", de->name)) { + EXT4_ERROR_INODE(inode, "directory missing '.'"); + brelse(bh); + *retval = -EFSCORRUPTED; + return NULL; + } + offset = ext4_rec_len_from_disk(de->rec_len, + inode->i_sb->s_blocksize); + de = ext4_next_entry(de, inode->i_sb->s_blocksize); + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, + bh->b_size, 0, offset) || + le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) { + EXT4_ERROR_INODE(inode, "directory missing '..'"); + brelse(bh); + *retval = -EFSCORRUPTED; + return NULL; + } + *parent_de = de; + return bh; } diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index defd2e10dfd1..4569075a7da0 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -137,8 +137,10 @@ static void ext4_finish_bio(struct bio *bio) continue; } clear_buffer_async_write(bh); - if (bio->bi_status) + 
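The ext4_search_dir() fix above tightens two bounds while walking variable-length directory records: the loop stops once less than a full fixed header remains, and the name is checked to lie inside the buffer before it is read. The same pattern as a free-standing parser; the record layout here is invented for the example, not the on-disk ext4 format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dirent {		/* fixed header followed by an inline name */
	uint32_t inode;
	uint16_t rec_len;
	uint8_t name_len;
	char name[];
};
#define BASE_LEN sizeof(struct dirent)

static void walk(const char *buf, size_t size, const char *want)
{
	const char *p = buf, *limit = buf + size;

	/* leave room for a complete header before dereferencing */
	while (p + BASE_LEN <= limit) {
		const struct dirent *de = (const void *)p;

		/* the name bytes must also lie inside the buffer */
		if (de->name + de->name_len <= limit &&
		    de->name_len == strlen(want) &&
		    memcmp(de->name, want, de->name_len) == 0)
			printf("found inode %u\n", (unsigned)de->inode);
		if (de->rec_len < BASE_LEN)
			return;	/* corrupt record, stop walking */
		p += de->rec_len;
	}
}

int main(void)
{
	char buf[32] = { 0 };
	struct dirent *de = (struct dirent *)buf;

	de->inode = 12;
	de->rec_len = sizeof(buf);
	de->name_len = 3;
	memcpy(de->name, "foo", 3);
	walk(buf, sizeof(buf), "foo");
	return 0;
}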
if (bio->bi_status) {
+			set_buffer_write_io_error(bh);
 			buffer_io_error(bh);
+		}
 	} while ((bh = bh->b_this_page) != head);
 	spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
 	if (!under_io) {
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 6513079c728b..c55ba0390021 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -52,6 +52,16 @@ int ext4_resize_begin(struct super_block *sb)
 	if (!capable(CAP_SYS_RESOURCE))
 		return -EPERM;
 
+	/*
+	 * If the number of reserved GDT blocks is non-zero, the
+	 * resize_inode feature should always be set.
+	 */
+	if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks &&
+	    !ext4_has_feature_resize_inode(sb)) {
+		ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
+		return -EFSCORRUPTED;
+	}
+
 	/*
 	 * If we are not using the primary superblock/GDT copy don't resize,
 	 * because the user tools have no way of handling this.  Probably a
@@ -1451,6 +1461,7 @@ static void ext4_update_super(struct super_block *sb,
 	 * Update the fs overhead information
 	 */
 	ext4_calculate_overhead(sb);
+	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);
 
 	if (test_opt(sb, DEBUG))
 		printk(KERN_DEBUG "EXT4-fs: added group %u:"
@@ -1946,6 +1957,16 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
 	}
 	brelse(bh);
 
+	/*
+	 * For bigalloc, trim the requested size to the nearest cluster
+	 * boundary to avoid creating an unusable filesystem. We do this
+	 * silently, instead of returning an error, to avoid breaking
+	 * callers that blindly resize the filesystem to the full size of
+	 * the underlying block device.
+	 */
+	if (ext4_has_feature_bigalloc(sb))
+		n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
+
 retry:
 	o_blocks_count = ext4_blocks_count(es);
 
@@ -2047,7 +2068,7 @@ retry:
 		goto out;
 	}
 
-	if (ext4_blocks_count(es) == n_blocks_count)
+	if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
 		goto out;
 
 	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c6397d029d1f..8904e213e7dc 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -188,19 +188,12 @@ int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io)
 
 int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait)
 {
-	if (trylock_buffer(bh)) {
-		if (wait)
-			return ext4_read_bh(bh, op_flags, NULL);
+	lock_buffer(bh);
+	if (!wait) {
 		ext4_read_bh_nowait(bh, op_flags, NULL);
 		return 0;
 	}
-	if (wait) {
-		wait_on_buffer(bh);
-		if (buffer_uptodate(bh))
-			return 0;
-		return -EIO;
-	}
-	return 0;
+	return ext4_read_bh(bh, op_flags, NULL);
 }
 
 /*
@@ -247,7 +240,8 @@ void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
 	struct buffer_head *bh = sb_getblk_gfp(sb, block, 0);
 
 	if (likely(bh)) {
-		ext4_read_bh_lock(bh, REQ_RAHEAD, false);
+		if (trylock_buffer(bh))
+			ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL);
 		brelse(bh);
 	}
 }
@@ -1176,18 +1170,23 @@ static void ext4_put_super(struct super_block *sb)
 	int aborted = 0;
 	int i, err;
 
-	ext4_unregister_li_request(sb);
-	ext4_quota_off_umount(sb);
-
-	destroy_workqueue(sbi->rsv_conversion_wq);
-
 	/*
 	 * Unregister sysfs before destroying jbd2 journal.
 	 * Since we could still access attr_journal_task attribute via sysfs
 	 * path which could have sbi->s_journal->j_task as NULL
+	 * Also unregister sysfs before flushing sbi->s_error_work: a user
+	 * may read /proc/fs/ext4/xx/mb_groups during umount, and if reading
+	 * metadata fails verification, error work gets queued; then
+	 * flush_stashed_error_work() calling start_this_handle() may
+	 * trigger a BUG_ON.
*/ ext4_unregister_sysfs(sb); + ext4_unregister_li_request(sb); + ext4_quota_off_umount(sb); + + destroy_workqueue(sbi->rsv_conversion_wq); + if (sbi->s_journal) { aborted = is_journal_aborted(sbi->s_journal); err = jbd2_journal_destroy(sbi->s_journal); @@ -1955,6 +1954,7 @@ static const struct mount_opts { MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET}, {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR}, + {Opt_commit, 0, MOPT_NO_EXT2}, {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, @@ -2078,6 +2078,12 @@ static int ext4_set_test_dummy_encryption(struct super_block *sb, struct ext4_sb_info *sbi = EXT4_SB(sb); int err; + if (!ext4_has_feature_encrypt(sb)) { + ext4_msg(sb, KERN_WARNING, + "test_dummy_encryption requires encrypt feature"); + return -1; + } + /* * This mount option is just for testing, and it's not worthwhile to * implement the extra complexity (e.g. RCU protection) that would be @@ -2105,11 +2111,13 @@ static int ext4_set_test_dummy_encryption(struct super_block *sb, return -1; } ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled"); + return 1; #else ext4_msg(sb, KERN_WARNING, - "Test dummy encryption mount option ignored"); + "test_dummy_encryption option not supported"); + return -1; + #endif - return 1; } static int handle_mount_opt(struct super_block *sb, char *opt, int token, @@ -3536,6 +3544,7 @@ static int ext4_lazyinit_thread(void *arg) unsigned long next_wakeup, cur; BUG_ON(NULL == eli); + set_freezable(); cont_thread: while (true) { @@ -3870,9 +3879,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp, ext4_fsblk_t first_block, last_block, b; ext4_group_t i, ngroups = ext4_get_groups_count(sb); int s, j, count = 0; + int has_super = ext4_bg_has_super(sb, grp); if (!ext4_has_feature_bigalloc(sb)) - return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) + + return (has_super + ext4_bg_num_gdb(sb, grp) + + (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) + sbi->s_itb_per_group + 2); first_block = le32_to_cpu(sbi->s_es->s_first_data_block) + @@ -4524,7 +4535,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sbi->s_inodes_per_block; sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb); sbi->s_sbh = bh; - sbi->s_mount_state = le16_to_cpu(es->s_state); + sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY; sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); @@ -4915,19 +4926,22 @@ no_journal: goto failed_mount_wq; } - if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) && - !ext4_has_feature_encrypt(sb)) { - ext4_set_feature_encrypt(sb); - ext4_commit_super(sb, 1); - } - /* * Get the # of file system overhead blocks from the * superblock if present. */ - if (es->s_overhead_clusters) - sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters); - else { + sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters); + /* ignore the precalculated value if it is ridiculous */ + if (sbi->s_overhead > ext4_blocks_count(es)) + sbi->s_overhead = 0; + /* + * If the bigalloc feature is not enabled recalculating the + * overhead doesn't take long, so we might as well just redo + * it to make sure we are using the correct value. 
+ */ + if (!ext4_has_feature_bigalloc(sb)) + sbi->s_overhead = 0; + if (sbi->s_overhead == 0) { err = ext4_calculate_overhead(sb); if (err) goto failed_mount_wq; @@ -5970,7 +5984,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if (err) goto restore_opts; } - sbi->s_mount_state = le16_to_cpu(es->s_state); + sbi->s_mount_state = (le16_to_cpu(es->s_state) & + ~EXT4_FC_REPLAY); err = ext4_setup_super(sb, es, 0); if (err) @@ -6242,7 +6257,7 @@ static int ext4_write_info(struct super_block *sb, int type) handle_t *handle; /* Data block + inode block */ - handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2); + handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit_info(sb, type); diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c index 00e3cbde472e..35be8e7ec2a0 100644 --- a/fs/ext4/verity.c +++ b/fs/ext4/verity.c @@ -370,13 +370,14 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode, pgoff_t index, unsigned long num_ra_pages) { - DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index); struct page *page; index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT; page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED); if (!page || !PageUptodate(page)) { + DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index); + if (page) put_page(page); else if (num_ra_pages > 1) diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 5462f26907c1..38531c5e16c6 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -2167,8 +2167,9 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, struct ext4_inode *raw_inode; int error; - if (EXT4_I(inode)->i_extra_isize == 0) + if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) return 0; + raw_inode = ext4_raw_inode(&is->iloc); header = IHDR(inode, raw_inode); is->s.base = is->s.first = IFIRST(header); @@ -2196,8 +2197,9 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, struct ext4_xattr_search *s = &is->s; int error; - if (EXT4_I(inode)->i_extra_isize == 0) + if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) return -ENOSPC; + error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); if (error) return error; diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h index 730b91fa0dd7..87e5863bb493 100644 --- a/fs/ext4/xattr.h +++ b/fs/ext4/xattr.h @@ -95,6 +95,19 @@ struct ext4_xattr_entry { #define EXT4_ZERO_XATTR_VALUE ((void *)-1) +/* + * If we want to add an xattr to the inode, we should make sure that + * i_extra_isize is not 0 and that the inode size is not less than + * EXT4_GOOD_OLD_INODE_SIZE + extra_isize + pad. 
+ * EXT4_GOOD_OLD_INODE_SIZE extra_isize header entry pad data + * |--------------------------|------------|------|---------|---|-------| + */ +#define EXT4_INODE_HAS_XATTR_SPACE(inode) \ + ((EXT4_I(inode)->i_extra_isize != 0) && \ + (EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize + \ + sizeof(struct ext4_xattr_ibody_header) + EXT4_XATTR_PAD <= \ + EXT4_INODE_SIZE((inode)->i_sb))) + struct ext4_xattr_info { const char *name; const void *value; diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 7bc120937874..84c734e20c8a 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -25,12 +25,16 @@ static struct kmem_cache *ino_entry_slab; struct kmem_cache *f2fs_inode_entry_slab; -void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io) +void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, + unsigned char reason) { f2fs_build_fault_attr(sbi, 0, 0); set_ckpt_flags(sbi, CP_ERROR_FLAG); - if (!end_io) + if (!end_io) { f2fs_flush_merged_writes(sbi); + + f2fs_handle_stop(sbi, reason); + } } /* @@ -120,7 +124,7 @@ retry: if (PTR_ERR(page) == -EIO && ++count <= DEFAULT_RETRY_IO_COUNT) goto retry; - f2fs_stop_checkpoint(sbi, false); + f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_META_PAGE); } return page; } @@ -138,7 +142,7 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr, unsigned int segno, offset; bool exist; - if (type != DATA_GENERIC_ENHANCE && type != DATA_GENERIC_ENHANCE_READ) + if (type == DATA_GENERIC) return true; segno = GET_SEGNO(sbi, blkaddr); @@ -146,11 +150,18 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr, se = get_seg_entry(sbi, segno); exist = f2fs_test_bit(offset, se->cur_valid_map); + if (exist && type == DATA_GENERIC_ENHANCE_UPDATE) { + f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d", + blkaddr, exist); + set_sbi_flag(sbi, SBI_NEED_FSCK); + return exist; + } + if (!exist && type == DATA_GENERIC_ENHANCE) { f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d", blkaddr, exist); set_sbi_flag(sbi, SBI_NEED_FSCK); - WARN_ON(1); + dump_stack(); } return exist; } @@ -183,12 +194,13 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, case DATA_GENERIC: case DATA_GENERIC_ENHANCE: case DATA_GENERIC_ENHANCE_READ: + case DATA_GENERIC_ENHANCE_UPDATE: if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || blkaddr < MAIN_BLKADDR(sbi))) { f2fs_warn(sbi, "access invalid blkaddr:%u", blkaddr); set_sbi_flag(sbi, SBI_NEED_FSCK); - WARN_ON(1); + dump_stack(); return false; } else { return __is_bitmap_valid(sbi, blkaddr, type); @@ -1040,7 +1052,8 @@ void f2fs_remove_dirty_inode(struct inode *inode) spin_unlock(&sbi->inode_lock[type]); } -int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) +int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type, + bool from_cp) { struct list_head *head; struct inode *inode; @@ -1075,11 +1088,15 @@ retry: if (inode) { unsigned long cur_ino = inode->i_ino; - F2FS_I(inode)->cp_task = current; + if (from_cp) + F2FS_I(inode)->cp_task = current; + F2FS_I(inode)->wb_task = current; filemap_fdatawrite(inode->i_mapping); - F2FS_I(inode)->cp_task = NULL; + F2FS_I(inode)->wb_task = NULL; + if (from_cp) + F2FS_I(inode)->cp_task = NULL; iput(inode); /* We need to give cpu to another writers. 
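
As an aside on the EXT4_INODE_HAS_XATTR_SPACE macro above, here is a minimal userspace sketch of the same arithmetic; the constants are assumptions based on common ext4 layouts (128-byte EXT4_GOOD_OLD_INODE_SIZE, 4-byte ibody header, 4-byte xattr pad), not values taken from this patch:

#include <stdbool.h>
#include <stdio.h>

#define GOOD_OLD_INODE_SIZE 128	/* assumed EXT4_GOOD_OLD_INODE_SIZE */
#define IBODY_HEADER_SIZE     4	/* assumed sizeof(ext4_xattr_ibody_header) */
#define XATTR_PAD             4	/* assumed EXT4_XATTR_PAD */

/* Mirrors the macro: in-inode xattr space must exist past the old body. */
static bool has_xattr_space(unsigned int extra_isize, unsigned int inode_size)
{
	return extra_isize != 0 &&
	       GOOD_OLD_INODE_SIZE + extra_isize +
	       IBODY_HEADER_SIZE + XATTR_PAD <= inode_size;
}

int main(void)
{
	/* 256-byte inode, extra_isize 32: 128 + 32 + 4 + 4 = 168 <= 256 */
	printf("%d\n", has_xattr_space(32, 256));	/* prints 1 */
	/* legacy 128-byte inodes can never hold in-inode xattrs */
	printf("%d\n", has_xattr_space(0, 128));	/* prints 0 */
	return 0;
}
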
*/ @@ -1208,7 +1225,7 @@ retry_flush_dents: /* write all the dirty dentry pages */ if (get_pages(sbi, F2FS_DIRTY_DENTS)) { f2fs_unlock_all(sbi); - err = f2fs_sync_dirty_inodes(sbi, DIR_INODE); + err = f2fs_sync_dirty_inodes(sbi, DIR_INODE, true); if (err) return err; cond_resched(); @@ -1863,15 +1880,27 @@ int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi) void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi) { struct ckpt_req_control *cprc = &sbi->cprc_info; + struct task_struct *ckpt_task; - if (cprc->f2fs_issue_ckpt) { - struct task_struct *ckpt_task = cprc->f2fs_issue_ckpt; + if (!cprc->f2fs_issue_ckpt) + return; - cprc->f2fs_issue_ckpt = NULL; - kthread_stop(ckpt_task); + ckpt_task = cprc->f2fs_issue_ckpt; + cprc->f2fs_issue_ckpt = NULL; + kthread_stop(ckpt_task); - flush_remained_ckpt_reqs(sbi, NULL); - } + f2fs_flush_ckpt_thread(sbi); +} + +void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi) +{ + struct ckpt_req_control *cprc = &sbi->cprc_info; + + flush_remained_ckpt_reqs(sbi, NULL); + + /* Let's wait for the previous dispatched checkpoint. */ + while (atomic_read(&cprc->queued_ckpt)) + io_schedule_timeout(DEFAULT_IO_TIMEOUT); } void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 7a4abd303fa0..c448eaa08cf2 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -737,14 +737,19 @@ out: return ret; } -void f2fs_decompress_cluster(struct decompress_io_ctx *dic) +static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, + bool pre_alloc); +static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic, + bool bypass_destroy_callback, bool pre_alloc); + +void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task) { struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode); struct f2fs_inode_info *fi = F2FS_I(dic->inode); const struct f2fs_compress_ops *cops = f2fs_cops[fi->i_compress_algorithm]; + bool bypass_callback = false; int ret; - int i; trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx, dic->cluster_size, fi->i_compress_algorithm); @@ -754,41 +759,10 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic) goto out_end_io; } - dic->tpages = page_array_alloc(dic->inode, dic->cluster_size); - if (!dic->tpages) { - ret = -ENOMEM; - goto out_end_io; - } - - for (i = 0; i < dic->cluster_size; i++) { - if (dic->rpages[i]) { - dic->tpages[i] = dic->rpages[i]; - continue; - } - - dic->tpages[i] = f2fs_compress_alloc_page(); - if (!dic->tpages[i]) { - ret = -ENOMEM; - goto out_end_io; - } - } - - if (cops->init_decompress_ctx) { - ret = cops->init_decompress_ctx(dic); - if (ret) - goto out_end_io; - } - - dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size); - if (!dic->rbuf) { - ret = -ENOMEM; - goto out_destroy_decompress_ctx; - } - - dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages); - if (!dic->cbuf) { - ret = -ENOMEM; - goto out_vunmap_rbuf; + ret = f2fs_prepare_decomp_mem(dic, false); + if (ret) { + bypass_callback = true; + goto out_release; } dic->clen = le32_to_cpu(dic->cbuf->clen); @@ -796,7 +770,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic) if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) { ret = -EFSCORRUPTED; - goto out_vunmap_cbuf; + goto out_release; } ret = cops->decompress_pages(dic); @@ -817,17 +791,13 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic) } } -out_vunmap_cbuf: - vm_unmap_ram(dic->cbuf, dic->nr_cpages); -out_vunmap_rbuf: - vm_unmap_ram(dic->rbuf, dic->cluster_size); -out_destroy_decompress_ctx: - if 
(cops->destroy_decompress_ctx) - cops->destroy_decompress_ctx(dic); +out_release: + f2fs_release_decomp_mem(dic, bypass_callback, false); + out_end_io: trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx, dic->clen, ret); - f2fs_decompress_end_io(dic, ret); + f2fs_decompress_end_io(dic, ret, in_task); } /* @@ -837,7 +807,7 @@ out_end_io: * (or in the case of a failure, cleans up without actually decompressing). */ void f2fs_end_read_compressed_page(struct page *page, bool failed, - block_t blkaddr) + block_t blkaddr, bool in_task) { struct decompress_io_ctx *dic = (struct decompress_io_ctx *)page_private(page); @@ -847,12 +817,12 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed, if (failed) WRITE_ONCE(dic->failed, true); - else if (blkaddr) + else if (blkaddr && in_task) f2fs_cache_compressed_page(sbi, page, dic->inode->i_ino, blkaddr); if (atomic_dec_and_test(&dic->remaining_pages)) - f2fs_decompress_cluster(dic); + f2fs_decompress_cluster(dic, in_task); } static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index) @@ -1469,6 +1439,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc, if (cluster_may_compress(cc)) { err = f2fs_compress_pages(cc); if (err == -EAGAIN) { + add_compr_block_stat(cc->inode, cc->cluster_size); goto write; } else if (err) { f2fs_put_rpages_wbc(cc, wbc, true, 1); @@ -1491,13 +1462,82 @@ destroy_out: return err; } -static void f2fs_free_dic(struct decompress_io_ctx *dic); +static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi, + bool pre_alloc) +{ + return pre_alloc ^ f2fs_low_mem_mode(sbi); +} + +static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, + bool pre_alloc) +{ + const struct f2fs_compress_ops *cops = + f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm]; + int i; + + if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc)) + return 0; + + dic->tpages = page_array_alloc(dic->inode, dic->cluster_size); + if (!dic->tpages) + return -ENOMEM; + + for (i = 0; i < dic->cluster_size; i++) { + if (dic->rpages[i]) { + dic->tpages[i] = dic->rpages[i]; + continue; + } + + dic->tpages[i] = f2fs_compress_alloc_page(); + if (!dic->tpages[i]) + return -ENOMEM; + } + + dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size); + if (!dic->rbuf) + return -ENOMEM; + + dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages); + if (!dic->cbuf) + return -ENOMEM; + + if (cops->init_decompress_ctx) { + int ret = cops->init_decompress_ctx(dic); + + if (ret) + return ret; + } + + return 0; +} + +static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic, + bool bypass_destroy_callback, bool pre_alloc) +{ + const struct f2fs_compress_ops *cops = + f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm]; + + if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc)) + return; + + if (!bypass_destroy_callback && cops->destroy_decompress_ctx) + cops->destroy_decompress_ctx(dic); + + if (dic->cbuf) + vm_unmap_ram(dic->cbuf, dic->nr_cpages); + + if (dic->rbuf) + vm_unmap_ram(dic->rbuf, dic->cluster_size); +} + +static void f2fs_free_dic(struct decompress_io_ctx *dic, + bool bypass_destroy_callback); struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) { struct decompress_io_ctx *dic; pgoff_t start_idx = start_idx_of_cluster(cc); - int i; + int i, ret; dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS); if (!dic) @@ -1525,32 +1565,43 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) dic->nr_rpages = cc->cluster_size; dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages); 
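
/*
 * Editorial note, not part of the patch: the pre_alloc ^ f2fs_low_mem_mode()
 * test in allow_memalloc_for_decomp() above makes exactly one of the two
 * call sites manage the decompression buffers:
 *
 *   low_mem_mode  pre_alloc  allocate/free at this call site?
 *   false         true       yes - buffers live from dic-allocation time
 *   false         false      no
 *   true          true       no
 *   true          false      yes - buffers exist only while decompressing
 *
 * So low-memory mode trades extra allocations in the read I/O path for a
 * much shorter lifetime of the vmap'd buffers.
 */
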
- if (!dic->cpages) + if (!dic->cpages) { + ret = -ENOMEM; goto out_free; + } for (i = 0; i < dic->nr_cpages; i++) { struct page *page; page = f2fs_compress_alloc_page(); - if (!page) + if (!page) { + ret = -ENOMEM; goto out_free; + } f2fs_set_compressed_page(page, cc->inode, start_idx + i + 1, dic); dic->cpages[i] = page; } + ret = f2fs_prepare_decomp_mem(dic, true); + if (ret) + goto out_free; + return dic; out_free: - f2fs_free_dic(dic); - return ERR_PTR(-ENOMEM); + f2fs_free_dic(dic, true); + return ERR_PTR(ret); } -static void f2fs_free_dic(struct decompress_io_ctx *dic) +static void f2fs_free_dic(struct decompress_io_ctx *dic, + bool bypass_destroy_callback) { int i; + f2fs_release_decomp_mem(dic, bypass_destroy_callback, true); + if (dic->tpages) { for (i = 0; i < dic->cluster_size; i++) { if (dic->rpages[i]) @@ -1575,17 +1626,33 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic) kmem_cache_free(dic_entry_slab, dic); } -static void f2fs_put_dic(struct decompress_io_ctx *dic) +static void f2fs_late_free_dic(struct work_struct *work) { - if (refcount_dec_and_test(&dic->refcnt)) - f2fs_free_dic(dic); + struct decompress_io_ctx *dic = + container_of(work, struct decompress_io_ctx, free_work); + + f2fs_free_dic(dic, false); +} + +static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task) +{ + if (refcount_dec_and_test(&dic->refcnt)) { + if (in_task) { + f2fs_free_dic(dic, false); + } else { + INIT_WORK(&dic->free_work, f2fs_late_free_dic); + queue_work(F2FS_I_SB(dic->inode)->post_read_wq, + &dic->free_work); + } + } } /* * Update and unlock the cluster's pagecache pages, and release the reference to * the decompress_io_ctx that was being held for I/O completion. */ -static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed) +static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed, + bool in_task) { int i; @@ -1606,7 +1673,7 @@ static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed) unlock_page(rpage); } - f2fs_put_dic(dic); + f2fs_put_dic(dic, in_task); } static void f2fs_verify_cluster(struct work_struct *work) @@ -1623,14 +1690,15 @@ static void f2fs_verify_cluster(struct work_struct *work) SetPageError(rpage); } - __f2fs_decompress_end_io(dic, false); + __f2fs_decompress_end_io(dic, false, true); } /* * This is called when a compressed cluster has been decompressed * (or failed to be read and/or decompressed). */ -void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed) +void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed, + bool in_task) { if (!failed && dic->need_verity) { /* @@ -1642,7 +1710,7 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed) INIT_WORK(&dic->verity_work, f2fs_verify_cluster); fsverity_enqueue_verify_work(&dic->verity_work); } else { - __f2fs_decompress_end_io(dic, failed); + __f2fs_decompress_end_io(dic, failed, in_task); } } @@ -1651,12 +1719,36 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed) * * This is called when the page is no longer needed and can be freed. */ -void f2fs_put_page_dic(struct page *page) +void f2fs_put_page_dic(struct page *page, bool in_task) { struct decompress_io_ctx *dic = (struct decompress_io_ctx *)page_private(page); - f2fs_put_dic(dic); + f2fs_put_dic(dic, in_task); +} + +/* + * check whether cluster blocks are contiguous, and add extent cache entry + * only if cluster blocks are logically and physically contiguous. 
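
The f2fs_put_dic() change above is an instance of a common kernel pattern: free immediately when running in task context, defer to a workqueue otherwise. A generic, hypothetical sketch of that pattern (names invented, not f2fs API):

#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t refcnt;
	struct work_struct free_work;
};

static void obj_late_free(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, free_work);

	kfree(o);	/* heavy teardown (e.g. vm_unmap_ram) is safe here */
}

static void obj_put(struct obj *o, bool in_task)
{
	if (!refcount_dec_and_test(&o->refcnt))
		return;
	if (in_task) {
		kfree(o);	/* process context: free directly */
	} else {
		/* softirq context: sleeping teardown not allowed, defer */
		INIT_WORK(&o->free_work, obj_late_free);
		schedule_work(&o->free_work);
	}
}
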
+ */ +unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) +{ + bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR; + int i = compressed ? 1 : 0; + block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page, + dn->ofs_in_node + i); + + for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) { + block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, + dn->ofs_in_node + i); + + if (!__is_valid_data_blkaddr(blkaddr)) + break; + if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr) + return 0; + } + + return compressed ? i - 1 : i; } const struct address_space_operations f2fs_compress_aops = { diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 653ab9564c56..17ead29e21ab 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -119,7 +119,7 @@ struct bio_post_read_ctx { unsigned int enabled_steps; }; -static void f2fs_finish_read_bio(struct bio *bio) +static void f2fs_finish_read_bio(struct bio *bio, bool in_task) { struct bio_vec *bv; struct bvec_iter_all iter_all; @@ -133,8 +133,9 @@ static void f2fs_finish_read_bio(struct bio *bio) if (f2fs_is_compressed_page(page)) { if (bio->bi_status) - f2fs_end_read_compressed_page(page, true, 0); - f2fs_put_page_dic(page); + f2fs_end_read_compressed_page(page, true, 0, + in_task); + f2fs_put_page_dic(page, in_task); continue; } @@ -191,7 +192,7 @@ static void f2fs_verify_bio(struct work_struct *work) fsverity_verify_bio(bio); } - f2fs_finish_read_bio(bio); + f2fs_finish_read_bio(bio, true); } /* @@ -203,7 +204,7 @@ static void f2fs_verify_bio(struct work_struct *work) * can involve reading verity metadata pages from the file, and these verity * metadata pages may be encrypted and/or compressed. */ -static void f2fs_verify_and_finish_bio(struct bio *bio) +static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task) { struct bio_post_read_ctx *ctx = bio->bi_private; @@ -211,7 +212,7 @@ static void f2fs_verify_and_finish_bio(struct bio *bio) INIT_WORK(&ctx->work, f2fs_verify_bio); fsverity_enqueue_verify_work(&ctx->work); } else { - f2fs_finish_read_bio(bio); + f2fs_finish_read_bio(bio, in_task); } } @@ -224,7 +225,8 @@ static void f2fs_verify_and_finish_bio(struct bio *bio) * that the bio includes at least one compressed page. The actual decompression * is done on a per-cluster basis, not a per-bio basis. */ -static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx) +static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx, + bool in_task) { struct bio_vec *bv; struct bvec_iter_all iter_all; @@ -237,7 +239,7 @@ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx) /* PG_error was set if decryption failed. 
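
For reference, what f2fs_cluster_blocks_are_contiguous() above yields for a hypothetical 4-block compressed cluster (slot 0 holds COMPRESS_ADDR; the block numbers are invented):

/*
 *   slots [COMPRESS_ADDR, 100, 101, 102] -> 3  (physically consecutive:
 *                                               one cacheable extent)
 *   slots [COMPRESS_ADDR, 100, 105, 106] -> 0  (discontiguous: not cached)
 *   slots [COMPRESS_ADDR, 100, NULL_ADDR, x] -> 1 (walk stops at the hole)
 */
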
*/ if (f2fs_is_compressed_page(page)) f2fs_end_read_compressed_page(page, PageError(page), - blkaddr); + blkaddr, in_task); else all_compressed = false; @@ -262,15 +264,16 @@ static void f2fs_post_read_work(struct work_struct *work) fscrypt_decrypt_bio(ctx->bio); if (ctx->enabled_steps & STEP_DECOMPRESS) - f2fs_handle_step_decompress(ctx); + f2fs_handle_step_decompress(ctx, true); - f2fs_verify_and_finish_bio(ctx->bio); + f2fs_verify_and_finish_bio(ctx->bio, true); } static void f2fs_read_end_io(struct bio *bio) { struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio)); struct bio_post_read_ctx *ctx = bio->bi_private; + bool intask = in_task(); if (time_to_inject(sbi, FAULT_READ_IO)) { f2fs_show_injection_info(sbi, FAULT_READ_IO); @@ -278,16 +281,29 @@ static void f2fs_read_end_io(struct bio *bio) } if (bio->bi_status) { - f2fs_finish_read_bio(bio); + f2fs_finish_read_bio(bio, intask); return; } - if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) { - INIT_WORK(&ctx->work, f2fs_post_read_work); - queue_work(ctx->sbi->post_read_wq, &ctx->work); - } else { - f2fs_verify_and_finish_bio(bio); + if (ctx) { + unsigned int enabled_steps = ctx->enabled_steps & + (STEP_DECRYPT | STEP_DECOMPRESS); + + /* + * If we have only decompression step between decompression and + * decrypt, we don't need post processing for this. + */ + if (enabled_steps == STEP_DECOMPRESS && + !f2fs_low_mem_mode(sbi)) { + f2fs_handle_step_decompress(ctx, intask); + } else if (enabled_steps) { + INIT_WORK(&ctx->work, f2fs_post_read_work); + queue_work(ctx->sbi->post_read_wq, &ctx->work); + return; + } } + + f2fs_verify_and_finish_bio(bio, intask); } static void f2fs_write_end_io(struct bio *bio) @@ -311,7 +327,8 @@ static void f2fs_write_end_io(struct bio *bio) mempool_free(page, sbi->write_io_dummy); if (unlikely(bio->bi_status)) - f2fs_stop_checkpoint(sbi, true); + f2fs_stop_checkpoint(sbi, true, + STOP_CP_REASON_WRITE_FAIL); continue; } @@ -327,7 +344,8 @@ static void f2fs_write_end_io(struct bio *bio) if (unlikely(bio->bi_status)) { mapping_set_error(page->mapping, -EIO); if (type == F2FS_WB_CP_DATA) - f2fs_stop_checkpoint(sbi, true); + f2fs_stop_checkpoint(sbi, true, + STOP_CP_REASON_WRITE_FAIL); } f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) && @@ -1083,7 +1101,7 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr) { dn->data_blkaddr = blkaddr; f2fs_set_data_blkaddr(dn); - f2fs_update_extent_cache(dn); + f2fs_update_read_extent_cache(dn); } /* dn->ofs_in_node will be returned with up-to-date last block pointer */ @@ -1149,10 +1167,10 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index) { - struct extent_info ei = {0, 0, 0}; + struct extent_info ei = {0, }; struct inode *inode = dn->inode; - if (f2fs_lookup_extent_cache(inode, index, &ei)) { + if (f2fs_lookup_read_extent_cache(inode, index, &ei)) { dn->data_blkaddr = ei.blk + index - ei.fofs; return 0; } @@ -1166,14 +1184,14 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, struct address_space *mapping = inode->i_mapping; struct dnode_of_data dn; struct page *page; - struct extent_info ei = {0,0,0}; + struct extent_info ei = {0, }; int err; page = f2fs_grab_cache_page(mapping, index, for_write); if (!page) return ERR_PTR(-ENOMEM); - if (f2fs_lookup_extent_cache(inode, index, &ei)) { + if (f2fs_lookup_read_extent_cache(inode, index, &ei)) { dn.data_blkaddr = ei.blk + index - ei.fofs; if 
(!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr, DATA_GENERIC_ENHANCE_READ)) { @@ -1464,7 +1482,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int err = 0, ofs = 1; unsigned int ofs_in_node, last_ofs_in_node; blkcnt_t prealloc; - struct extent_info ei = {0,0,0}; + struct extent_info ei = {0, }; block_t blkaddr; unsigned int start_pgofs; @@ -1478,7 +1496,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, pgofs = (pgoff_t)map->m_lblk; end = pgofs + maxblocks; - if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) { + if (!create && f2fs_lookup_read_extent_cache(inode, pgofs, &ei)) { if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO && map->m_may_create) goto next_dnode; @@ -1652,7 +1670,7 @@ skip: if (map->m_flags & F2FS_MAP_MAPPED) { unsigned int ofs = start_pgofs - map->m_lblk; - f2fs_update_extent_cache_range(&dn, + f2fs_update_read_extent_cache_range(&dn, start_pgofs, map->m_pblk + ofs, map->m_len - ofs); } @@ -1677,7 +1695,7 @@ sync_out: if (map->m_flags & F2FS_MAP_MAPPED) { unsigned int ofs = start_pgofs - map->m_lblk; - f2fs_update_extent_cache_range(&dn, + f2fs_update_read_extent_cache_range(&dn, start_pgofs, map->m_pblk + ofs, map->m_len - ofs); } @@ -2154,6 +2172,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, sector_t last_block_in_file; const unsigned blocksize = blks_to_bytes(inode, 1); struct decompress_io_ctx *dic = NULL; + struct extent_info ei = {}; + bool from_dnode = true; int i; int ret = 0; @@ -2186,6 +2206,12 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, if (f2fs_cluster_is_empty(cc)) goto out; + if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei)) + from_dnode = false; + + if (!from_dnode) + goto skip_reading_dnode; + set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); if (ret) @@ -2193,11 +2219,13 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR); +skip_reading_dnode: for (i = 1; i < cc->cluster_size; i++) { block_t blkaddr; - blkaddr = data_blkaddr(dn.inode, dn.node_page, - dn.ofs_in_node + i); + blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page, + dn.ofs_in_node + i) : + ei.blk + i - 1; if (!__is_valid_data_blkaddr(blkaddr)) break; @@ -2207,6 +2235,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, goto out_put_dnode; } cc->nr_cpages++; + + if (!from_dnode && i >= ei.c_len) + break; } /* nothing to decompress */ @@ -2226,14 +2257,15 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, block_t blkaddr; struct bio_post_read_ctx *ctx; - blkaddr = data_blkaddr(dn.inode, dn.node_page, - dn.ofs_in_node + i + 1); + blkaddr = from_dnode ? 
data_blkaddr(dn.inode, dn.node_page, + dn.ofs_in_node + i + 1) : + ei.blk + i; f2fs_wait_on_block_writeback(inode, blkaddr); if (f2fs_load_compressed_page(sbi, page, blkaddr)) { if (atomic_dec_and_test(&dic->remaining_pages)) - f2fs_decompress_cluster(dic); + f2fs_decompress_cluster(dic, true); continue; } @@ -2251,7 +2283,7 @@ submit_and_realloc: page->index, for_write); if (IS_ERR(bio)) { ret = PTR_ERR(bio); - f2fs_decompress_end_io(dic, ret); + f2fs_decompress_end_io(dic, ret, true); f2fs_put_dnode(&dn); *bio_ret = NULL; return ret; @@ -2272,13 +2304,15 @@ submit_and_realloc: *last_block_in_bio = blkaddr; } - f2fs_put_dnode(&dn); + if (from_dnode) + f2fs_put_dnode(&dn); *bio_ret = bio; return 0; out_put_dnode: - f2fs_put_dnode(&dn); + if (from_dnode) + f2fs_put_dnode(&dn); out: for (i = 0; i < cc->cluster_size; i++) { if (cc->rpages[i]) { @@ -2479,6 +2513,9 @@ static inline bool check_inplace_update_policy(struct inode *inode, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); unsigned int policy = SM_I(sbi)->ipu_policy; + if (policy & (0x1 << F2FS_IPU_HONOR_OPU_WRITE) && + is_inode_flag_set(inode, FI_OPU_WRITE)) + return false; if (policy & (0x1 << F2FS_IPU_FORCE)) return true; if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi)) @@ -2521,7 +2558,7 @@ bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio) return true; /* if this is cold file, we should overwrite to avoid fragmentation */ - if (file_is_cold(inode)) + if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE)) return true; return check_inplace_update_policy(inode, fio); @@ -2549,6 +2586,9 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio) if (is_inode_flag_set(inode, FI_ALIGNED_WRITE)) return true; + if (is_inode_flag_set(inode, FI_OPU_WRITE)) + return true; + if (fio) { if (page_private_gcing(fio->page)) return true; @@ -2576,14 +2616,14 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio) struct page *page = fio->page; struct inode *inode = page->mapping->host; struct dnode_of_data dn; - struct extent_info ei = {0,0,0}; + struct extent_info ei = {0, }; struct node_info ni; bool ipu_force = false; int err = 0; set_new_dnode(&dn, inode, NULL, NULL, 0); if (need_inplace_update(fio) && - f2fs_lookup_extent_cache(inode, page->index, &ei)) { + f2fs_lookup_read_extent_cache(inode, page->index, &ei)) { fio->old_blkaddr = ei.blk + page->index - ei.fofs; if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, @@ -2714,6 +2754,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted, .submitted = false, .compr_blocks = compr_blocks, .need_lock = LOCK_RETRY, + .post_read = f2fs_post_read_required(inode), .io_type = io_type, .io_wbc = wbc, .bio = bio, @@ -2829,7 +2870,7 @@ out: } unlock_page(page); if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) && - !F2FS_I(inode)->cp_task && allow_balance) + !F2FS_I(inode)->wb_task && allow_balance) f2fs_balance_fs(sbi, need_balance_fs); if (unlikely(f2fs_cp_error(sbi))) { @@ -3127,7 +3168,7 @@ static inline bool __should_serialize_io(struct inode *inode, struct writeback_control *wbc) { /* to avoid deadlock in path of data flush */ - if (F2FS_I(inode)->cp_task) + if (F2FS_I(inode)->wb_task) return false; if (!S_ISREG(inode->i_mode)) @@ -3172,8 +3213,8 @@ static int __f2fs_write_data_pages(struct address_space *mapping, f2fs_available_free_memory(sbi, DIRTY_DENTS)) goto skip_write; - /* skip writing during file defragment */ - if (is_inode_flag_set(inode, FI_DO_DEFRAG)) + /* skip writing in file defragment preparing 
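
A hedged sketch of how the F2FS_IPU_HONOR_OPU_WRITE policy bit added above is tested; the bit positions here are assumptions consistent with the sysfs documentation (0x80 for honor-OPU), not definitions copied from f2fs headers:

#include <stdbool.h>

#define F2FS_IPU_FORCE           0	/* assumed: 0x1 << 0 == 0x01 */
#define F2FS_IPU_HONOR_OPU_WRITE 7	/* assumed: 0x1 << 7 == 0x80 */

/* Honor-OPU vetoes every other in-place-update bit, as in the patch. */
static bool should_use_ipu(unsigned int policy, bool opu_requested)
{
	if ((policy & (0x1 << F2FS_IPU_HONOR_OPU_WRITE)) && opu_requested)
		return false;
	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	return false;	/* remaining policy bits elided for brevity */
}
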
stage */ + if (is_inode_flag_set(inode, FI_SKIP_WRITES)) goto skip_write; trace_f2fs_writepages(mapping->host, wbc, DATA); @@ -3256,7 +3297,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi, struct dnode_of_data dn; struct page *ipage; bool locked = false; - struct extent_info ei = {0,0,0}; + struct extent_info ei = {0, }; int err = 0; int flag; @@ -3307,7 +3348,7 @@ restart: } else if (locked) { err = f2fs_get_block(&dn, index); } else { - if (f2fs_lookup_extent_cache(inode, index, &ei)) { + if (f2fs_lookup_read_extent_cache(inode, index, &ei)) { dn.data_blkaddr = ei.blk + index - ei.fofs; } else { /* hole case */ @@ -3955,6 +3996,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk, f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); set_inode_flag(inode, FI_ALIGNED_WRITE); + set_inode_flag(inode, FI_OPU_WRITE); for (; secidx < end_sec; secidx++) { f2fs_down_write(&sbi->pin_sem); @@ -3963,7 +4005,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk, f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false); f2fs_unlock_op(sbi); - set_inode_flag(inode, FI_DO_DEFRAG); + set_inode_flag(inode, FI_SKIP_WRITES); for (blkofs = 0; blkofs < blk_per_sec; blkofs++) { struct page *page; @@ -3980,7 +4022,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk, f2fs_put_page(page, 1); } - clear_inode_flag(inode, FI_DO_DEFRAG); + clear_inode_flag(inode, FI_SKIP_WRITES); ret = filemap_fdatawrite(inode->i_mapping); @@ -3991,7 +4033,8 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk, } done: - clear_inode_flag(inode, FI_DO_DEFRAG); + clear_inode_flag(inode, FI_SKIP_WRITES); + clear_inode_flag(inode, FI_OPU_WRITE); clear_inode_flag(inode, FI_ALIGNED_WRITE); f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 53ed1e9191f0..f33e64acaf2b 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -72,15 +72,26 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->main_area_zones = si->main_area_sections / le32_to_cpu(raw_super->secs_per_zone); - /* validation check of the segment numbers */ + /* general extent cache stats */ + for (i = 0; i < NR_EXTENT_CACHES; i++) { + struct extent_tree_info *eti = &sbi->extent_tree[i]; + + si->hit_cached[i] = atomic64_read(&sbi->read_hit_cached[i]); + si->hit_rbtree[i] = atomic64_read(&sbi->read_hit_rbtree[i]); + si->total_ext[i] = atomic64_read(&sbi->total_hit_ext[i]); + si->hit_total[i] = si->hit_cached[i] + si->hit_rbtree[i]; + si->ext_tree[i] = atomic_read(&eti->total_ext_tree); + si->zombie_tree[i] = atomic_read(&eti->total_zombie_tree); + si->ext_node[i] = atomic_read(&eti->total_ext_node); + } + /* read extent_cache only */ si->hit_largest = atomic64_read(&sbi->read_hit_largest); - si->hit_cached = atomic64_read(&sbi->read_hit_cached); - si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree); - si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree; - si->total_ext = atomic64_read(&sbi->total_hit_ext); - si->ext_tree = atomic_read(&sbi->total_ext_tree); - si->zombie_tree = atomic_read(&sbi->total_zombie_tree); - si->ext_node = atomic_read(&sbi->total_ext_node); + si->hit_total[EX_READ] += si->hit_largest; + + /* block age extent_cache only */ + si->allocated_data_blocks = atomic64_read(&sbi->allocated_data_blocks); + + /* validation check of the segment numbers */ si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES); si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS); si->ndirty_meta = get_pages(sbi, 
F2FS_DIRTY_META); @@ -299,10 +310,16 @@ get_cache: si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages); for (i = 0; i < MAX_INO_ENTRY; i++) si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry); - si->cache_mem += atomic_read(&sbi->total_ext_tree) * + + for (i = 0; i < NR_EXTENT_CACHES; i++) { + struct extent_tree_info *eti = &sbi->extent_tree[i]; + + si->ext_mem[i] = atomic_read(&eti->total_ext_tree) * sizeof(struct extent_tree); - si->cache_mem += atomic_read(&sbi->total_ext_node) * + si->ext_mem[i] += atomic_read(&eti->total_ext_node) * sizeof(struct extent_node); + si->cache_mem += si->ext_mem[i]; + } si->page_mem = 0; if (sbi->node_inode) { @@ -471,16 +488,34 @@ static int stat_show(struct seq_file *s, void *v) si->skipped_atomic_files[BG_GC]); seq_printf(s, "BG skip : IO: %u, Other: %u\n", si->io_skip_bggc, si->other_skip_bggc); - seq_puts(s, "\nExtent Cache:\n"); + seq_puts(s, "\nExtent Cache (Read):\n"); seq_printf(s, " - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n", - si->hit_largest, si->hit_cached, - si->hit_rbtree); + si->hit_largest, si->hit_cached[EX_READ], + si->hit_rbtree[EX_READ]); seq_printf(s, " - Hit Ratio: %llu%% (%llu / %llu)\n", - !si->total_ext ? 0 : - div64_u64(si->hit_total * 100, si->total_ext), - si->hit_total, si->total_ext); + !si->total_ext[EX_READ] ? 0 : + div64_u64(si->hit_total[EX_READ] * 100, + si->total_ext[EX_READ]), + si->hit_total[EX_READ], si->total_ext[EX_READ]); seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n", - si->ext_tree, si->zombie_tree, si->ext_node); + si->ext_tree[EX_READ], si->zombie_tree[EX_READ], + si->ext_node[EX_READ]); + seq_puts(s, "\nExtent Cache (Block Age):\n"); + seq_printf(s, " - Allocated Data Blocks: %llu\n", + si->allocated_data_blocks); + seq_printf(s, " - Hit Count: L1:%llu L2:%llu\n", + si->hit_cached[EX_BLOCK_AGE], + si->hit_rbtree[EX_BLOCK_AGE]); + seq_printf(s, " - Hit Ratio: %llu%% (%llu / %llu)\n", + !si->total_ext[EX_BLOCK_AGE] ? 
0 : + div64_u64(si->hit_total[EX_BLOCK_AGE] * 100, + si->total_ext[EX_BLOCK_AGE]), + si->hit_total[EX_BLOCK_AGE], + si->total_ext[EX_BLOCK_AGE]); + seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n", + si->ext_tree[EX_BLOCK_AGE], + si->zombie_tree[EX_BLOCK_AGE], + si->ext_node[EX_BLOCK_AGE]); seq_puts(s, "\nBalancing F2FS Async:\n"); seq_printf(s, " - DIO (R: %4d, W: %4d)\n", si->nr_dio_read, si->nr_dio_write); @@ -546,8 +581,12 @@ static int stat_show(struct seq_file *s, void *v) (si->base_mem + si->cache_mem + si->page_mem) >> 10); seq_printf(s, " - static: %llu KB\n", si->base_mem >> 10); - seq_printf(s, " - cached: %llu KB\n", + seq_printf(s, " - cached all: %llu KB\n", si->cache_mem >> 10); + seq_printf(s, " - read extent cache: %llu KB\n", + si->ext_mem[EX_READ] >> 10); + seq_printf(s, " - block age extent cache: %llu KB\n", + si->ext_mem[EX_BLOCK_AGE] >> 10); seq_printf(s, " - paged : %llu KB\n", si->page_mem >> 10); } @@ -579,10 +618,15 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi) si->sbi = sbi; sbi->stat_info = si; - atomic64_set(&sbi->total_hit_ext, 0); - atomic64_set(&sbi->read_hit_rbtree, 0); + /* general extent cache stats */ + for (i = 0; i < NR_EXTENT_CACHES; i++) { + atomic64_set(&sbi->total_hit_ext[i], 0); + atomic64_set(&sbi->read_hit_rbtree[i], 0); + atomic64_set(&sbi->read_hit_cached[i], 0); + } + + /* read extent_cache only */ atomic64_set(&sbi->read_hit_largest, 0); - atomic64_set(&sbi->read_hit_cached, 0); atomic_set(&sbi->inline_xattr, 0); atomic_set(&sbi->inline_inode, 0); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 9ee895af75e4..9936d5466b6c 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -82,7 +82,8 @@ int f2fs_init_casefolded_name(const struct inode *dir, #ifdef CONFIG_UNICODE struct super_block *sb = dir->i_sb; - if (IS_CASEFOLDED(dir)) { + if (IS_CASEFOLDED(dir) && + !is_dot_dotdot(fname->usr_fname->name, fname->usr_fname->len)) { fname->cf_name.name = kmem_cache_alloc(f2fs_cf_name_slab, GFP_NOFS); if (!fname->cf_name.name) diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index 3ebf976a682d..33eb6dea04f0 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -6,6 +6,10 @@ * Copyright (c) 2015 Samsung Electronics * Authors: Jaegeuk Kim * Chao Yu + * + * block_age-based extent cache added by: + * Copyright (c) 2022 xiaomi Co., Ltd. 
+ * http://www.xiaomi.com/ */ #include @@ -15,6 +19,123 @@ #include "node.h" #include +static void __set_extent_info(struct extent_info *ei, + unsigned int fofs, unsigned int len, + block_t blk, bool keep_clen, + unsigned long age, unsigned long last_blocks, + enum extent_type type) +{ + ei->fofs = fofs; + ei->len = len; + + if (type == EX_READ) { + ei->blk = blk; + if (keep_clen) + return; +#ifdef CONFIG_F2FS_FS_COMPRESSION + ei->c_len = 0; +#endif + } else if (type == EX_BLOCK_AGE) { + ei->age = age; + ei->last_blocks = last_blocks; + } +} + +static bool __may_read_extent_tree(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + + if (!test_opt(sbi, READ_EXTENT_CACHE)) + return false; + if (is_inode_flag_set(inode, FI_NO_EXTENT)) + return false; + if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) && + !f2fs_sb_has_readonly(sbi)) + return false; + return S_ISREG(inode->i_mode); +} + +static bool __may_age_extent_tree(struct inode *inode) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + + if (!test_opt(sbi, AGE_EXTENT_CACHE)) + return false; + /* don't cache block age info for cold file */ + if (is_inode_flag_set(inode, FI_COMPRESSED_FILE)) + return false; + if (file_is_cold(inode)) + return false; + + return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode); +} + +static bool __init_may_extent_tree(struct inode *inode, enum extent_type type) +{ + if (type == EX_READ) + return __may_read_extent_tree(inode); + else if (type == EX_BLOCK_AGE) + return __may_age_extent_tree(inode); + return false; +} + +static bool __may_extent_tree(struct inode *inode, enum extent_type type) +{ + /* + * for recovered files during mount do not create extents + * if shrinker is not registered. + */ + if (list_empty(&F2FS_I_SB(inode)->s_list)) + return false; + + return __init_may_extent_tree(inode, type); +} + +static void __try_update_largest_extent(struct extent_tree *et, + struct extent_node *en) +{ + if (et->type != EX_READ) + return; + if (en->ei.len <= et->largest.len) + return; + + et->largest = en->ei; + et->largest_updated = true; +} + +static bool __is_extent_mergeable(struct extent_info *back, + struct extent_info *front, enum extent_type type) +{ + if (type == EX_READ) { +#ifdef CONFIG_F2FS_FS_COMPRESSION + if (back->c_len && back->len != back->c_len) + return false; + if (front->c_len && front->len != front->c_len) + return false; +#endif + return (back->fofs + back->len == front->fofs && + back->blk + back->len == front->blk); + } else if (type == EX_BLOCK_AGE) { + return (back->fofs + back->len == front->fofs && + abs(back->age - front->age) <= SAME_AGE_REGION && + abs(back->last_blocks - front->last_blocks) <= + SAME_AGE_REGION); + } + return false; +} + +static bool __is_back_mergeable(struct extent_info *cur, + struct extent_info *back, enum extent_type type) +{ + return __is_extent_mergeable(back, cur, type); +} + +static bool __is_front_mergeable(struct extent_info *cur, + struct extent_info *front, enum extent_type type) +{ + return __is_extent_mergeable(cur, front, type); +} + static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re, unsigned int ofs) { @@ -237,6 +358,7 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi, struct rb_node *parent, struct rb_node **p, bool leftmost) { + struct extent_tree_info *eti = &sbi->extent_tree[et->type]; struct extent_node *en; en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC); @@ -250,16 +372,18 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi, 
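
To make the __is_extent_mergeable() rules above concrete, a small standalone sketch of the EX_READ case (struct and values hypothetical):

#include <stdbool.h>

struct rext { unsigned int fofs, len, blk; };	/* mini read extent */

/* EX_READ extents merge only when adjacent logically AND physically. */
static bool read_back_mergeable(const struct rext *back,
				const struct rext *front)
{
	return back->fofs + back->len == front->fofs &&
	       back->blk + back->len == front->blk;
}

/*
 * {fofs 0, len 4, blk 100} + {fofs 4, len 4, blk 104} -> merge to {0, 8, 100}
 * {fofs 0, len 4, blk 100} + {fofs 4, len 4, blk 200} -> keep separate
 * EX_BLOCK_AGE extents instead merge when adjacent and both age and
 * last_blocks differ by at most SAME_AGE_REGION.
 */
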
rb_link_node(&en->rb_node, parent, p); rb_insert_color_cached(&en->rb_node, &et->root, leftmost); atomic_inc(&et->node_cnt); - atomic_inc(&sbi->total_ext_node); + atomic_inc(&eti->total_ext_node); return en; } static void __detach_extent_node(struct f2fs_sb_info *sbi, struct extent_tree *et, struct extent_node *en) { + struct extent_tree_info *eti = &sbi->extent_tree[et->type]; + rb_erase_cached(&en->rb_node, &et->root); atomic_dec(&et->node_cnt); - atomic_dec(&sbi->total_ext_node); + atomic_dec(&eti->total_ext_node); if (et->cached_en == en) et->cached_en = NULL; @@ -275,60 +399,50 @@ static void __detach_extent_node(struct f2fs_sb_info *sbi, static void __release_extent_node(struct f2fs_sb_info *sbi, struct extent_tree *et, struct extent_node *en) { - spin_lock(&sbi->extent_lock); + struct extent_tree_info *eti = &sbi->extent_tree[et->type]; + + spin_lock(&eti->extent_lock); f2fs_bug_on(sbi, list_empty(&en->list)); list_del_init(&en->list); - spin_unlock(&sbi->extent_lock); + spin_unlock(&eti->extent_lock); __detach_extent_node(sbi, et, en); } -static struct extent_tree *__grab_extent_tree(struct inode *inode) +static struct extent_tree *__grab_extent_tree(struct inode *inode, + enum extent_type type) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct extent_tree_info *eti = &sbi->extent_tree[type]; struct extent_tree *et; nid_t ino = inode->i_ino; - mutex_lock(&sbi->extent_tree_lock); - et = radix_tree_lookup(&sbi->extent_tree_root, ino); + mutex_lock(&eti->extent_tree_lock); + et = radix_tree_lookup(&eti->extent_tree_root, ino); if (!et) { et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS); - f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et); + f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et); memset(et, 0, sizeof(struct extent_tree)); et->ino = ino; + et->type = type; et->root = RB_ROOT_CACHED; et->cached_en = NULL; rwlock_init(&et->lock); INIT_LIST_HEAD(&et->list); atomic_set(&et->node_cnt, 0); - atomic_inc(&sbi->total_ext_tree); + atomic_inc(&eti->total_ext_tree); } else { - atomic_dec(&sbi->total_zombie_tree); + atomic_dec(&eti->total_zombie_tree); list_del_init(&et->list); } - mutex_unlock(&sbi->extent_tree_lock); + mutex_unlock(&eti->extent_tree_lock); /* never died until evict_inode */ - F2FS_I(inode)->extent_tree = et; + F2FS_I(inode)->extent_tree[type] = et; return et; } -static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi, - struct extent_tree *et, struct extent_info *ei) -{ - struct rb_node **p = &et->root.rb_root.rb_node; - struct extent_node *en; - - en = __attach_extent_node(sbi, et, ei, NULL, p, true); - if (!en) - return NULL; - - et->largest = en->ei; - et->cached_en = en; - return en; -} - static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi, struct extent_tree *et) { @@ -357,70 +471,89 @@ static void __drop_largest_extent(struct extent_tree *et, } } -/* return true, if inode page is changed */ -static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage) +void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct f2fs_extent *i_ext = ipage ? 
&F2FS_INODE(ipage)->i_ext : NULL; + struct extent_tree_info *eti = &sbi->extent_tree[EX_READ]; + struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext; struct extent_tree *et; struct extent_node *en; struct extent_info ei; - if (!f2fs_may_extent_tree(inode)) { - /* drop largest extent */ + if (!__may_extent_tree(inode, EX_READ)) { + /* drop largest read extent */ if (i_ext && i_ext->len) { f2fs_wait_on_page_writeback(ipage, NODE, true, true); i_ext->len = 0; set_page_dirty(ipage); - return; } - return; + goto out; } - et = __grab_extent_tree(inode); + et = __grab_extent_tree(inode, EX_READ); if (!i_ext || !i_ext->len) - return; + goto out; - get_extent_info(&ei, i_ext); + get_read_extent_info(&ei, i_ext); write_lock(&et->lock); if (atomic_read(&et->node_cnt)) - goto out; + goto unlock_out; - en = __init_extent_tree(sbi, et, &ei); + en = __attach_extent_node(sbi, et, &ei, NULL, + &et->root.rb_root.rb_node, true); if (en) { - spin_lock(&sbi->extent_lock); - list_add_tail(&en->list, &sbi->extent_list); - spin_unlock(&sbi->extent_lock); + et->largest = en->ei; + et->cached_en = en; + + spin_lock(&eti->extent_lock); + list_add_tail(&en->list, &eti->extent_list); + spin_unlock(&eti->extent_lock); } -out: +unlock_out: write_unlock(&et->lock); -} - -void f2fs_init_extent_tree(struct inode *inode, struct page *ipage) -{ - __f2fs_init_extent_tree(inode, ipage); - - if (!F2FS_I(inode)->extent_tree) +out: + if (!F2FS_I(inode)->extent_tree[EX_READ]) set_inode_flag(inode, FI_NO_EXTENT); } -static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs, - struct extent_info *ei) +void f2fs_init_age_extent_tree(struct inode *inode) +{ + if (!__init_may_extent_tree(inode, EX_BLOCK_AGE)) + return; + __grab_extent_tree(inode, EX_BLOCK_AGE); +} + +void f2fs_init_extent_tree(struct inode *inode) +{ + /* initialize read cache */ + if (__init_may_extent_tree(inode, EX_READ)) + __grab_extent_tree(inode, EX_READ); + + /* initialize block age cache */ + if (__init_may_extent_tree(inode, EX_BLOCK_AGE)) + __grab_extent_tree(inode, EX_BLOCK_AGE); +} + +static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs, + struct extent_info *ei, enum extent_type type) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct extent_tree *et = F2FS_I(inode)->extent_tree; + struct extent_tree_info *eti = &sbi->extent_tree[type]; + struct extent_tree *et = F2FS_I(inode)->extent_tree[type]; struct extent_node *en; bool ret = false; - f2fs_bug_on(sbi, !et); + if (!et) + return false; - trace_f2fs_lookup_extent_tree_start(inode, pgofs); + trace_f2fs_lookup_extent_tree_start(inode, pgofs, type); read_lock(&et->lock); - if (et->largest.fofs <= pgofs && + if (type == EX_READ && + et->largest.fofs <= pgofs && et->largest.fofs + et->largest.len > pgofs) { *ei = et->largest; ret = true; @@ -434,23 +567,26 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs, goto out; if (en == et->cached_en) - stat_inc_cached_node_hit(sbi); + stat_inc_cached_node_hit(sbi, type); else - stat_inc_rbtree_node_hit(sbi); + stat_inc_rbtree_node_hit(sbi, type); *ei = en->ei; - spin_lock(&sbi->extent_lock); + spin_lock(&eti->extent_lock); if (!list_empty(&en->list)) { - list_move_tail(&en->list, &sbi->extent_list); + list_move_tail(&en->list, &eti->extent_list); et->cached_en = en; } - spin_unlock(&sbi->extent_lock); + spin_unlock(&eti->extent_lock); ret = true; out: - stat_inc_total_hit(sbi); + stat_inc_total_hit(sbi, type); read_unlock(&et->lock); - trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei); + if (type == EX_READ) 
+		trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
+	else if (type == EX_BLOCK_AGE)
+		trace_f2fs_lookup_age_extent_tree_end(inode, pgofs, ei);
 
 	return ret;
 }
 
@@ -459,18 +595,20 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
 				struct extent_node *prev_ex,
 				struct extent_node *next_ex)
 {
+	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
 	struct extent_node *en = NULL;
 
-	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
+	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
 		prev_ex->ei.len += ei->len;
 		ei = &prev_ex->ei;
 		en = prev_ex;
 	}
 
-	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
+	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
 		next_ex->ei.fofs = ei->fofs;
-		next_ex->ei.blk = ei->blk;
 		next_ex->ei.len += ei->len;
+		if (et->type == EX_READ)
+			next_ex->ei.blk = ei->blk;
 
 		if (en)
 			__release_extent_node(sbi, et, prev_ex);
@@ -482,12 +620,12 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
 
 	__try_update_largest_extent(et, en);
 
-	spin_lock(&sbi->extent_lock);
+	spin_lock(&eti->extent_lock);
 	if (!list_empty(&en->list)) {
-		list_move_tail(&en->list, &sbi->extent_list);
+		list_move_tail(&en->list, &eti->extent_list);
 		et->cached_en = en;
 	}
-	spin_unlock(&sbi->extent_lock);
+	spin_unlock(&eti->extent_lock);
 	return en;
 }
 
@@ -497,6 +635,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
 				struct rb_node *insert_parent,
 				bool leftmost)
 {
+	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
 	struct rb_node **p;
 	struct rb_node *parent = NULL;
 	struct extent_node *en = NULL;
@@ -519,48 +658,55 @@ do_insert:
 	__try_update_largest_extent(et, en);
 
 	/* update in global extent list */
-	spin_lock(&sbi->extent_lock);
-	list_add_tail(&en->list, &sbi->extent_list);
+	spin_lock(&eti->extent_lock);
+	list_add_tail(&en->list, &eti->extent_list);
 	et->cached_en = en;
-	spin_unlock(&sbi->extent_lock);
+	spin_unlock(&eti->extent_lock);
 	return en;
 }
 
-static void f2fs_update_extent_tree_range(struct inode *inode,
-				pgoff_t fofs, block_t blkaddr, unsigned int len)
+static void __update_extent_tree_range(struct inode *inode,
+			struct extent_info *tei, enum extent_type type)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
 	struct extent_node *en = NULL, *en1 = NULL;
 	struct extent_node *prev_en = NULL, *next_en = NULL;
 	struct extent_info ei, dei, prev;
 	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	unsigned int fofs = tei->fofs, len = tei->len;
 	unsigned int end = fofs + len;
-	unsigned int pos = (unsigned int)fofs;
 	bool updated = false;
 	bool leftmost = false;
 
 	if (!et)
 		return;
 
-	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);
+	if (type == EX_READ)
+		trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
+						tei->blk, 0);
+	else if (type == EX_BLOCK_AGE)
+		trace_f2fs_update_age_extent_tree_range(inode, fofs, len,
+						tei->age, tei->last_blocks);
 
 	write_lock(&et->lock);
 
-	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
-		write_unlock(&et->lock);
-		return;
+	if (type == EX_READ) {
+		if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
+			write_unlock(&et->lock);
+			return;
+		}
+
+		prev = et->largest;
+		dei.len = 0;
+
+		/*
+		 * drop largest extent before lookup, in case it's already
+		 * been shrunk from extent tree
+		 */
+		__drop_largest_extent(et, fofs, len);
 	}
 
-	prev = et->largest;
-	dei.len = 0;
-
-	/*
-	 * drop largest extent before lookup, in case it's already
-	 * been shrunk from extent tree
-	 */
-	__drop_largest_extent(et, fofs, len);
-
 	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
 	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
 				(struct rb_entry *)et->cached_en, fofs,
@@ -580,26 +726,32 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 
 		dei = en->ei;
 		org_end = dei.fofs + dei.len;
-		f2fs_bug_on(sbi, pos >= org_end);
+		f2fs_bug_on(sbi, fofs >= org_end);
 
-		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
-			en->ei.len = pos - en->ei.fofs;
+		if (fofs > dei.fofs && (type != EX_READ ||
+				fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
+			en->ei.len = fofs - en->ei.fofs;
 			prev_en = en;
 			parts = 1;
 		}
 
-		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
+		if (end < org_end && (type != EX_READ ||
+				org_end - end >= F2FS_MIN_EXTENT_LEN)) {
 			if (parts) {
-				set_extent_info(&ei, end,
-						end - dei.fofs + dei.blk,
-						org_end - end);
+				__set_extent_info(&ei,
+					end, org_end - end,
+					end - dei.fofs + dei.blk, false,
+					dei.age, dei.last_blocks,
+					type);
 				en1 = __insert_extent_tree(sbi, et, &ei,
 							NULL, NULL, true);
 				next_en = en1;
 			} else {
-				en->ei.fofs = end;
-				en->ei.blk += end - dei.fofs;
-				en->ei.len -= end - dei.fofs;
+				__set_extent_info(&en->ei,
+					end, en->ei.len - (end - dei.fofs),
+					en->ei.blk + (end - dei.fofs), true,
+					dei.age, dei.last_blocks,
+					type);
 				next_en = en;
 			}
 			parts++;
@@ -629,10 +781,15 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 		en = next_en;
 	}
 
-	/* 3. update extent in extent cache */
-	if (blkaddr) {
+	if (type == EX_BLOCK_AGE)
+		goto update_age_extent_cache;
 
-		set_extent_info(&ei, fofs, blkaddr, len);
+	/* 3. update extent in read extent cache */
+	BUG_ON(type != EX_READ);
+
+	if (tei->blk) {
+		__set_extent_info(&ei, fofs, len, tei->blk, false,
+				  0, 0, EX_READ);
 		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
 			__insert_extent_tree(sbi, et, &ei,
 					insert_p, insert_parent, leftmost);
@@ -654,31 +811,182 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 			et->largest_updated = false;
 			updated = true;
 		}
+	goto out_read_extent_cache;
+update_age_extent_cache:
+	if (!tei->last_blocks)
+		goto out_read_extent_cache;
+
+	__set_extent_info(&ei, fofs, len, 0, false,
+			tei->age, tei->last_blocks, EX_BLOCK_AGE);
+	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+		__insert_extent_tree(sbi, et, &ei,
+				insert_p, insert_parent, leftmost);
+out_read_extent_cache:
 	write_unlock(&et->lock);
 
 	if (updated)
 		f2fs_mark_inode_dirty_sync(inode, true);
 }
 
-unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr, unsigned int llen,
+				unsigned int c_len)
 {
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
+	struct extent_node *en = NULL;
+	struct extent_node *prev_en = NULL, *next_en = NULL;
+	struct extent_info ei;
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	bool leftmost = false;
+
+	trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
+						blkaddr, c_len);
+
+	/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
+	if (is_inode_flag_set(inode, FI_NO_EXTENT))
+		return;
+
+	write_lock(&et->lock);
+
+	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
+				(struct rb_entry *)et->cached_en, fofs,
+				(struct rb_entry **)&prev_en,
+				(struct rb_entry **)&next_en,
+				&insert_p, &insert_parent, false,
+				&leftmost);
+	if (en)
+		goto unlock_out;
+
+	__set_extent_info(&ei, fofs, llen, blkaddr, true, 0, 0, EX_READ);
+	ei.c_len = c_len;
+
+	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+		__insert_extent_tree(sbi, et, &ei,
+				insert_p, insert_parent, leftmost);
+unlock_out:
+	write_unlock(&et->lock);
+}
+#endif
+
+static unsigned long long __calculate_block_age(struct f2fs_sb_info *sbi,
+						unsigned long long new,
+						unsigned long long old)
+{
+	unsigned int rem_old, rem_new;
+	unsigned long long res;
+	unsigned int weight = sbi->last_age_weight;
+
+	res = div_u64_rem(new, 100, &rem_new) * (100 - weight) +
+		div_u64_rem(old, 100, &rem_old) * weight;
+
+	if (rem_new)
+		res += rem_new * (100 - weight) / 100;
+	if (rem_old)
+		res += rem_old * weight / 100;
+
+	return res;
+}
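The weighted average above is easy to verify outside the kernel. A minimal userspace sketch (my own demo, not kernel code) of the same arithmetic, assuming the default last_age_weight of 30 so the result is 70% new sample plus 30% previous age; the quotient/remainder split mirrors the div_u64_rem() usage, which keeps the products from overflowing 64 bits for very large ages:

    #include <stdint.h>
    #include <stdio.h>

    /* result = new * (100 - weight) / 100 + old * weight / 100,
     * computed as quotient and remainder parts like the kernel does */
    static uint64_t calc_block_age(uint64_t new, uint64_t old, unsigned weight)
    {
    	uint64_t res = (new / 100) * (100 - weight) + (old / 100) * weight;

    	res += (new % 100) * (100 - weight) / 100;
    	res += (old % 100) * weight / 100;
    	return res;
    }

    int main(void)
    {
    	/* the recorded age drifts toward the new sample: 700 + 120 = 820 */
    	printf("%llu\n", (unsigned long long)calc_block_age(1000, 400, 30));
    	return 0;
    }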
+
+/* This returns a new age and allocated blocks in ei */
+static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
+						block_t blkaddr)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	loff_t f_size = i_size_read(inode);
+	unsigned long long cur_blocks =
+				atomic64_read(&sbi->allocated_data_blocks);
+	struct extent_info tei = *ei;	/* only fofs and len are valid */
+
+	/*
+	 * When I/O is not aligned to a PAGE_SIZE, update will happen to the
+	 * last file block even in seq write. So don't record age for the new
+	 * last file block here.
+	 */
+	if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
+			blkaddr == NEW_ADDR)
+		return -EINVAL;
+
+	if (__lookup_extent_tree(inode, ei->fofs, &tei, EX_BLOCK_AGE)) {
+		unsigned long long cur_age;
+
+		if (cur_blocks >= tei.last_blocks)
+			cur_age = cur_blocks - tei.last_blocks;
+		else
+			/* allocated_data_blocks overflow */
+			cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks;
+
+		if (tei.age)
+			ei->age = __calculate_block_age(sbi, cur_age, tei.age);
+		else
+			ei->age = cur_age;
+		ei->last_blocks = cur_blocks;
+		WARN_ON(ei->age > cur_blocks);
+		return 0;
+	}
+
+	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
+
+	/* the data block was allocated for the first time */
+	if (blkaddr == NEW_ADDR)
+		goto out;
+
+	if (__is_valid_data_blkaddr(blkaddr) &&
+	    !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
+		f2fs_bug_on(sbi, 1);
+		return -EINVAL;
+	}
+out:
+	/*
+	 * init block age with zero, this can happen when the block age extent
+	 * was reclaimed due to memory constraint or system reboot
+	 */
+	ei->age = 0;
+	ei->last_blocks = cur_blocks;
+	return 0;
+}
+
+static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
+{
+	struct extent_info ei = {};
+
+	if (!__may_extent_tree(dn->inode, type))
+		return;
+
+	ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+								dn->ofs_in_node;
+	ei.len = 1;
+
+	if (type == EX_READ) {
+		if (dn->data_blkaddr == NEW_ADDR)
+			ei.blk = NULL_ADDR;
+		else
+			ei.blk = dn->data_blkaddr;
+	} else if (type == EX_BLOCK_AGE) {
+		if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr))
+			return;
+	}
+	__update_extent_tree_range(dn->inode, &ei, type);
+}
+
+static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
+					enum extent_type type)
+{
+	struct extent_tree_info *eti = &sbi->extent_tree[type];
 	struct extent_tree *et, *next;
 	struct extent_node *en;
 	unsigned int node_cnt = 0, tree_cnt = 0;
 	int remained;
 
-	if (!test_opt(sbi, EXTENT_CACHE))
-		return 0;
-
-	if (!atomic_read(&sbi->total_zombie_tree))
+	if (!atomic_read(&eti->total_zombie_tree))
 		goto free_node;
 
-	if (!mutex_trylock(&sbi->extent_tree_lock))
+	if (!mutex_trylock(&eti->extent_tree_lock))
 		goto out;
 
 	/* 1. remove unreferenced extent tree */
-	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
+	list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
 		if (atomic_read(&et->node_cnt)) {
 			write_lock(&et->lock);
 			node_cnt += __free_extent_tree(sbi, et);
@@ -686,61 +994,137 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 		}
 		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
 		list_del_init(&et->list);
-		radix_tree_delete(&sbi->extent_tree_root, et->ino);
+		radix_tree_delete(&eti->extent_tree_root, et->ino);
 		kmem_cache_free(extent_tree_slab, et);
-		atomic_dec(&sbi->total_ext_tree);
-		atomic_dec(&sbi->total_zombie_tree);
+		atomic_dec(&eti->total_ext_tree);
+		atomic_dec(&eti->total_zombie_tree);
 		tree_cnt++;
 
 		if (node_cnt + tree_cnt >= nr_shrink)
 			goto unlock_out;
 		cond_resched();
 	}
-	mutex_unlock(&sbi->extent_tree_lock);
+	mutex_unlock(&eti->extent_tree_lock);
 
 free_node:
 	/* 2. remove LRU extent entries */
-	if (!mutex_trylock(&sbi->extent_tree_lock))
+	if (!mutex_trylock(&eti->extent_tree_lock))
 		goto out;
 
 	remained = nr_shrink - (node_cnt + tree_cnt);
 
-	spin_lock(&sbi->extent_lock);
+	spin_lock(&eti->extent_lock);
 	for (; remained > 0; remained--) {
-		if (list_empty(&sbi->extent_list))
+		if (list_empty(&eti->extent_list))
 			break;
-		en = list_first_entry(&sbi->extent_list,
+		en = list_first_entry(&eti->extent_list,
 					struct extent_node, list);
 		et = en->et;
 		if (!write_trylock(&et->lock)) {
 			/* refresh this extent node's position in extent list */
-			list_move_tail(&en->list, &sbi->extent_list);
+			list_move_tail(&en->list, &eti->extent_list);
 			continue;
 		}
 
 		list_del_init(&en->list);
-		spin_unlock(&sbi->extent_lock);
+		spin_unlock(&eti->extent_lock);
 
 		__detach_extent_node(sbi, et, en);
 
 		write_unlock(&et->lock);
 		node_cnt++;
-		spin_lock(&sbi->extent_lock);
+		spin_lock(&eti->extent_lock);
 	}
-	spin_unlock(&sbi->extent_lock);
+	spin_unlock(&eti->extent_lock);
 
 unlock_out:
-	mutex_unlock(&sbi->extent_tree_lock);
+	mutex_unlock(&eti->extent_tree_lock);
 out:
-	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
+	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);
 
 	return node_cnt + tree_cnt;
 }
 
-unsigned int f2fs_destroy_extent_node(struct inode *inode)
+/* read extent cache operations */
+bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
+				struct extent_info *ei)
+{
+	if (!__may_extent_tree(inode, EX_READ))
+		return false;
+
+	return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
+}
+
+void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
+{
+	return __update_extent_cache(dn, EX_READ);
+}
+
+void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
+				pgoff_t fofs, block_t blkaddr, unsigned int len)
+{
+	struct extent_info ei = {
+		.fofs = fofs,
+		.len = len,
+		.blk = blkaddr,
+	};
+
+	if (!__may_extent_tree(dn->inode, EX_READ))
+		return;
+
+	__update_extent_tree_range(dn->inode, &ei, EX_READ);
+}
+
+unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+	if (!test_opt(sbi, READ_EXTENT_CACHE))
+		return 0;
+
+	return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
+}
+
+/* block age extent cache operations */
+bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
+				struct extent_info *ei)
+{
+	if (!__may_extent_tree(inode, EX_BLOCK_AGE))
+		return false;
+
+	return __lookup_extent_tree(inode, pgofs, ei, EX_BLOCK_AGE);
+}
+
+void f2fs_update_age_extent_cache(struct dnode_of_data *dn)
+{
+	return __update_extent_cache(dn, EX_BLOCK_AGE);
+}
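For context, callers consume a read-extent hit with simple offset arithmetic — the gc.c hunk later in this patch does exactly `dn.data_blkaddr = ei.blk + index - ei.fofs`. A standalone toy model of that lookup (illustrative names, not the kernel API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy read extent: file offsets [fofs, fofs+len) map to physical
     * blocks [blk, blk+len), so a hit resolves a block address without
     * any node-page (metadata) I/O. */
    struct toy_extent { uint32_t fofs, len, blk; };

    static bool toy_lookup(const struct toy_extent *ei, uint32_t index,
    		       uint32_t *blkaddr)
    {
    	if (index < ei->fofs || index >= ei->fofs + ei->len)
    		return false;	/* miss: caller walks the node tree */
    	*blkaddr = ei->blk + (index - ei->fofs);
    	return true;
    }

    int main(void)
    {
    	struct toy_extent ei = { .fofs = 8, .len = 4, .blk = 1000 };
    	uint32_t blk;

    	if (toy_lookup(&ei, 10, &blk))
    		printf("index 10 -> block %u\n", blk);	/* prints 1002 */
    	return 0;
    }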
+
+void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
+				pgoff_t fofs, unsigned int len)
+{
+	struct extent_info ei = {
+		.fofs = fofs,
+		.len = len,
+	};
+
+	if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE))
+		return;
+
+	__update_extent_tree_range(dn->inode, &ei, EX_BLOCK_AGE);
+}
+
+unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+	if (!test_opt(sbi, AGE_EXTENT_CACHE))
+		return 0;
+
+	return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
+}
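Each shrinker above is gated by its own mount option and returns 0 when that cache is disabled. How a caller combines the two lives in fs/f2fs/shrinker.c, which this excerpt does not show; the following self-contained toy assumes a simple fall-through reclaim policy and is not the actual shrinker code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model: each cache type has its own enable flag (mount option)
     * and reclaim only falls through to the next cache while the request
     * is not yet satisfied. */
    struct toy_sb {
    	bool read_cache_enabled;	/* READ_EXTENT_CACHE */
    	bool age_cache_enabled;		/* AGE_EXTENT_CACHE */
    	int read_nodes, age_nodes;	/* cached entries per type */
    };

    static int shrink_one(bool enabled, int *nodes, int nr)
    {
    	if (!enabled)
    		return 0;	/* option off: helper is a no-op */
    	if (nr > *nodes)
    		nr = *nodes;
    	*nodes -= nr;
    	return nr;
    }

    static int toy_shrink(struct toy_sb *sbi, int nr)
    {
    	int freed = shrink_one(sbi->read_cache_enabled, &sbi->read_nodes, nr);

    	if (freed < nr)
    		freed += shrink_one(sbi->age_cache_enabled, &sbi->age_nodes,
    				    nr - freed);
    	return freed;
    }

    int main(void)
    {
    	struct toy_sb sbi = { true, true, 100, 100 };

    	printf("freed %d\n", toy_shrink(&sbi, 150));	/* 100 + 50 */
    	return 0;
    }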
+
+static unsigned int __destroy_extent_node(struct inode *inode,
+					enum extent_type type)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
 	unsigned int node_cnt = 0;
 
 	if (!et || !atomic_read(&et->node_cnt))
@@ -753,32 +1137,47 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode)
 	return node_cnt;
 }
 
-void f2fs_drop_extent_tree(struct inode *inode)
+void f2fs_destroy_extent_node(struct inode *inode)
+{
+	__destroy_extent_node(inode, EX_READ);
+	__destroy_extent_node(inode, EX_BLOCK_AGE);
+}
+
+static void __drop_extent_tree(struct inode *inode, enum extent_type type)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
 	bool updated = false;
 
-	if (!f2fs_may_extent_tree(inode))
+	if (!__may_extent_tree(inode, type))
 		return;
 
-	set_inode_flag(inode, FI_NO_EXTENT);
-	write_lock(&et->lock);
+	write_lock(&et->lock);
+	set_inode_flag(inode, FI_NO_EXTENT);
 	__free_extent_tree(sbi, et);
-	if (et->largest.len) {
-		et->largest.len = 0;
-		updated = true;
+	if (type == EX_READ) {
+		set_inode_flag(inode, FI_NO_EXTENT);
+		if (et->largest.len) {
+			et->largest.len = 0;
+			updated = true;
+		}
 	}
 	write_unlock(&et->lock);
 
 	if (updated)
 		f2fs_mark_inode_dirty_sync(inode, true);
 }
 
-void f2fs_destroy_extent_tree(struct inode *inode)
+void f2fs_drop_extent_tree(struct inode *inode)
+{
+	__drop_extent_tree(inode, EX_READ);
+	__drop_extent_tree(inode, EX_BLOCK_AGE);
+}
+
+static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_tree_info *eti = &sbi->extent_tree[type];
+	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
 	unsigned int node_cnt = 0;
 
 	if (!et)
@@ -786,76 +1185,57 @@ void f2fs_destroy_extent_tree(struct inode *inode)
 
 	if (inode->i_nlink && !is_bad_inode(inode) &&
 					atomic_read(&et->node_cnt)) {
-		mutex_lock(&sbi->extent_tree_lock);
-		list_add_tail(&et->list, &sbi->zombie_list);
-		atomic_inc(&sbi->total_zombie_tree);
-		mutex_unlock(&sbi->extent_tree_lock);
+		mutex_lock(&eti->extent_tree_lock);
+		list_add_tail(&et->list, &eti->zombie_list);
+		atomic_inc(&eti->total_zombie_tree);
+		mutex_unlock(&eti->extent_tree_lock);
 		return;
 	}
 
 	/* free all extent info belong to this extent tree */
-	node_cnt = f2fs_destroy_extent_node(inode);
+	node_cnt = __destroy_extent_node(inode, type);
 
 	/* delete extent tree entry in radix tree */
-	mutex_lock(&sbi->extent_tree_lock);
+	mutex_lock(&eti->extent_tree_lock);
 	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
-	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
+	radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
 	kmem_cache_free(extent_tree_slab, et);
-	atomic_dec(&sbi->total_ext_tree);
-	mutex_unlock(&sbi->extent_tree_lock);
+	atomic_dec(&eti->total_ext_tree);
+	mutex_unlock(&eti->extent_tree_lock);
 
-	F2FS_I(inode)->extent_tree = NULL;
+	F2FS_I(inode)->extent_tree[type] = NULL;
 
-	trace_f2fs_destroy_extent_tree(inode, node_cnt);
+	trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
 }
 
-bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
-				struct extent_info *ei)
+void f2fs_destroy_extent_tree(struct inode *inode)
 {
-	if (!f2fs_may_extent_tree(inode))
-		return false;
-
-	return f2fs_lookup_extent_tree(inode, pgofs, ei);
+	__destroy_extent_tree(inode, EX_READ);
+	__destroy_extent_tree(inode, EX_BLOCK_AGE);
 }
 
-void f2fs_update_extent_cache(struct dnode_of_data *dn)
+static void __init_extent_tree_info(struct extent_tree_info *eti)
 {
-	pgoff_t fofs;
-	block_t blkaddr;
-
-	if (!f2fs_may_extent_tree(dn->inode))
-		return;
-
-	if (dn->data_blkaddr == NEW_ADDR)
-		blkaddr = NULL_ADDR;
-	else
-		blkaddr = dn->data_blkaddr;
-
-	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
-								dn->ofs_in_node;
-	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
-}
-
-void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
-				pgoff_t fofs, block_t blkaddr, unsigned int len)
-
-{
-	if (!f2fs_may_extent_tree(dn->inode))
-		return;
-
-	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
+	INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
+	mutex_init(&eti->extent_tree_lock);
+	INIT_LIST_HEAD(&eti->extent_list);
+	spin_lock_init(&eti->extent_lock);
+	atomic_set(&eti->total_ext_tree, 0);
+	INIT_LIST_HEAD(&eti->zombie_list);
+	atomic_set(&eti->total_zombie_tree, 0);
+	atomic_set(&eti->total_ext_node, 0);
 }
 
 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
 {
-	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
-	mutex_init(&sbi->extent_tree_lock);
-	INIT_LIST_HEAD(&sbi->extent_list);
-	spin_lock_init(&sbi->extent_lock);
-	atomic_set(&sbi->total_ext_tree, 0);
-	INIT_LIST_HEAD(&sbi->zombie_list);
-	atomic_set(&sbi->total_zombie_tree, 0);
-	atomic_set(&sbi->total_ext_node, 0);
+	__init_extent_tree_info(&sbi->extent_tree[EX_READ]);
+	__init_extent_tree_info(&sbi->extent_tree[EX_BLOCK_AGE]);
+
+	/* initialize for block age extents */
+	atomic64_set(&sbi->allocated_data_blocks, 0);
+	sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
+	sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
+	sbi->last_age_weight = LAST_AGE_WEIGHT;
 }
 
 int __init f2fs_create_extent_cache(void)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index d006c60f0e5e..28c605fe2fbd 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -86,7 +86,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
 #define F2FS_MOUNT_FLUSH_MERGE		0x00000400
 #define F2FS_MOUNT_NOBARRIER		0x00000800
 #define F2FS_MOUNT_FASTBOOT		0x00001000
-#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
+#define F2FS_MOUNT_READ_EXTENT_CACHE	0x00002000
 #define F2FS_MOUNT_DATA_FLUSH		0x00008000
 #define F2FS_MOUNT_FAULT_INJECTION	0x00010000
 #define F2FS_MOUNT_USRQUOTA		0x00080000
@@ -101,6 +101,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
 #define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
 #define F2FS_MOUNT_GC_MERGE		0x20000000
 #define F2FS_MOUNT_COMPRESS_CACHE	0x40000000
+#define F2FS_MOUNT_AGE_EXTENT_CACHE	0x80000000
 
 #define F2FS_OPTION(sbi)	((sbi)->mount_opt)
 #define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
@@ -153,6 +154,7 @@ struct f2fs_mount_info {
 	int fsync_mode;			/* fsync policy */
 	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
 	int bggc_mode;			/* bggc mode: off, on or sync */
+	int memory_mode;		/* memory mode */
 	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
 	block_t unusable_cap_perc;	/* percentage for cap */
 	block_t unusable_cap;		/* Amount of space allowed to be
@@ -253,6 +255,10 @@ enum {
 					 * condition of read on truncated area
 					 * by extent_cache
 					 */
+	DATA_GENERIC_ENHANCE_UPDATE,	/*
+					 * strong check on range and segment
+					 * bitmap for update case
+					 */
 	META_GENERIC,
 };
 
@@ -492,11 +498,11 @@ struct f2fs_filename {
 #ifdef CONFIG_UNICODE
 	/*
 	 * For casefolded directories: the casefolded name, but it's left NULL
-	 * if the original name is not valid Unicode, if the directory is both
-	 * casefolded and encrypted and its encryption key is unavailable, or if
-	 * the filesystem is doing an internal operation where usr_fname is also
-	 * NULL.  In all these cases we fall back to treating the name as an
-	 * opaque byte sequence.
+	 * if the original name is not valid Unicode, if the original name is
+	 * "." or "..", if the directory is both casefolded and encrypted and
+	 * its encryption key is unavailable, or if the filesystem is doing an
+	 * internal operation where usr_fname is also NULL.  In all these cases
+	 * we fall back to treating the name as an opaque byte sequence.
	 */
 	struct fscrypt_str cf_name;
 #endif
@@ -570,7 +576,26 @@ enum {
 #define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */
 
 /* number of extent info in extent cache we try to shrink */
-#define EXTENT_CACHE_SHRINK_NUMBER	128
+#define READ_EXTENT_CACHE_SHRINK_NUMBER	128
+
+/* number of age extent info in extent cache we try to shrink */
+#define AGE_EXTENT_CACHE_SHRINK_NUMBER	128
+#define LAST_AGE_WEIGHT			30
+#define SAME_AGE_REGION			1024
+
+/*
+ * Define data blocks with age less than 1GB as hot data,
+ * and data blocks with age less than 10GB but more than 1GB as warm data.
+ */
+#define DEF_HOT_DATA_AGE_THRESHOLD	262144
+#define DEF_WARM_DATA_AGE_THRESHOLD	2621440
+
+/* extent cache type */
+enum extent_type {
+	EX_READ,
+	EX_BLOCK_AGE,
+	NR_EXTENT_CACHES,
+};
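A quick check of the 1GB/10GB figures in the comment above, assuming f2fs's default 4 KiB block size (the thresholds themselves are block counts): 262144 * 4096 = 2^18 * 2^12 = 2^30 bytes = 1 GiB, and 2621440 blocks = 10 GiB. Expressed as compile-time asserts in plain C11:

    #include <assert.h>

    #define DEF_HOT_DATA_AGE_THRESHOLD	262144ULL
    #define DEF_WARM_DATA_AGE_THRESHOLD	2621440ULL
    #define BLKSIZE			4096ULL	/* assumed default block size */

    static_assert(DEF_HOT_DATA_AGE_THRESHOLD * BLKSIZE == 1ULL << 30,
    	      "hot threshold covers 1 GiB of blocks");
    static_assert(DEF_WARM_DATA_AGE_THRESHOLD * BLKSIZE == 10ULL << 30,
    	      "warm threshold covers 10 GiB of blocks");

    int main(void) { return 0; }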
 
 struct rb_entry {
 	struct rb_node rb_node;		/* rb node located in rb-tree */
@@ -586,7 +611,24 @@ struct rb_entry {
 struct extent_info {
 	unsigned int fofs;		/* start offset in a file */
 	unsigned int len;		/* length of the extent */
-	u32 blk;			/* start block address of the extent */
+	union {
+		/* read extent_cache */
+		struct {
+			/* start block address of the extent */
+			block_t blk;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+			/* physical extent length of compressed blocks */
+			unsigned int c_len;
+#endif
+		};
+		/* block age extent_cache */
+		struct {
+			/* block age of the extent */
+			unsigned long long age;
+			/* last total blocks allocated */
+			unsigned long long last_blocks;
+		};
+	};
 };
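A userspace sketch of the layout idea: the read fields and the age fields never coexist for a single extent, so the union keeps the structure at the size of the larger member set rather than their sum. Types and names below are illustrative, not the kernel ABI:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_extent_info {
    	uint32_t fofs, len;
    	union {
    		struct { uint32_t blk; };		/* EX_READ view */
    		struct { uint64_t age, last_blocks; };	/* EX_BLOCK_AGE view */
    	};
    };

    int main(void)
    {
    	/* size is dominated by the larger (age) member set, not the sum */
    	printf("sizeof = %zu\n", sizeof(struct toy_extent_info));
    	return 0;
    }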
 
 struct extent_node {
@@ -598,13 +640,25 @@ struct extent_node {
 
 struct extent_tree {
 	nid_t ino;			/* inode number */
+	enum extent_type type;		/* keep the extent tree type */
 	struct rb_root_cached root;	/* root of extent info rb-tree */
 	struct extent_node *cached_en;	/* recently accessed extent node */
-	struct extent_info largest;	/* largested extent info */
 	struct list_head list;		/* to be used by sbi->zombie_list */
 	rwlock_t lock;			/* protect extent info rb-tree */
 	atomic_t node_cnt;		/* # of extent node in rb-tree*/
 	bool largest_updated;		/* largest extent updated */
+	struct extent_info largest;	/* largest cached extent for EX_READ */
+};
+
+struct extent_tree_info {
+	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
+	struct mutex extent_tree_lock;	/* locking extent radix tree */
+	struct list_head extent_list;	/* lru list for shrinker */
+	spinlock_t extent_lock;		/* locking extent lru list */
+	atomic_t total_ext_tree;	/* extent tree count */
+	struct list_head zombie_list;	/* extent zombie tree list */
+	atomic_t total_zombie_tree;	/* extent zombie tree count */
+	atomic_t total_ext_node;	/* extent info count */
 };
 
 /*
@@ -709,7 +763,8 @@ enum {
 	FI_DROP_CACHE,		/* drop dirty page cache */
 	FI_DATA_EXIST,		/* indicate data exists */
 	FI_INLINE_DOTS,		/* indicate inline dot dentries */
-	FI_DO_DEFRAG,		/* indicate defragment is running */
+	FI_SKIP_WRITES,		/* should skip data page writeback */
+	FI_OPU_WRITE,		/* used for opu per file */
 	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
 	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
 	FI_HOT_DATA,		/* indicate file is hot */
@@ -746,6 +801,7 @@ struct f2fs_inode_info {
 	unsigned int clevel;		/* maximum level of given file name */
 	struct task_struct *task;	/* lookup and create consistency */
 	struct task_struct *cp_task;	/* separate cp/wb IO stats*/
+	struct task_struct *wb_task;	/* indicate inode is in context of writeback */
 	nid_t i_xattr_nid;		/* node id that contains xattrs */
 	loff_t last_disk_size;		/* lastly written file size */
 	spinlock_t i_size_lock;		/* protect last_disk_size */
@@ -762,7 +818,8 @@ struct f2fs_inode_info {
 	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
 	struct task_struct *inmem_task;	/* store inmemory task */
 	struct mutex inmem_lock;	/* lock for inmemory pages */
-	struct extent_tree *extent_tree;	/* cached extent_tree entry */
+	struct extent_tree *extent_tree[NR_EXTENT_CACHES];
+					/* cached extent_tree entry */
 
 	/* avoid racing between foreground op and gc */
 	struct f2fs_rwsem i_gc_rwsem[2];
@@ -784,7 +841,7 @@ struct f2fs_inode_info {
 	unsigned int i_cluster_size;		/* cluster size */
 };
 
-static inline void get_extent_info(struct extent_info *ext,
+static inline void get_read_extent_info(struct extent_info *ext,
 					struct f2fs_extent *i_ext)
 {
 	ext->fofs = le32_to_cpu(i_ext->fofs);
@@ -792,7 +849,7 @@ static inline void get_extent_info(struct extent_info *ext,
 	ext->len = le32_to_cpu(i_ext->len);
 }
 
-static inline void set_raw_extent(struct extent_info *ext,
+static inline void set_raw_read_extent(struct extent_info *ext,
 					struct f2fs_extent *i_ext)
 {
 	i_ext->fofs = cpu_to_le32(ext->fofs);
@@ -800,14 +857,6 @@ static inline void set_raw_extent(struct extent_info *ext,
 	i_ext->len = cpu_to_le32(ext->len);
 }
 
-static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
-						u32 blk, unsigned int len)
-{
-	ei->fofs = fofs;
-	ei->blk = blk;
-	ei->len = len;
-}
-
 static inline bool __is_discard_mergeable(struct discard_info *back,
 			struct discard_info *front, unsigned int max_len)
 {
@@ -827,35 +876,6 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
 	return __is_discard_mergeable(cur, front, max_len);
 }
 
-static inline bool __is_extent_mergeable(struct extent_info *back,
-						struct extent_info *front)
-{
-	return (back->fofs + back->len == front->fofs &&
-			back->blk + back->len == front->blk);
-}
-
-static inline bool __is_back_mergeable(struct extent_info *cur,
-						struct extent_info *back)
-{
-	return __is_extent_mergeable(back, cur);
-}
-
-static inline bool __is_front_mergeable(struct extent_info *cur,
-						struct extent_info *front)
-{
-	return __is_extent_mergeable(cur, front);
-}
-
-extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
-static inline void __try_update_largest_extent(struct extent_tree *et,
-						struct extent_node *en)
-{
-	if (en->ei.len > et->largest.len) {
-		et->largest = en->ei;
-		et->largest_updated = true;
-	}
-}
-
 /*
  * For free nid management
  */
@@ -1072,8 +1092,8 @@ enum count_type {
  */
 #define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
 enum page_type {
-	DATA,
-	NODE,
+	DATA = 0,
+	NODE = 1,	/* should not change this */
 	META,
 	NR_PAGE_TYPE,
 	META_FLUSH,
@@ -1163,6 +1183,7 @@ struct f2fs_io_info {
 	bool retry;		/* need to reallocate block address */
 	int compr_blocks;	/* # of compressed block addresses */
 	bool encrypted;		/* indicate file is encrypted */
+	bool post_read;		/* require post read */
 	enum iostat_type io_type;	/* io type */
 	struct writeback_control *io_wbc; /* writeback control */
 	struct bio **bio;		/* bio for ipu */
@@ -1247,6 +1268,7 @@ enum {
 	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
 	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
 	SBI_IS_RESIZEFS,			/* resizefs is in process */
+	SBI_IS_FREEZING,			/* freezefs is in process */
 };
 
 enum {
@@ -1312,6 +1334,11 @@ enum {
 	 */
 };
 
+enum {
+	MEMORY_MODE_NORMAL,	/* memory mode for normal devices */
+	MEMORY_MODE_LOW,	/* memory mode for low memory devices */
+};
+
 static inline int f2fs_test_bit(unsigned int nr, char *addr);
 static inline void f2fs_set_bit(unsigned int nr, char *addr);
 static inline void f2fs_clear_bit(unsigned int nr, char *addr);
@@ -1531,6 +1558,7 @@ struct decompress_io_ctx {
 	void *private;			/* payload buffer for specified decompression algorithm */
 	void *private2;			/* extra payload buffer */
 	struct work_struct verity_work;	/* work to verify the decompressed pages */
+	struct work_struct free_work;	/* work to free this structure itself later */
 };
 
 #define NULL_CLUSTER			((unsigned int)(~0))
@@ -1595,14 +1623,13 @@ struct f2fs_sb_info {
 	struct mutex flush_lock;		/* for flush exclusion */
 
 	/* for extent tree cache */
-	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
-	struct mutex extent_tree_lock;	/* locking extent radix tree */
-	struct list_head extent_list;		/* lru list for shrinker */
-	spinlock_t extent_lock;			/* locking extent lru list */
-	atomic_t total_ext_tree;		/* extent tree count */
-	struct list_head zombie_list;		/* extent zombie tree list */
-	atomic_t total_zombie_tree;		/* extent zombie tree count */
-	atomic_t total_ext_node;		/* extent info count */
+	struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
+	atomic64_t allocated_data_blocks;	/* for block age extent_cache */
+
+	/* The threshold used for hot and warm data separation */
+	unsigned int hot_data_age_threshold;
+	unsigned int warm_data_age_threshold;
+	unsigned int last_age_weight;
 
 	/* basic filesystem units */
 	unsigned int log_sectors_per_block;	/* log2 sectors per block */
@@ -1685,10 +1712,14 @@ struct f2fs_sb_info {
 	unsigned int segment_count[2];		/* # of allocated segments */
 	unsigned int block_count[2];		/* # of allocated blocks */
 	atomic_t inplace_count;		/* # of inplace update */
-	atomic64_t total_hit_ext;		/* # of lookup extent cache */
-	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
-	atomic64_t read_hit_largest;		/* # of hit largest extent node */
-	atomic64_t read_hit_cached;		/* # of hit cached extent node */
+	/* # of lookup extent cache */
+	atomic64_t total_hit_ext[NR_EXTENT_CACHES];
+	/* # of hit rbtree extent node */
+	atomic64_t read_hit_rbtree[NR_EXTENT_CACHES];
+	/* # of hit cached extent node */
+	atomic64_t read_hit_cached[NR_EXTENT_CACHES];
+	/* # of hit largest extent node in read extent cache */
+	atomic64_t read_hit_largest;
 	atomic_t inline_xattr;			/* # of inline_xattr inodes */
 	atomic_t inline_inode;			/* # of inline_data inodes */
 	atomic_t inline_dir;			/* # of inline_dentry inodes */
@@ -2481,6 +2512,7 @@ static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
 	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
 }
 
+extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
 					struct inode *inode, bool is_inode)
 {
@@ -2561,11 +2593,17 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
 {
 	spin_lock(&sbi->stat_lock);
 
-	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
-	f2fs_bug_on(sbi, !sbi->total_valid_node_count);
+	if (unlikely(!sbi->total_valid_block_count ||
+			!sbi->total_valid_node_count)) {
+		f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
+			  sbi->total_valid_block_count,
+			  sbi->total_valid_node_count);
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+	} else {
+		sbi->total_valid_block_count--;
+		sbi->total_valid_node_count--;
+	}
 
-	sbi->total_valid_node_count--;
-	sbi->total_valid_block_count--;
 	if (sbi->reserved_blocks &&
 		sbi->current_reserved_blocks < sbi->reserved_blocks)
 		sbi->current_reserved_blocks++;
@@ -3477,6 +3515,7 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
 int f2fs_quota_sync(struct super_block *sb, int type);
 loff_t max_file_blocks(struct inode *inode);
 void f2fs_quota_off_umount(struct super_block *sb);
+void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason);
 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
 int f2fs_sync_fs(struct super_block *sb, int sync);
 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
@@ -3626,7 +3665,9 @@ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
 /*
  * checkpoint.c
  */
-void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
+void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
+						unsigned char reason);
+void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
@@ -3655,7 +3696,8 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
 void f2fs_update_dirty_page(struct inode *inode, struct page *page);
 void f2fs_remove_dirty_inode(struct inode *inode);
-int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
+int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
+								bool from_cp);
 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
 u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
@@ -3761,9 +3803,19 @@ struct f2fs_stat_info {
 	struct f2fs_sb_info *sbi;
 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
 	int main_area_segs, main_area_sections, main_area_zones;
-	unsigned long long hit_largest, hit_cached, hit_rbtree;
-	unsigned long long hit_total, total_ext;
-	int ext_tree, zombie_tree, ext_node;
+	unsigned long long hit_cached[NR_EXTENT_CACHES];
+	unsigned long long hit_rbtree[NR_EXTENT_CACHES];
+	unsigned long long total_ext[NR_EXTENT_CACHES];
+	unsigned long long hit_total[NR_EXTENT_CACHES];
+	int ext_tree[NR_EXTENT_CACHES];
+	int zombie_tree[NR_EXTENT_CACHES];
+	int ext_node[NR_EXTENT_CACHES];
+	/* to count memory footprint */
+	unsigned long long ext_mem[NR_EXTENT_CACHES];
+	/* for read extent cache */
+	unsigned long long hit_largest;
+	/* for block age extent cache */
+	unsigned long long allocated_data_blocks;
 	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
 	int ndirty_data, ndirty_qdata;
 	int inmem_pages;
@@ -3824,10 +3876,10 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
 #define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
 #define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
 #define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
-#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
-#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
+#define stat_inc_total_hit(sbi, type)		(atomic64_inc(&(sbi)->total_hit_ext[type]))
+#define stat_inc_rbtree_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_rbtree[type]))
 #define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
-#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
+#define stat_inc_cached_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_cached[type]))
 #define stat_inc_inline_xattr(inode)					\
 	do {								\
 		if (f2fs_has_inline_xattr(inode))			\
@@ -3953,10 +4005,10 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
 #define stat_other_skip_bggc_count(sbi)		do { } while (0)
 #define stat_inc_dirty_inode(sbi, type)		do { } while (0)
 #define stat_dec_dirty_inode(sbi, type)		do { } while (0)
-#define stat_inc_total_hit(sbi)			do { } while (0)
-#define stat_inc_rbtree_node_hit(sbi)		do { } while (0)
+#define stat_inc_total_hit(sbi, type)		do { } while (0)
+#define stat_inc_rbtree_node_hit(sbi, type)	do { } while (0)
 #define stat_inc_largest_node_hit(sbi)		do { } while (0)
-#define stat_inc_cached_node_hit(sbi)		do { } while (0)
+#define stat_inc_cached_node_hit(sbi, type)	do { } while (0)
 #define stat_inc_inline_xattr(inode)		do { } while (0)
 #define stat_dec_inline_xattr(inode)		do { } while (0)
 #define stat_inc_inline_inode(inode)		do { } while (0)
@@ -4003,6 +4055,7 @@ extern struct kmem_cache *f2fs_inode_entry_slab;
  * inline.c
  */
 bool f2fs_may_inline_data(struct inode *inode);
+bool f2fs_sanity_check_inline_data(struct inode *inode);
 bool f2fs_may_inline_dentry(struct inode *inode);
 void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
 void f2fs_truncate_inline_inode(struct inode *inode,
@@ -4060,20 +4113,34 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
 		bool force, bool *leftmost);
 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
 				struct rb_root_cached *root, bool check_key);
-unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
-void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
+void f2fs_init_extent_tree(struct inode *inode);
 void f2fs_drop_extent_tree(struct inode *inode);
-unsigned int f2fs_destroy_extent_node(struct inode *inode);
+void f2fs_destroy_extent_node(struct inode *inode);
 void f2fs_destroy_extent_tree(struct inode *inode);
-bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
-			struct extent_info *ei);
-void f2fs_update_extent_cache(struct dnode_of_data *dn);
-void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
-			pgoff_t fofs, block_t blkaddr, unsigned int len);
 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
 int __init f2fs_create_extent_cache(void);
 void f2fs_destroy_extent_cache(void);
+
+/* read extent cache ops */
+void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage);
+bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
+			struct extent_info *ei);
+void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
+void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
+			pgoff_t fofs, block_t blkaddr, unsigned int len);
+unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
+			int nr_shrink);
+
+/* block age extent cache ops */
+void f2fs_init_age_extent_tree(struct inode *inode);
+bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
+			struct extent_info *ei);
+void f2fs_update_age_extent_cache(struct dnode_of_data *dn);
+void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
+			pgoff_t fofs, unsigned int len);
+unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi,
+			int nr_shrink);
+
 /*
  * sysfs.c
  */
@@ -4126,9 +4193,9 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
 bool f2fs_is_compress_backend_ready(struct inode *inode);
 int f2fs_init_compress_mempool(void);
 void f2fs_destroy_compress_mempool(void);
-void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
+void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
 void f2fs_end_read_compressed_page(struct page *page, bool failed,
-							block_t blkaddr);
+				block_t blkaddr, bool in_task);
 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
@@ -4137,12 +4204,17 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
 						struct writeback_control *wbc,
 						enum iostat_type io_type);
 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
+void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr,
+				unsigned int llen, unsigned int c_len);
 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 				unsigned nr_pages, sector_t *last_block_in_bio,
 				bool is_readahead, bool for_write);
 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
-void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
-void f2fs_put_page_dic(struct page *page);
+void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
+				bool in_task);
+void f2fs_put_page_dic(struct page *page, bool in_task);
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
 int f2fs_init_compress_ctx(struct compress_ctx *cc);
 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
@@ -4187,16 +4259,18 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
 }
 static inline int f2fs_init_compress_mempool(void) { return 0; }
 static inline void f2fs_destroy_compress_mempool(void) { }
-static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
+static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
+				bool in_task) { }
 static inline void f2fs_end_read_compressed_page(struct page *page,
-						bool failed, block_t blkaddr)
+				bool failed, block_t blkaddr, bool in_task)
 {
 	WARN_ON_ONCE(1);
 }
-static inline void f2fs_put_page_dic(struct page *page)
+static inline void f2fs_put_page_dic(struct page *page, bool in_task)
 {
 	WARN_ON_ONCE(1);
 }
+static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
@@ -4212,10 +4286,15 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
 							nid_t ino) { }
 #define inc_compr_inode_stat(inode)		do { } while (0)
+static inline void f2fs_update_read_extent_tree_range_compressed(
+				struct inode *inode,
+				pgoff_t fofs, block_t blkaddr,
+				unsigned int llen, unsigned int c_len) { }
 #endif
 
-static inline void set_compress_context(struct inode *inode)
+static inline int set_compress_context(struct inode *inode)
 {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 
 	F2FS_I(inode)->i_compress_algorithm =
@@ -4237,6 +4316,10 @@ static inline void set_compress_context(struct inode *inode)
 	stat_inc_compr_inode(inode);
 	inc_compr_inode_stat(inode);
 	f2fs_mark_inode_dirty_sync(inode, true);
+	return 0;
+#else
+	return -EOPNOTSUPP;
+#endif
 }
 
 static inline bool f2fs_disable_compressed_file(struct inode *inode)
@@ -4276,26 +4359,6 @@ F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
 F2FS_FEATURE_FUNCS(compression, COMPRESSION);
 F2FS_FEATURE_FUNCS(readonly, RO);
 
-static inline bool f2fs_may_extent_tree(struct inode *inode)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-
-	if (!test_opt(sbi, EXTENT_CACHE) ||
-			is_inode_flag_set(inode, FI_NO_EXTENT) ||
-			(is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
-			 !f2fs_sb_has_readonly(sbi)))
-		return false;
-
-	/*
-	 * for recovered files during mount do not create extents
-	 * if shrinker is not registered.
-	 */
-	if (list_empty(&sbi->s_list))
-		return false;
-
-	return S_ISREG(inode->i_mode);
-}
-
 #ifdef CONFIG_BLK_DEV_ZONED
 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
 				    block_t blkaddr)
@@ -4354,6 +4417,11 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
 }
 
+static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
+{
+	return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
+}
+
 static inline bool f2fs_may_compress(struct inode *inode)
 {
 	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 08c9b4579e2a..cd1353239ee7 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -628,7 +628,8 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 		 */
 		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
 							dn->inode) + ofs;
-		f2fs_update_extent_cache_range(dn, fofs, 0, len);
+		f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
+		f2fs_update_age_extent_cache_range(dn, fofs, nr_free);
 		dec_valid_block_count(sbi, dn->inode, nr_free);
 	}
 	dn->ofs_in_node = ofs;
@@ -1436,14 +1437,22 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
 			ret = -ENOSPC;
 			break;
 		}
-		if (dn->data_blkaddr != NEW_ADDR) {
-			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
-			dn->data_blkaddr = NEW_ADDR;
-			f2fs_set_data_blkaddr(dn);
+
+		if (dn->data_blkaddr == NEW_ADDR)
+			continue;
+
+		if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
+					DATA_GENERIC_ENHANCE)) {
+			ret = -EFSCORRUPTED;
+			break;
 		}
+
+		f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
+		dn->data_blkaddr = NEW_ADDR;
+		f2fs_set_data_blkaddr(dn);
 	}
 
-	f2fs_update_extent_cache_range(dn, start, 0, index - start);
+	f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
 
 	return ret;
 }
@@ -1759,6 +1768,10 @@ static long f2fs_fallocate(struct file *file, int mode,
 
 	inode_lock(inode);
 
+	ret = file_modified(file);
+	if (ret)
+		goto out;
+
 	if (mode & FALLOC_FL_PUNCH_HOLE) {
 		if (offset >= inode->i_size)
 			goto out;
@@ -1856,22 +1869,15 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
 		if (masked_flags & F2FS_COMPR_FL) {
 			if (!f2fs_disable_compressed_file(inode))
 				return -EINVAL;
-		}
-		if (iflags & F2FS_NOCOMP_FL)
-			return -EINVAL;
-		if (iflags & F2FS_COMPR_FL) {
+		} else {
 			if (!f2fs_may_compress(inode))
 				return -EINVAL;
 			if (S_ISREG(inode->i_mode) && inode->i_size)
 				return -EINVAL;
-
-			set_compress_context(inode);
+			if (set_compress_context(inode))
+				return -EOPNOTSUPP;
 		}
 	}
-	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
-		if (masked_flags & F2FS_COMPR_FL)
-			return -EINVAL;
-	}
 
 	fi->i_flags = iflags | (fi->i_flags & ~mask);
 	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
@@ -2265,7 +2271,8 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
 		if (ret) {
 			if (ret == -EROFS) {
 				ret = 0;
-				f2fs_stop_checkpoint(sbi, false);
+				f2fs_stop_checkpoint(sbi, false,
+						STOP_CP_REASON_SHUTDOWN);
 				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 				trace_f2fs_shutdown(sbi, in, ret);
 			}
@@ -2278,7 +2285,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
 		ret = freeze_bdev(sb->s_bdev);
 		if (ret)
 			goto out;
-		f2fs_stop_checkpoint(sbi, false);
+		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		thaw_bdev(sb->s_bdev);
 		break;
@@ -2287,16 +2294,16 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
 		ret = f2fs_sync_fs(sb, 1);
 		if (ret)
 			goto out;
-		f2fs_stop_checkpoint(sbi, false);
+		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
 	case F2FS_GOING_DOWN_NOSYNC:
-		f2fs_stop_checkpoint(sbi, false);
+		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
 	case F2FS_GOING_DOWN_METAFLUSH:
 		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
-		f2fs_stop_checkpoint(sbi, false);
+		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
 	case F2FS_GOING_DOWN_NEED_FSCK:
@@ -2605,7 +2612,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
 					.m_seg_type = NO_CHECK_TYPE,
 					.m_may_create = false };
-	struct extent_info ei = {0, 0, 0};
+	struct extent_info ei = {};
 	pgoff_t pg_start, pg_end, next_pgofs;
 	unsigned int blk_per_seg = sbi->blocks_per_seg;
 	unsigned int total = 0, sec_num;
@@ -2613,10 +2620,6 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
 	bool fragmented = false;
 	int err;
 
-	/* if in-place-update policy is enabled, don't waste time here */
-	if (f2fs_should_update_inplace(inode, NULL))
-		return -EINVAL;
-
 	pg_start = range->start >> PAGE_SHIFT;
 	pg_end = (range->start + range->len) >> PAGE_SHIFT;
 
@@ -2624,6 +2627,13 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
 
 	inode_lock(inode);
 
+	/* if in-place-update policy is enabled, don't waste time here */
+	set_inode_flag(inode, FI_OPU_WRITE);
+	if (f2fs_should_update_inplace(inode, NULL)) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	/* writeback all dirty pages in the range */
 	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
 						range->start + range->len - 1);
@@ -2634,7 +2644,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
 	 * lookup mapping info in extent cache, skip defragmenting if physical
 	 * block addresses are continuous.
 	 */
-	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
+	if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
 		if (ei.fofs + ei.len >= pg_end)
 			goto out;
 	}
@@ -2705,7 +2715,7 @@ do_map:
 		goto check;
 	}
 
-	set_inode_flag(inode, FI_DO_DEFRAG);
+	set_inode_flag(inode, FI_SKIP_WRITES);
 
 	idx = map.m_lblk;
 	while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
@@ -2730,15 +2740,16 @@ check:
 	if (map.m_lblk < pg_end && cnt < blk_per_seg)
 		goto do_map;
 
-	clear_inode_flag(inode, FI_DO_DEFRAG);
+	clear_inode_flag(inode, FI_SKIP_WRITES);
 
 	err = filemap_fdatawrite(inode->i_mapping);
 	if (err)
 		goto out;
 	}
clear_out:
-	clear_inode_flag(inode, FI_DO_DEFRAG);
+	clear_inode_flag(inode, FI_SKIP_WRITES);
out:
+	clear_inode_flag(inode, FI_OPU_WRITE);
 	inode_unlock(inode);
 	if (!err)
 		range->len = (u64)total << PAGE_SHIFT;
@@ -4131,8 +4142,8 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
 		goto out;
 	}
 
-	if (f2fs_is_mmap_file(inode)) {
-		ret = -EBUSY;
+	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+		ret = -EINVAL;
 		goto out;
 	}
 
@@ -4203,8 +4214,8 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
 		goto out;
 	}
 
-	if (f2fs_is_mmap_file(inode)) {
-		ret = -EBUSY;
+	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+		ret = -EINVAL;
 		goto out;
 	}
 
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 78e847010d2e..ab774c6fe035 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -68,7 +68,8 @@ static int gc_thread_func(void *data)
 
 		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
 			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
-			f2fs_stop_checkpoint(sbi, false);
+			f2fs_stop_checkpoint(sbi, false,
+					STOP_CP_REASON_FAULT_INJECT);
 		}
 
 		if (!sb_start_write_trylock(sbi->sb)) {
@@ -999,7 +1000,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 {
 	struct page *node_page;
 	nid_t nid;
-	unsigned int ofs_in_node;
+	unsigned int ofs_in_node, max_addrs;
 	block_t source_blkaddr;
 
 	nid = le32_to_cpu(sum->nid);
@@ -1025,6 +1026,14 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 		return false;
 	}
 
+	max_addrs = IS_INODE(node_page) ? DEF_ADDRS_PER_INODE :
+						DEF_ADDRS_PER_BLOCK;
+	if (ofs_in_node >= max_addrs) {
+		f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%u, nid:%u, max:%u",
+			ofs_in_node, dni->ino, dni->nid, max_addrs);
+		return false;
+	}
+
 	*nofs = ofs_of_node(node_page);
 	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
 	f2fs_put_page(node_page, 1);
@@ -1053,7 +1062,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
 	struct address_space *mapping = inode->i_mapping;
 	struct dnode_of_data dn;
 	struct page *page;
-	struct extent_info ei = {0, 0, 0};
+	struct extent_info ei = {0, };
 	struct f2fs_io_info fio = {
 		.sbi = sbi,
 		.ino = inode->i_ino,
@@ -1071,7 +1080,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
 	if (!page)
 		return -ENOMEM;
 
-	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+	if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
 		dn.data_blkaddr = ei.blk + index - ei.fofs;
 		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
 						DATA_GENERIC_ENHANCE_READ))) {
@@ -1181,7 +1190,8 @@ static int move_data_block(struct inode *inode, block_t bidx,
 	}
 
 	if (f2fs_is_pinned_file(inode)) {
-		f2fs_pin_file_control(inode, true);
+		if (gc_type == FG_GC)
+			f2fs_pin_file_control(inode, true);
 		err = -EAGAIN;
 		goto out;
 	}
@@ -1633,7 +1643,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
 				 segno, type, GET_SUM_TYPE((&sum->footer)));
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
-			f2fs_stop_checkpoint(sbi, false);
+			f2fs_stop_checkpoint(sbi, false,
+				STOP_CP_REASON_CORRUPTED_SUMMARY);
 			goto skip;
 		}
 
@@ -1761,23 +1772,31 @@ gc_more:
 	if (sync)
 		goto stop;
 
-	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
-		if (skipped_round <= MAX_SKIP_GC_COUNT ||
-					skipped_round * 2 < round) {
-			segno = NULL_SEGNO;
-			goto gc_more;
-		}
+	if (!has_not_enough_free_secs(sbi, sec_freed, 0))
+		goto stop;
 
-		if (first_skipped < last_skipped &&
-				(last_skipped - first_skipped) >
-					sbi->skipped_gc_rwsem) {
-			f2fs_drop_inmem_pages_all(sbi, true);
-			segno = NULL_SEGNO;
-			goto gc_more;
-		}
-		if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
+	if (skipped_round <= MAX_SKIP_GC_COUNT || skipped_round * 2 < round) {
+
+		/* Write checkpoint to reclaim prefree segments */
+		if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
+				prefree_segments(sbi) &&
+				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
 			ret = f2fs_write_checkpoint(sbi, &cpc);
+			if (ret)
+				goto stop;
+		}
+		segno = NULL_SEGNO;
+		goto gc_more;
 	}
+	if (first_skipped < last_skipped &&
+			(last_skipped - first_skipped) >
+				sbi->skipped_gc_rwsem) {
+		f2fs_drop_inmem_pages_all(sbi, true);
+		segno = NULL_SEGNO;
+		goto gc_more;
+	}
+	if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
+		ret = f2fs_write_checkpoint(sbi, &cpc);
stop:
 	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
 	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index e3beac546c63..2788ceeaf5c2 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -91,7 +91,7 @@ static u32 TEA_hash_name(const u8 *p, size_t len)
 /*
  * Compute @fname->hash.  For all directories, @fname->disk_name must be set.
  * For casefolded directories, @fname->usr_fname must be set, and also
- * @fname->cf_name if the filename is valid Unicode.
+ * @fname->cf_name if the filename is valid Unicode and is not "." or "..".
  */
 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname)
 {
@@ -110,10 +110,11 @@ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname)
 		/*
 		 * If the casefolded name is provided, hash it instead of the
 		 * on-disk name.  If the casefolded name is *not* provided, that
-		 * should only be because the name wasn't valid Unicode, so fall
-		 * back to treating the name as an opaque byte sequence.  Note
-		 * that to handle encrypted directories, the fallback must use
-		 * usr_fname (plaintext) rather than disk_name (ciphertext).
+		 * should only be because the name wasn't valid Unicode or was
+		 * "." or "..", so fall back to treating the name as an opaque
+		 * byte sequence.  Note that to handle encrypted directories,
+		 * the fallback must use usr_fname (plaintext) rather than
+		 * disk_name (ciphertext).
 		 */
 		WARN_ON_ONCE(!fname->usr_fname->name);
 		if (fname->cf_name.name) {
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index bb50338a38c9..470b069fa830 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -15,21 +15,40 @@
 #include 
 #include 
 
-bool f2fs_may_inline_data(struct inode *inode)
+static bool support_inline_data(struct inode *inode)
 {
 	if (f2fs_is_atomic_file(inode))
 		return false;
-
 	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
 		return false;
-
 	if (i_size_read(inode) > MAX_INLINE_DATA(inode))
 		return false;
+	return true;
+}
 
-	if (f2fs_post_read_required(inode))
+bool f2fs_may_inline_data(struct inode *inode)
+{
+	if (!support_inline_data(inode))
 		return false;
 
-	return true;
+	return !f2fs_post_read_required(inode);
+}
+
+bool f2fs_sanity_check_inline_data(struct inode *inode)
+{
+	if (!f2fs_has_inline_data(inode))
+		return false;
+
+	if (!support_inline_data(inode))
+		return true;
+
+	/*
+	 * used by sanity_check_inode(), when disk layout fields have not
+	 * been synchronized to inmem fields.
+	 */
+	return (S_ISREG(inode->i_mode) &&
+		(file_is_encrypt(inode) || file_is_verity(inode) ||
+		 (F2FS_I(inode)->i_flags & F2FS_COMPR_FL)));
 }
 
 bool f2fs_may_inline_dentry(struct inode *inode)
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index f93fee11977b..5bf4f1cccd71 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -260,8 +260,8 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
 		return false;
 	}
 
-	if (F2FS_I(inode)->extent_tree) {
-		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
+	if (fi->extent_tree[EX_READ]) {
+		struct extent_info *ei = &fi->extent_tree[EX_READ]->largest;
 
 		if (ei->len &&
 			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
@@ -276,8 +276,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
 		}
 	}
 
-	if (f2fs_has_inline_data(inode) &&
-			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
+	if (f2fs_sanity_check_inline_data(inode)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
 			  __func__, inode->i_ino, inode->i_mode);
@@ -381,8 +380,6 @@ static int do_read_inode(struct inode *inode)
 	fi->i_pino = le32_to_cpu(ri->i_pino);
 	fi->i_dir_level = ri->i_dir_level;
 
-	f2fs_init_extent_tree(inode, node_page);
-
 	get_inline_info(inode, ri);
 
 	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
@@ -470,6 +467,11 @@ static int do_read_inode(struct inode *inode) F2FS_I(inode)->i_disk_time[1] = inode->i_ctime; F2FS_I(inode)->i_disk_time[2] = inode->i_mtime; F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime; + + /* Need all the flag bits */ + f2fs_init_read_extent_tree(inode, node_page); + f2fs_init_age_extent_tree(inode); + f2fs_put_page(node_page, 1); stat_inc_inline_xattr(inode); @@ -572,7 +574,7 @@ retry: void f2fs_update_inode(struct inode *inode, struct page *node_page) { struct f2fs_inode *ri; - struct extent_tree *et = F2FS_I(inode)->extent_tree; + struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ]; f2fs_wait_on_page_writeback(node_page, NODE, true, true); set_page_dirty(node_page); @@ -591,7 +593,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page) if (et) { read_lock(&et->lock); - set_raw_extent(&et->largest, &ri->i_ext); + set_raw_read_extent(&et->largest, &ri->i_ext); read_unlock(&et->lock); } else { memset(&ri->i_ext, 0, sizeof(ri->i_ext)); @@ -686,7 +688,8 @@ retry: cond_resched(); goto retry; } else if (err != -ENOENT) { - f2fs_stop_checkpoint(sbi, false); + f2fs_stop_checkpoint(sbi, false, + STOP_CP_REASON_UPDATE_INODE); } return; } @@ -764,7 +767,8 @@ void f2fs_evict_inode(struct inode *inode) f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO); f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO); - sb_start_intwrite(inode->i_sb); + if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING)) + sb_start_intwrite(inode->i_sb); set_inode_flag(inode, FI_NO_ALLOC); i_size_write(inode, 0); retry: @@ -780,8 +784,22 @@ retry: f2fs_lock_op(sbi); err = f2fs_remove_inode_page(inode); f2fs_unlock_op(sbi); - if (err == -ENOENT) + if (err == -ENOENT) { err = 0; + + /* + * in fuzzed image, another node may has the same + * block address as inode's, if it was truncated + * previously, truncation of inode node will fail. 
+ */ + if (is_inode_flag_set(inode, FI_DIRTY_INODE)) { + f2fs_warn(F2FS_I_SB(inode), + "f2fs_evict_inode: inconsistent node id, ino:%lu", + inode->i_ino); + f2fs_inode_synced(inode); + set_sbi_flag(sbi, SBI_NEED_FSCK); + } + } } /* give more chances, if ENOMEM case */ @@ -795,7 +813,8 @@ retry: if (dquot_initialize_needed(inode)) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); } - sb_end_intwrite(inode->i_sb); + if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING)) + sb_end_intwrite(inode->i_sb); no_delete: dquot_drop(inode); diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 3048074618ff..14b2eb8a65e0 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -91,8 +91,6 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) if (test_opt(sbi, INLINE_XATTR)) set_inode_flag(inode, FI_INLINE_XATTR); - if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode)) - set_inode_flag(inode, FI_INLINE_DATA); if (f2fs_may_inline_dentry(inode)) set_inode_flag(inode, FI_INLINE_DENTRY); @@ -107,12 +105,6 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) } F2FS_I(inode)->i_inline_xattr_size = xattr_size; - f2fs_init_extent_tree(inode, NULL); - - stat_inc_inline_xattr(inode); - stat_inc_inline_inode(inode); - stat_inc_inline_dir(inode); - F2FS_I(inode)->i_flags = f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED); @@ -129,8 +121,18 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) set_compress_context(inode); } + /* Should enable inline_data after compression set */ + if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode)) + set_inode_flag(inode, FI_INLINE_DATA); + + stat_inc_inline_xattr(inode); + stat_inc_inline_inode(inode); + stat_inc_inline_dir(inode); + f2fs_set_inode_flags(inode); + f2fs_init_extent_tree(inode); + trace_f2fs_new_inode(inode, 0); return inode; @@ -317,6 +319,9 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode, if (!is_extension_exist(name, ext[i], false)) continue; + /* Do not use inline_data with compression */ + stat_dec_inline_inode(inode); + clear_inode_flag(inode, FI_INLINE_DATA); set_compress_context(inode); return; } @@ -613,6 +618,8 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) goto fail; } f2fs_delete_entry(de, page, dir, inode); + f2fs_unlock_op(sbi); + #ifdef CONFIG_UNICODE /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. 
Eventually we'll want to avoid @@ -623,8 +630,6 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) if (IS_CASEFOLDED(dir)) d_invalidate(dentry); #endif - f2fs_unlock_op(sbi); - if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); fail: diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 93fc1116f4a7..230c63c99b55 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -58,7 +58,7 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) avail_ram = val.totalram - val.totalhigh; /* - * give 25%, 25%, 50%, 50%, 50% memory for each components respectively + * give 25%, 25%, 50%, 50%, 25%, 25% memory for each component respectively */ if (type == FREE_NIDS) { mem_size = (nm_i->nid_cnt[FREE_NID] * @@ -83,12 +83,16 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) sizeof(struct ino_entry); mem_size >>= PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); - } else if (type == EXTENT_CACHE) { - mem_size = (atomic_read(&sbi->total_ext_tree) * + } else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) { + enum extent_type etype = type == READ_EXTENT_CACHE ? + EX_READ : EX_BLOCK_AGE; + struct extent_tree_info *eti = &sbi->extent_tree[etype]; + + mem_size = (atomic_read(&eti->total_ext_tree) * sizeof(struct extent_tree) + - atomic_read(&sbi->total_ext_node) * + atomic_read(&eti->total_ext_node) * sizeof(struct extent_node)) >> PAGE_SHIFT; - res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); + res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); } else if (type == INMEM_PAGES) { /* it allows 20% / total_ram for inmemory pages */ mem_size = get_pages(sbi, F2FS_INMEM_PAGES); @@ -846,6 +850,26 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) dn->ofs_in_node = offset[level]; dn->node_page = npage[level]; dn->data_blkaddr = f2fs_data_blkaddr(dn); + + if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) && + f2fs_sb_has_readonly(sbi)) { + unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn); + block_t blkaddr; + + if (!c_len) + goto out; + + blkaddr = f2fs_data_blkaddr(dn); + if (blkaddr == COMPRESS_ADDR) + blkaddr = data_blkaddr(dn->inode, dn->node_page, + dn->ofs_in_node + 1); + + f2fs_update_read_extent_tree_range_compressed(dn->inode, + index, blkaddr, + F2FS_I(dn->inode)->i_cluster_size, + c_len); + } +out: return 0; release_pages: @@ -1274,7 +1298,11 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs) dec_valid_node_count(sbi, dn->inode, !ofs); goto fail; } - f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR); + if (unlikely(new_ni.blk_addr != NULL_ADDR)) { + err = -EFSCORRUPTED; + set_sbi_flag(sbi, SBI_NEED_FSCK); + goto fail; + } #endif new_ni.nid = dn->nid; new_ni.ino = dn->inode->i_ino; @@ -1335,8 +1363,8 @@ static int read_node_page(struct page *page, int op_flags) if (err) return err; - if (unlikely(ni.blk_addr == NULL_ADDR) || - is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) { + /* NEW_ADDR can be seen after cp_error drops some dirty node pages */ + if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) { ClearPageUptodate(page); return -ENOENT; } diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index ff14a6e5ac1c..3de7891c8e79 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -148,7 +148,8 @@ enum mem_type { NAT_ENTRIES, /* indicates the cached nat entry */ DIRTY_DENTS, /* indicates dirty dentry pages */ INO_ENTRIES, /* indicates inode entries */ - EXTENT_CACHE, /* indicates extent cache */ + READ_EXTENT_CACHE, /* indicates read extent cache */
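/*
 * Editor's note, not part of the patch: the node.c hunk above splits the
 * old EXTENT_CACHE accounting across READ_EXTENT_CACHE and
 * AGE_EXTENT_CACHE, and tightens the budget from half of ram_thresh
 * (">> 1") to a quarter (">> 2") per cache, so the two caches together
 * keep the previous 50% share. The check, reusing avail_ram and
 * nm_i->ram_thresh exactly as in the hunk:
 *
 *	res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
 */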
+ AGE_EXTENT_CACHE, /* indicates age extent cache */ INMEM_PAGES, /* indicates inmemory pages */ DISCARD_CACHE, /* indicates memory of cached discard cmds */ COMPRESS_PAGE, /* indicates memory of cached compressed pages */ diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 564cec003beb..faadb5a8cfe2 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -451,7 +451,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, struct dnode_of_data tdn = *dn; nid_t ino, nid; struct inode *inode; - unsigned int offset; + unsigned int offset, ofs_in_node, max_addrs; block_t bidx; int i; @@ -478,15 +478,24 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, got_it: /* Use the locked dnode page and inode */ nid = le32_to_cpu(sum.nid); + ofs_in_node = le16_to_cpu(sum.ofs_in_node); + + max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode); + if (ofs_in_node >= max_addrs) { + f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u", + ofs_in_node, dn->inode->i_ino, nid, max_addrs); + return -EFSCORRUPTED; + } + if (dn->inode->i_ino == nid) { tdn.nid = nid; if (!dn->inode_page_locked) lock_page(dn->inode_page); tdn.node_page = dn->inode_page; - tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); + tdn.ofs_in_node = ofs_in_node; goto truncate_out; } else if (dn->nid == nid) { - tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node); + tdn.ofs_in_node = ofs_in_node; goto truncate_out; } @@ -676,6 +685,14 @@ retry_prev: goto err; } + if (f2fs_is_valid_blkaddr(sbi, dest, + DATA_GENERIC_ENHANCE_UPDATE)) { + f2fs_err(sbi, "Inconsistent dest blkaddr:%u, ino:%lu, ofs:%u", + dest, inode->i_ino, dn.ofs_in_node); + err = -EFSCORRUPTED; + goto err; + } + /* write dummy data page */ f2fs_replace_block(sbi, &dn, src, dest, ni.version, false, false); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index dfd418c97666..175402ba5c04 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -354,16 +354,19 @@ void f2fs_drop_inmem_page(struct inode *inode, struct page *page) struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct list_head *head = &fi->inmem_pages; struct inmem_pages *cur = NULL; + struct inmem_pages *tmp; f2fs_bug_on(sbi, !page_private_atomic(page)); mutex_lock(&fi->inmem_lock); - list_for_each_entry(cur, head, list) { - if (cur->page == page) + list_for_each_entry(tmp, head, list) { + if (tmp->page == page) { + cur = tmp; break; + } } - f2fs_bug_on(sbi, list_empty(head) || cur->page != page); + f2fs_bug_on(sbi, !cur); list_del(&cur->list); mutex_unlock(&fi->inmem_lock); @@ -496,7 +499,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) { if (time_to_inject(sbi, FAULT_CHECKPOINT)) { f2fs_show_injection_info(sbi, FAULT_CHECKPOINT); - f2fs_stop_checkpoint(sbi, false); + f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT); } /* balance_fs_bg is able to be pending */ @@ -533,8 +536,14 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg) return; /* try to shrink extent cache when there is not enough memory */ - if (!f2fs_available_free_memory(sbi, EXTENT_CACHE)) - f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER); + if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE)) + f2fs_shrink_read_extent_tree(sbi, + READ_EXTENT_CACHE_SHRINK_NUMBER); + + /* try to shrink age extent cache when there is not enough memory */ + if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE)) + f2fs_shrink_age_extent_tree(sbi, + AGE_EXTENT_CACHE_SHRINK_NUMBER); /* check the # of cached NAT entries */ if (!f2fs_available_free_memory(sbi,
NAT_ENTRIES)) @@ -570,7 +579,7 @@ do_sync: mutex_lock(&sbi->flush_lock); blk_start_plug(&plug); - f2fs_sync_dirty_inodes(sbi, FILE_INODE); + f2fs_sync_dirty_inodes(sbi, FILE_INODE, NULL); blk_finish_plug(&plug); mutex_unlock(&sbi->flush_lock); @@ -779,8 +788,11 @@ int f2fs_flush_device_cache(struct f2fs_sb_info *sbi) if (!f2fs_test_bit(i, (char *)&sbi->dirty_device)) continue; ret = __submit_flush_wait(sbi, FDEV(i).bdev); - if (ret) + if (ret) { + f2fs_stop_checkpoint(sbi, false, + STOP_CP_REASON_FLUSH_FAIL); break; + } spin_lock(&sbi->dev_lock); f2fs_clear_bit(i, (char *)&sbi->dirty_device); @@ -3286,10 +3298,28 @@ static int __get_segment_type_4(struct f2fs_io_info *fio) } } +static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct extent_info ei = {}; + + if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) { + if (!ei.age) + return NO_CHECK_TYPE; + if (ei.age <= sbi->hot_data_age_threshold) + return CURSEG_HOT_DATA; + if (ei.age <= sbi->warm_data_age_threshold) + return CURSEG_WARM_DATA; + return CURSEG_COLD_DATA; + } + return NO_CHECK_TYPE; +} + static int __get_segment_type_6(struct f2fs_io_info *fio) { if (fio->type == DATA) { struct inode *inode = fio->page->mapping->host; + int type; if (is_inode_flag_set(inode, FI_ALIGNED_WRITE)) return CURSEG_COLD_DATA_PINNED; @@ -3304,6 +3334,11 @@ static int __get_segment_type_6(struct f2fs_io_info *fio) } if (file_is_cold(inode) || f2fs_need_compress_data(inode)) return CURSEG_COLD_DATA; + + type = __get_age_segment_type(inode, fio->page->index); + if (type != NO_CHECK_TYPE) + return type; + if (file_is_hot(inode) || is_inode_flag_set(inode, FI_HOT_DATA) || f2fs_is_atomic_file(inode) || @@ -3415,6 +3450,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr)); + if (IS_DATASEG(type)) + atomic64_inc(&sbi->allocated_data_blocks); + up_write(&sit_i->sentry_lock); if (page && IS_NODESEG(type)) { @@ -3536,6 +3574,8 @@ void f2fs_outplace_write_data(struct dnode_of_data *dn, struct f2fs_summary sum; f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); + if (fio->io_type == FS_DATA_IO || fio->io_type == FS_CP_DATA_IO) + f2fs_update_age_extent_cache(dn); set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version); do_write_page(&sum, fio); f2fs_update_data_blkaddr(dn, fio->new_blkaddr); @@ -3568,6 +3608,10 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio) goto drop_bio; } + if (fio->post_read) + invalidate_mapping_pages(META_MAPPING(sbi), + fio->new_blkaddr, fio->new_blkaddr); + stat_inc_inplace_blocks(fio->sbi); if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE))) @@ -3747,10 +3791,16 @@ void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr) void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, block_t len) { + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); block_t i; + if (!f2fs_post_read_required(inode)) + return; + for (i = 0; i < len; i++) f2fs_wait_on_block_writeback(inode, blkaddr + i); + + invalidate_mapping_pages(META_MAPPING(sbi), blkaddr, blkaddr + len - 1); } static int read_compacted_summaries(struct f2fs_sb_info *sbi) @@ -4461,7 +4511,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi) unsigned int i, start, end; unsigned int readed, start_blk = 0; int err = 0; - block_t total_node_blocks = 0; + block_t sit_valid_blocks[2] = {0, 0}; do { readed = f2fs_ra_meta_pages(sbi, 
start_blk, BIO_MAX_PAGES, @@ -4486,8 +4536,14 @@ static int build_sit_entries(struct f2fs_sb_info *sbi) if (err) return err; seg_info_from_raw_sit(se, &sit); - if (IS_NODESEG(se->type)) - total_node_blocks += se->valid_blocks; + + if (se->type >= NR_PERSISTENT_LOG) { + f2fs_err(sbi, "Invalid segment type: %u, segno: %u", + se->type, start); + return -EFSCORRUPTED; + } + + sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks; /* build discard map only one time */ if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { @@ -4525,15 +4581,22 @@ static int build_sit_entries(struct f2fs_sb_info *sbi) sit = sit_in_journal(journal, i); old_valid_blocks = se->valid_blocks; - if (IS_NODESEG(se->type)) - total_node_blocks -= old_valid_blocks; + + sit_valid_blocks[SE_PAGETYPE(se)] -= old_valid_blocks; err = check_block_count(sbi, start, &sit); if (err) break; seg_info_from_raw_sit(se, &sit); - if (IS_NODESEG(se->type)) - total_node_blocks += se->valid_blocks; + + if (se->type >= NR_PERSISTENT_LOG) { + f2fs_err(sbi, "Invalid segment type: %u, segno: %u", + se->type, start); + err = -EFSCORRUPTED; + break; + } + + sit_valid_blocks[SE_PAGETYPE(se)] += se->valid_blocks; if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE); @@ -4553,13 +4616,24 @@ static int build_sit_entries(struct f2fs_sb_info *sbi) } up_read(&curseg->journal_rwsem); - if (!err && total_node_blocks != valid_node_count(sbi)) { + if (err) + return err; + + if (sit_valid_blocks[NODE] != valid_node_count(sbi)) { f2fs_err(sbi, "SIT is corrupted node# %u vs %u", - total_node_blocks, valid_node_count(sbi)); - err = -EFSCORRUPTED; + sit_valid_blocks[NODE], valid_node_count(sbi)); + return -EFSCORRUPTED; } - return err; + if (sit_valid_blocks[DATA] + sit_valid_blocks[NODE] > + valid_user_blocks(sbi)) { + f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u", + sit_valid_blocks[DATA], sit_valid_blocks[NODE], + valid_user_blocks(sbi)); + return -EFSCORRUPTED; + } + + return 0; } static void init_free_segmap(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 7d9166a2ff41..d31eb2f2019c 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -24,6 +24,7 @@ #define IS_DATASEG(t) ((t) <= CURSEG_COLD_DATA) #define IS_NODESEG(t) ((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE) +#define SE_PAGETYPE(se) ((IS_NODESEG((se)->type) ? 
NODE : DATA)) static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi, unsigned short seg_type) @@ -571,11 +572,10 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi) return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi)); } -static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi) +static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi, + unsigned int node_blocks, unsigned int dent_blocks) { - unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) + - get_pages(sbi, F2FS_DIRTY_DENTS); - unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS); + unsigned int segno, left_blocks; int i; @@ -601,19 +601,28 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi) static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed, int needed) { - int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES); - int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS); - int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA); + unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) + + get_pages(sbi, F2FS_DIRTY_DENTS) + + get_pages(sbi, F2FS_DIRTY_IMETA); + unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS); + unsigned int node_secs = total_node_blocks / BLKS_PER_SEC(sbi); + unsigned int dent_secs = total_dent_blocks / BLKS_PER_SEC(sbi); + unsigned int node_blocks = total_node_blocks % BLKS_PER_SEC(sbi); + unsigned int dent_blocks = total_dent_blocks % BLKS_PER_SEC(sbi); + unsigned int free, need_lower, need_upper; if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) return false; - if (free_sections(sbi) + freed == reserved_sections(sbi) + needed && - has_curseg_enough_space(sbi)) + free = free_sections(sbi) + freed; + need_lower = node_secs + dent_secs + reserved_sections(sbi) + needed; + need_upper = need_lower + (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0); + + if (free > need_upper) return false; - return (free_sections(sbi) + freed) <= - (node_secs + 2 * dent_secs + imeta_secs + - reserved_sections(sbi) + needed); + else if (free <= need_lower) + return true; + return !has_curseg_enough_space(sbi, node_blocks, dent_blocks); } static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi) @@ -650,7 +659,9 @@ static inline int utilization(struct f2fs_sb_info *sbi) * pages over min_fsync_blocks. (=default option) * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests. * F2FS_IPU_NOCACHE - disable IPU bio cache. - * F2FS_IPUT_DISABLE - disable IPU. (=default option in LFS mode) + * F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has + * FI_OPU_WRITE flag. + * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode) */ #define DEF_MIN_IPU_UTIL 70 #define DEF_MIN_FSYNC_BLOCKS 8 @@ -666,6 +677,7 @@ enum { F2FS_IPU_FSYNC, F2FS_IPU_ASYNC, F2FS_IPU_NOCACHE, + F2FS_IPU_HONOR_OPU_WRITE, }; static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi, diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c index dd3c3c7a90ec..83d6fb97dcae 100644 --- a/fs/f2fs/shrinker.c +++ b/fs/f2fs/shrinker.c @@ -28,10 +28,13 @@ static unsigned long __count_free_nids(struct f2fs_sb_info *sbi) return count > 0 ? 
count : 0; } -static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi) +static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi, + enum extent_type type) { - return atomic_read(&sbi->total_zombie_tree) + - atomic_read(&sbi->total_ext_node); + struct extent_tree_info *eti = &sbi->extent_tree[type]; + + return atomic_read(&eti->total_zombie_tree) + + atomic_read(&eti->total_ext_node); } unsigned long f2fs_shrink_count(struct shrinker *shrink, @@ -53,8 +56,11 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink, } spin_unlock(&f2fs_list_lock); - /* count extent cache entries */ - count += __count_extent_cache(sbi); + /* count read extent cache entries */ + count += __count_extent_cache(sbi, EX_READ); + + /* count block age extent cache entries */ + count += __count_extent_cache(sbi, EX_BLOCK_AGE); /* count clean nat cache entries */ count += __count_nat_entries(sbi); @@ -100,7 +106,10 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink, sbi->shrinker_run_no = run_no; /* shrink extent cache entries */ - freed += f2fs_shrink_extent_tree(sbi, nr >> 1); + freed += f2fs_shrink_age_extent_tree(sbi, nr >> 2); + + /* shrink read extent cache entries */ + freed += f2fs_shrink_read_extent_tree(sbi, nr >> 2); /* shrink clean nat cache entries */ if (freed < nr) @@ -130,7 +139,9 @@ void f2fs_join_shrinker(struct f2fs_sb_info *sbi) void f2fs_leave_shrinker(struct f2fs_sb_info *sbi) { - f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi)); + f2fs_shrink_read_extent_tree(sbi, __count_extent_cache(sbi, EX_READ)); + f2fs_shrink_age_extent_tree(sbi, + __count_extent_cache(sbi, EX_BLOCK_AGE)); spin_lock(&f2fs_list_lock); list_del_init(&sbi->s_list); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 74b0564d8d7d..40ba04016644 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -154,6 +154,8 @@ enum { Opt_atgc, Opt_gc_merge, Opt_nogc_merge, + Opt_memory_mode, + Opt_age_extent_cache, Opt_err, }; @@ -229,6 +231,8 @@ static match_table_t f2fs_tokens = { {Opt_atgc, "atgc"}, {Opt_gc_merge, "gc_merge"}, {Opt_nogc_merge, "nogc_merge"}, + {Opt_memory_mode, "memory=%s"}, + {Opt_age_extent_cache, "age_extent_cache"}, {Opt_err, NULL}, }; @@ -299,10 +303,10 @@ static void f2fs_destroy_casefold_cache(void) { } static inline void limit_reserve_root(struct f2fs_sb_info *sbi) { - block_t limit = min((sbi->user_block_count << 1) / 1000, + block_t limit = min((sbi->user_block_count >> 3), sbi->user_block_count - sbi->reserved_blocks); - /* limit is 0.2% */ + /* limit is 12.5% */ if (test_opt(sbi, RESERVE_ROOT) && F2FS_OPTION(sbi).root_reserved_blocks > limit && F2FS_OPTION(sbi).root_reserved_blocks > MIN_ROOT_RESERVED_BLOCKS) { @@ -754,10 +758,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) set_opt(sbi, FASTBOOT); break; case Opt_extent_cache: - set_opt(sbi, EXTENT_CACHE); + set_opt(sbi, READ_EXTENT_CACHE); break; case Opt_noextent_cache: - clear_opt(sbi, EXTENT_CACHE); + clear_opt(sbi, READ_EXTENT_CACHE); break; case Opt_noinline_data: clear_opt(sbi, INLINE_DATA); @@ -1149,6 +1153,25 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) case Opt_nogc_merge: clear_opt(sbi, GC_MERGE); break; + case Opt_age_extent_cache: + set_opt(sbi, AGE_EXTENT_CACHE); + break; + case Opt_memory_mode: + name = match_strdup(&args[0]); + if (!name) + return -ENOMEM; + if (!strcmp(name, "normal")) { + F2FS_OPTION(sbi).memory_mode = + MEMORY_MODE_NORMAL; + } else if (!strcmp(name, "low")) { + F2FS_OPTION(sbi).memory_mode = + MEMORY_MODE_LOW; 
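/*
 * Editor's note, not part of the patch: "memory=%s" only accepts the two
 * literals matched above ("normal" and "low"); anything else fails with
 * -EINVAL. A hypothetical mount line exercising both new options from
 * this hunk (device and mount point are placeholders):
 *
 *	mount -t f2fs -o memory=low,age_extent_cache /dev/sdX /mnt
 */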
+ } else { + kfree(name); + return -EINVAL; + } + kfree(name); + break; default: f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value", p); @@ -1558,14 +1581,17 @@ static int f2fs_freeze(struct super_block *sb) if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY)) return -EINVAL; - /* ensure no checkpoint required */ - if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list)) - return -EINVAL; + /* Let's flush checkpoints and stop the thread. */ + f2fs_flush_ckpt_thread(F2FS_SB(sb)); + + /* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */ + set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING); return 0; } static int f2fs_unfreeze(struct super_block *sb) { + clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING); return 0; } @@ -1818,10 +1844,12 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, ",nobarrier"); if (test_opt(sbi, FASTBOOT)) seq_puts(seq, ",fastboot"); - if (test_opt(sbi, EXTENT_CACHE)) + if (test_opt(sbi, READ_EXTENT_CACHE)) seq_puts(seq, ",extent_cache"); else seq_puts(seq, ",noextent_cache"); + if (test_opt(sbi, AGE_EXTENT_CACHE)) + seq_puts(seq, ",age_extent_cache"); if (test_opt(sbi, DATA_FLUSH)) seq_puts(seq, ",data_flush"); @@ -1895,6 +1923,12 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) if (test_opt(sbi, ATGC)) seq_puts(seq, ",atgc"); + + if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL) + seq_printf(seq, ",memory=%s", "normal"); + else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW) + seq_printf(seq, ",memory=%s", "low"); + return 0; } @@ -1917,13 +1951,14 @@ static void default_options(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).compress_ext_cnt = 0; F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS; F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; + F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL; sbi->sb->s_flags &= ~SB_INLINECRYPT; set_opt(sbi, INLINE_XATTR); set_opt(sbi, INLINE_DATA); set_opt(sbi, INLINE_DENTRY); - set_opt(sbi, EXTENT_CACHE); + set_opt(sbi, READ_EXTENT_CACHE); set_opt(sbi, NOHEAP); clear_opt(sbi, DISABLE_CHECKPOINT); set_opt(sbi, MERGE_CHECKPOINT); @@ -2029,6 +2064,9 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) f2fs_up_write(&sbi->gc_lock); f2fs_sync_fs(sbi->sb, 1); + + /* Let's ensure there's no pending checkpoint anymore */ + f2fs_flush_ckpt_thread(sbi); } static int f2fs_remount(struct super_block *sb, int *flags, char *data) @@ -2040,7 +2078,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) bool need_restart_gc = false, need_stop_gc = false; bool need_restart_ckpt = false, need_stop_ckpt = false; bool need_restart_flush = false, need_stop_flush = false; - bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE); + bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE); + bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE); bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT); bool no_io_align = !F2FS_IO_ALIGNED(sbi); bool no_atgc = !test_opt(sbi, ATGC); @@ -2130,11 +2169,17 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) } /* disallow enable/disable extent_cache dynamically */ - if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) { + if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) { err = -EINVAL; f2fs_warn(sbi, "switch extent_cache option is not allowed"); goto restore_opts; } + /* disallow enable/disable age extent_cache dynamically */ + if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) { + err = -EINVAL; + f2fs_warn(sbi, "switch age_extent_cache option is not 
allowed"); + goto restore_opts; + } if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) { err = -EINVAL; @@ -2188,6 +2233,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) f2fs_stop_ckpt_thread(sbi); need_restart_ckpt = true; } else { + /* Flush the previous checkpoint, if it exists. */ + f2fs_flush_ckpt_thread(sbi); + err = f2fs_start_ckpt_thread(sbi); if (err) { f2fs_err(sbi, @@ -2543,7 +2591,8 @@ int f2fs_quota_sync(struct super_block *sb, int type) if (!sb_has_quota_active(sb, cnt)) continue; - inode_lock(dqopt->files[cnt]); + if (!f2fs_sb_has_quota_ino(sbi)) + inode_lock(dqopt->files[cnt]); /* * do_quotactl @@ -2562,7 +2611,8 @@ int f2fs_quota_sync(struct super_block *sb, int type) f2fs_up_read(&sbi->quota_sem); f2fs_unlock_op(sbi); - inode_unlock(dqopt->files[cnt]); + if (!f2fs_sb_has_quota_ino(sbi)) + inode_unlock(dqopt->files[cnt]); if (ret) break; @@ -3635,6 +3685,26 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) return err; } +void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason) +{ + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); + int err; + + f2fs_bug_on(sbi, reason >= MAX_STOP_REASON); + + f2fs_down_write(&sbi->sb_lock); + + if (raw_super->s_stop_reason[reason] < ((1 << BITS_PER_BYTE) - 1)) + raw_super->s_stop_reason[reason]++; + + err = f2fs_commit_super(sbi, false); + if (err) + f2fs_err(sbi, "f2fs_commit_super fails to record reason:%u err:%d", + reason, err); + + f2fs_up_write(&sbi->sb_lock); +} + static int f2fs_scan_devices(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); @@ -3775,7 +3845,8 @@ static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi) if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) { F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; sm_i->dcc_info->discard_granularity = 1; - sm_i->ipu_policy = 1 << F2FS_IPU_FORCE; + sm_i->ipu_policy = 1 << F2FS_IPU_FORCE | + 1 << F2FS_IPU_HONOR_OPU_WRITE; } sbi->readdir_ra = 1; diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 23eba7514c9c..a095919c55ad 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -549,6 +549,33 @@ out: return count; } + if (!strcmp(a->attr.name, "hot_data_age_threshold")) { + if (t == 0 || t >= sbi->warm_data_age_threshold) + return -EINVAL; + if (t == *ui) + return count; + *ui = (unsigned int)t; + return count; + } + + if (!strcmp(a->attr.name, "warm_data_age_threshold")) { + if (t == 0 || t <= sbi->hot_data_age_threshold) + return -EINVAL; + if (t == *ui) + return count; + *ui = (unsigned int)t; + return count; + } + + if (!strcmp(a->attr.name, "last_age_weight")) { + if (t > 100) + return -EINVAL; + if (t == *ui) + return count; + *ui = (unsigned int)t; + return count; + } + *ui = (unsigned int)t; return count; @@ -778,6 +805,11 @@ F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_age_threshold, age_threshold); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_segment_mode, gc_segment_mode); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_reclaimed_segments, gc_reclaimed_segs); +/* For block age extent cache */ +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, hot_data_age_threshold, hot_data_age_threshold); +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, warm_data_age_threshold, warm_data_age_threshold); +F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, last_age_weight, last_age_weight); + #define ATTR_LIST(name) (&f2fs_attr_##name.attr) static struct attribute *f2fs_attrs[] = { ATTR_LIST(gc_urgent_sleep_time), @@ -853,6 +885,9 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(atgc_age_threshold), ATTR_LIST(gc_segment_mode),
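/*
 * Editor's note, not part of the patch: __sbi_store() above enforces
 * 0 < hot_data_age_threshold < warm_data_age_threshold and
 * last_age_weight <= 100, so raising both thresholds has to go
 * warm-first, then hot (lowering goes hot-first). A hypothetical tuning
 * sequence ("<dev>" is a placeholder):
 *
 *	echo 5242880 > /sys/fs/f2fs/<dev>/warm_data_age_threshold
 *	echo 524288 > /sys/fs/f2fs/<dev>/hot_data_age_threshold
 */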
ATTR_LIST(gc_reclaimed_segments), + ATTR_LIST(hot_data_age_threshold), + ATTR_LIST(warm_data_age_threshold), + ATTR_LIST(last_age_weight), NULL, }; ATTRIBUTE_GROUPS(f2fs); diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c index c700fb47e895..4a9db2d89d0f 100644 --- a/fs/f2fs/verity.c +++ b/fs/f2fs/verity.c @@ -261,13 +261,14 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode, pgoff_t index, unsigned long num_ra_pages) { - DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index); struct page *page; index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT; page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED); if (!page || !PageUptodate(page)) { + DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index); + if (page) put_page(page); else if (num_ra_pages > 1) diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index f7e3304b7802..353735032947 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c @@ -93,7 +93,8 @@ static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent, err_brelse: brelse(bhs[0]); err: - fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr); + fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)", + (llu)blocknr); return -EIO; } @@ -106,8 +107,8 @@ static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent, fatent->fat_inode = MSDOS_SB(sb)->fat_inode; fatent->bhs[0] = sb_bread(sb, blocknr); if (!fatent->bhs[0]) { - fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", - (llu)blocknr); + fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)", + (llu)blocknr); return -EIO; } fatent->nr_bhs = 1; diff --git a/fs/fcntl.c b/fs/fcntl.c index 71b43538fa44..fcf34f83bf6a 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -148,12 +148,17 @@ void f_delown(struct file *filp) pid_t f_getown(struct file *filp) { - pid_t pid; - read_lock(&filp->f_owner.lock); - pid = pid_vnr(filp->f_owner.pid); - if (filp->f_owner.pid_type == PIDTYPE_PGID) - pid = -pid; - read_unlock(&filp->f_owner.lock); + pid_t pid = 0; + + read_lock_irq(&filp->f_owner.lock); + rcu_read_lock(); + if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) { + pid = pid_vnr(filp->f_owner.pid); + if (filp->f_owner.pid_type == PIDTYPE_PGID) + pid = -pid; + } + rcu_read_unlock(); + read_unlock_irq(&filp->f_owner.lock); return pid; } @@ -200,11 +205,14 @@ static int f_setown_ex(struct file *filp, unsigned long arg) static int f_getown_ex(struct file *filp, unsigned long arg) { struct f_owner_ex __user *owner_p = (void __user *)arg; - struct f_owner_ex owner; + struct f_owner_ex owner = {}; int ret = 0; - read_lock(&filp->f_owner.lock); - owner.pid = pid_vnr(filp->f_owner.pid); + read_lock_irq(&filp->f_owner.lock); + rcu_read_lock(); + if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) + owner.pid = pid_vnr(filp->f_owner.pid); + rcu_read_unlock(); switch (filp->f_owner.pid_type) { case PIDTYPE_PID: owner.type = F_OWNER_TID; @@ -223,7 +231,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg) ret = -EINVAL; break; } - read_unlock(&filp->f_owner.lock); + read_unlock_irq(&filp->f_owner.lock); if (!ret) { ret = copy_to_user(owner_p, &owner, sizeof(owner)); @@ -241,10 +249,10 @@ static int f_getowner_uids(struct file *filp, unsigned long arg) uid_t src[2]; int err; - read_lock(&filp->f_owner.lock); + read_lock_irq(&filp->f_owner.lock); src[0] = from_kuid(user_ns, filp->f_owner.uid); src[1] = from_kuid(user_ns, filp->f_owner.euid); - read_unlock(&filp->f_owner.lock); + read_unlock_irq(&filp->f_owner.lock); err = put_user(src[0], &dst[0]); 
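/*
 * Editor's note, not part of the patch: the fcntl.c hunks above take
 * f_owner.lock with the _irq variants (the lock is presumably also taken
 * from interrupt context, e.g. on the fasync/SIGIO delivery path) and
 * gate pid_vnr() behind pid_task(), so an owner whose task has exited
 * reads back as pid 0 rather than a stale number. A hypothetical
 * userspace probe of that behaviour:
 *
 *	struct f_owner_ex owner;
 *	if (fcntl(fd, F_GETOWN_EX, &owner) == 0 && owner.pid == 0)
 *		printf("no live owner\n");
 */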
err |= put_user(src[1], &dst[1]); diff --git a/fs/file.c b/fs/file.c index 8431dfde036c..97a0cd31faec 100644 --- a/fs/file.c +++ b/fs/file.c @@ -22,6 +22,8 @@ #include #include +#include "internal.h" + unsigned int sysctl_nr_open __read_mostly = 1024*1024; unsigned int sysctl_nr_open_min = BITS_PER_LONG; /* our min() is unusable in constant expressions ;-/ */ @@ -780,9 +782,8 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags) } /* - * variant of __close_fd that gets a ref on the file for later fput. - * The caller must ensure that filp_close() called on the file, and then - * an fput(). + * See close_fd_get_file() below; this variant assumes current->files->file_lock + * is held. */ int __close_fd_get_file(unsigned int fd, struct file **res) { @@ -790,26 +791,39 @@ int __close_fd_get_file(unsigned int fd, struct file **res) struct file *file; struct fdtable *fdt; - spin_lock(&files->file_lock); fdt = files_fdtable(files); if (fd >= fdt->max_fds) - goto out_unlock; + goto out_err; file = fdt->fd[fd]; if (!file) - goto out_unlock; + goto out_err; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); - spin_unlock(&files->file_lock); get_file(file); *res = file; return 0; - -out_unlock: - spin_unlock(&files->file_lock); +out_err: *res = NULL; return -ENOENT; } +/* + * Variant of close_fd() that gets a ref on the file for later fput. + * The caller must ensure that filp_close() is called on the file, + * followed by an fput(). + */ +int close_fd_get_file(unsigned int fd, struct file **res) +{ + struct files_struct *files = current->files; + int ret; + + spin_lock(&files->file_lock); + ret = __close_fd_get_file(fd, res); + spin_unlock(&files->file_lock); + + return ret; +} + void do_close_on_exec(struct files_struct *files) { unsigned i; diff --git a/fs/file_table.c b/fs/file_table.c index 709ada3151da..7a3b4a7f6808 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -376,6 +376,7 @@ void __fput_sync(struct file *file) } EXPORT_SYMBOL(fput); +EXPORT_SYMBOL(__fput_sync); void __init files_init(void) { diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index d51354221fe7..87bf2ffc7c7f 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -1650,11 +1650,12 @@ static long writeback_sb_inodes(struct super_block *sb, }; unsigned long start_time = jiffies; long write_chunk; - long wrote = 0; /* count both pages and inodes */ + long total_wrote = 0; /* count both pages and inodes */ while (!list_empty(&wb->b_io)) { struct inode *inode = wb_inode(wb->b_io.prev); struct bdi_writeback *tmp_wb; + long wrote; if (inode->i_sb != sb) { if (work->sb) { @@ -1730,7 +1731,9 @@ static long writeback_sb_inodes(struct super_block *sb, wbc_detach_inode(&wbc); work->nr_pages -= write_chunk - wbc.nr_to_write; - wrote += write_chunk - wbc.nr_to_write; + wrote = write_chunk - wbc.nr_to_write - wbc.pages_skipped; + wrote = wrote < 0 ? 0 : wrote; + total_wrote += wrote; if (need_resched()) { /* @@ -1752,7 +1755,7 @@ static long writeback_sb_inodes(struct super_block *sb, tmp_wb = inode_to_wb_and_lock_list(inode); spin_lock(&inode->i_lock); if (!(inode->i_state & I_DIRTY_ALL)) - wrote++; + total_wrote++; requeue_inode(inode, tmp_wb, &wbc); inode_sync_complete(inode); spin_unlock(&inode->i_lock); @@ -1766,14 +1769,14 @@ static long writeback_sb_inodes(struct super_block *sb, * bail out to wb_writeback() often enough to check * background threshold and other termination conditions.
*/ - if (wrote) { + if (total_wrote) { if (time_is_before_jiffies(start_time + HZ / 10UL)) break; if (work->nr_pages <= 0) break; } } - return wrote; + return total_wrote; } static long __writeback_inodes_wb(struct bdi_writeback *wb, diff --git a/fs/fuse/control.c b/fs/fuse/control.c index cc7e94d73c6c..24b4d9db231d 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -275,7 +275,7 @@ int fuse_ctl_add_conn(struct fuse_conn *fc) struct dentry *parent; char name[32]; - if (!fuse_control_sb) + if (!fuse_control_sb || fc->no_control) return 0; parent = fuse_control_sb->s_root; @@ -313,7 +313,7 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc) { int i; - if (!fuse_control_sb) + if (!fuse_control_sb || fc->no_control) return; for (i = fc->ctl_ndents - 1; i >= 0; i--) { diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 42e8bcc011f0..fbd7a8b4e90c 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -208,10 +208,13 @@ static unsigned int fuse_req_hash(u64 unique) /** * A new request is available, wake fiq->waitq */ -static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq) +static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq, bool sync) __releases(fiq->lock) { - wake_up(&fiq->waitq); + if (sync) + wake_up_sync(&fiq->waitq); + else + wake_up(&fiq->waitq); kill_fasync(&fiq->fasync, SIGIO, POLL_IN); spin_unlock(&fiq->lock); } @@ -224,14 +227,14 @@ const struct fuse_iqueue_ops fuse_dev_fiq_ops = { EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops); static void queue_request_and_unlock(struct fuse_iqueue *fiq, - struct fuse_req *req) + struct fuse_req *req, bool sync) __releases(fiq->lock) { req->in.h.len = sizeof(struct fuse_in_header) + fuse_len_args(req->args->in_numargs, (struct fuse_arg *) req->args->in_args); list_add_tail(&req->list, &fiq->pending); - fiq->ops->wake_pending_and_unlock(fiq); + fiq->ops->wake_pending_and_unlock(fiq, sync); } void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, @@ -246,7 +249,7 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, if (fiq->connected) { fiq->forget_list_tail->next = forget; fiq->forget_list_tail = forget; - fiq->ops->wake_forget_and_unlock(fiq); + fiq->ops->wake_forget_and_unlock(fiq, false); } else { kfree(forget); spin_unlock(&fiq->lock); @@ -266,7 +269,7 @@ static void flush_bg_queue(struct fuse_conn *fc) fc->active_background++; spin_lock(&fiq->lock); req->in.h.unique = fuse_get_unique(fiq); - queue_request_and_unlock(fiq, req); + queue_request_and_unlock(fiq, req, false); } } @@ -359,7 +362,7 @@ static int queue_interrupt(struct fuse_req *req) spin_unlock(&fiq->lock); return 0; } - fiq->ops->wake_interrupt_and_unlock(fiq); + fiq->ops->wake_interrupt_and_unlock(fiq, false); } else { spin_unlock(&fiq->lock); } @@ -426,7 +429,7 @@ static void __fuse_request_send(struct fuse_req *req) /* acquire extra reference, since request is still needed after fuse_request_end() */ __fuse_get_request(req); - queue_request_and_unlock(fiq, req); + queue_request_and_unlock(fiq, req, true); request_wait_answer(req); /* Pairs with smp_wmb() in fuse_request_end() */ @@ -601,7 +604,7 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm, spin_lock(&fiq->lock); if (fiq->connected) { - queue_request_and_unlock(fiq, req); + queue_request_and_unlock(fiq, req, false); } else { err = -ENODEV; spin_unlock(&fiq->lock); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 895d608b6ab5..b3730a21516a 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -205,7 +205,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, 
unsigned int flags) if (inode && fuse_is_bad(inode)) goto invalid; else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) || - (flags & LOOKUP_REVAL)) { + (flags & (LOOKUP_EXCL | LOOKUP_REVAL))) { struct fuse_entry_out outarg; FUSE_ARGS(args); struct fuse_forget_link *forget; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index a68c8310942f..34dd6466b7a6 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -3284,10 +3284,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, .mode = mode }; int err; - bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || - (mode & FALLOC_FL_PUNCH_HOLE); - - bool block_faults = FUSE_IS_DAX(inode) && lock_inode; + bool block_faults = FUSE_IS_DAX(inode) && + (!(mode & FALLOC_FL_KEEP_SIZE) || + (mode & FALLOC_FL_PUNCH_HOLE)); if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) return -EOPNOTSUPP; @@ -3295,22 +3294,20 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, if (fm->fc->no_fallocate) return -EOPNOTSUPP; - if (lock_inode) { - inode_lock(inode); - if (block_faults) { - down_write(&fi->i_mmap_sem); - err = fuse_dax_break_layouts(inode, 0, 0); - if (err) - goto out; - } + inode_lock(inode); + if (block_faults) { + down_write(&fi->i_mmap_sem); + err = fuse_dax_break_layouts(inode, 0, 0); + if (err) + goto out; + } - if (mode & FALLOC_FL_PUNCH_HOLE) { - loff_t endbyte = offset + length - 1; + if (mode & FALLOC_FL_PUNCH_HOLE) { + loff_t endbyte = offset + length - 1; - err = fuse_writeback_range(inode, offset, endbyte); - if (err) - goto out; - } + err = fuse_writeback_range(inode, offset, endbyte); + if (err) + goto out; } if (!(mode & FALLOC_FL_KEEP_SIZE) && @@ -3320,6 +3317,10 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, goto out; } + err = file_modified(file); + if (err) + goto out; + if (!(mode & FALLOC_FL_KEEP_SIZE)) set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); @@ -3356,8 +3357,7 @@ out: if (block_faults) up_write(&fi->i_mmap_sem); - if (lock_inode) - inode_unlock(inode); + inode_unlock(inode); fuse_flush_time_update(inode); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 69a631c12b15..e20c341864b5 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -412,19 +412,19 @@ struct fuse_iqueue_ops { /** * Signal that a forget has been queued */ - void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq) + void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq, bool sync) __releases(fiq->lock); /** * Signal that an INTERRUPT request has been queued */ - void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq) + void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq, bool sync) __releases(fiq->lock); /** * Signal that a request has been queued */ - void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq) + void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq, bool sync) __releases(fiq->lock); /** diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index b6222bdaea80..50c595bf8ee2 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -183,6 +183,12 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, inode->i_uid = make_kuid(fc->user_ns, attr->uid); inode->i_gid = make_kgid(fc->user_ns, attr->gid); inode->i_blocks = attr->blocks; + + /* Sanitize nsecs */ + attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1); + attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1); + attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1); + inode->i_atime.tv_sec = attr->atime; inode->i_atime.tv_nsec = 
attr->atimensec; /* mtime from server may be stale due to local buffered write */ diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c index bc267832310c..d5294e663df5 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -77,8 +77,10 @@ static void fuse_add_dirent_to_cache(struct file *file, goto unlock; addr = kmap_atomic(page); - if (!offset) + if (!offset) { clear_page(addr); + SetPageUptodate(page); + } memcpy(addr + offset, dirent, reclen); kunmap_atomic(addr); fi->rdc.size = (index << PAGE_SHIFT) + offset + reclen; @@ -516,6 +518,12 @@ retry_locked: page = find_get_page_flags(file->f_mapping, index, FGP_ACCESSED | FGP_LOCK); + /* Page gone missing, then re-added to cache, but not initialized? */ + if (page && !PageUptodate(page)) { + unlock_page(page); + put_page(page); + page = NULL; + } spin_lock(&fi->rdc.lock); if (!page) { /* diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index b9cfb1165ff4..90a574ba92cf 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -971,7 +971,7 @@ static struct virtio_driver virtio_fs_driver = { #endif }; -static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq) +static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq, bool sync) __releases(fiq->lock) { struct fuse_forget_link *link; @@ -1006,7 +1006,8 @@ __releases(fiq->lock) kfree(link); } -static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq) +static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq, + bool sync) __releases(fiq->lock) { /* @@ -1221,7 +1222,8 @@ out: return ret; } -static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq) +static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq, + bool sync) __releases(fiq->lock) { unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */ diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index b34c02985d9d..b4fde3a8eeb4 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1235,13 +1235,12 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length, if (length != written && (iomap->flags & IOMAP_F_NEW)) { /* Deallocate blocks that were just allocated. 
*/ - loff_t blockmask = i_blocksize(inode) - 1; - loff_t end = (pos + length) & ~blockmask; + loff_t hstart = round_up(pos + written, i_blocksize(inode)); + loff_t hend = iomap->offset + iomap->length; - pos = (pos + written + blockmask) & ~blockmask; - if (pos < end) { - truncate_pagecache_range(inode, pos, end - 1); - punch_hole(ip, pos, end - pos); + if (hstart < hend) { + truncate_pagecache_range(inode, hstart, hend - 1); + punch_hole(ip, hstart, hend - hstart); } } @@ -2200,7 +2199,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize) ret = do_shrink(inode, newsize); out: - gfs2_rs_delete(ip, NULL); + gfs2_rs_delete(ip); gfs2_qa_put(ip); return ret; } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index cfd9d03f604f..55a8eb3c1963 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -717,7 +717,8 @@ static int gfs2_release(struct inode *inode, struct file *file) file->private_data = NULL; if (file->f_mode & FMODE_WRITE) { - gfs2_rs_delete(ip, &inode->i_writecount); + if (gfs2_rs_active(&ip->i_res)) + gfs2_rs_delete(ip); gfs2_qa_put(ip); } return 0; @@ -857,14 +858,16 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to) return ret; iocb->ki_flags &= ~IOCB_DIRECT; } + pagefault_disable(); iocb->ki_flags |= IOCB_NOIO; ret = generic_file_read_iter(iocb, to); iocb->ki_flags &= ~IOCB_NOIO; + pagefault_enable(); if (ret >= 0) { if (!iov_iter_count(to)) return ret; written = ret; - } else { + } else if (ret != -EFAULT) { if (ret != -EAGAIN) return ret; if (iocb->ki_flags & IOCB_NOWAIT) diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 65ae4fc28ede..74a6b0800e05 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -811,7 +811,7 @@ fail_free_inode: if (free_vfs_inode) /* else evict will do the put for us */ gfs2_glock_put(ip->i_gl); } - gfs2_rs_delete(ip, NULL); + gfs2_rs_deltree(&ip->i_res); gfs2_qa_put(ip); fail_free_acls: posix_acl_release(default_acl); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index b9ed6a6dbcf5..648f7336043f 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -182,7 +182,10 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent) pr_warn("Invalid superblock size\n"); return -EINVAL; } - + if (sb->sb_bsize_shift != ffs(sb->sb_bsize) - 1) { + pr_warn("Invalid block size shift\n"); + return -EINVAL; + } return 0; } @@ -381,8 +384,10 @@ static int init_names(struct gfs2_sbd *sdp, int silent) if (!table[0]) table = sdp->sd_vfs->s_id; - strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN); - strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN); + BUILD_BUG_ON(GFS2_LOCKNAME_LEN > GFS2_FSNAME_LEN); + + strscpy(sdp->sd_proto_name, proto, GFS2_LOCKNAME_LEN); + strscpy(sdp->sd_table_name, table, GFS2_LOCKNAME_LEN); table = sdp->sd_table_name; while ((table = strchr(table, '/'))) @@ -1414,13 +1419,13 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param) switch (o) { case Opt_lockproto: - strlcpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN); + strscpy(args->ar_lockproto, param->string, GFS2_LOCKNAME_LEN); break; case Opt_locktable: - strlcpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN); + strscpy(args->ar_locktable, param->string, GFS2_LOCKNAME_LEN); break; case Opt_hostdata: - strlcpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN); + strscpy(args->ar_hostdata, param->string, GFS2_LOCKNAME_LEN); break; case Opt_spectator: args->ar_spectator = 1; diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 6e173ae378c4..ad953ecb5853 100644 --- a/fs/gfs2/quota.c +++ 
b/fs/gfs2/quota.c @@ -531,34 +531,42 @@ static void qdsb_put(struct gfs2_quota_data *qd) */ int gfs2_qa_get(struct gfs2_inode *ip) { - int error = 0; struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); + struct inode *inode = &ip->i_inode; if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) return 0; - down_write(&ip->i_rw_mutex); + spin_lock(&inode->i_lock); if (ip->i_qadata == NULL) { - ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS); - if (!ip->i_qadata) { - error = -ENOMEM; - goto out; - } + struct gfs2_qadata *tmp; + + spin_unlock(&inode->i_lock); + tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS); + if (!tmp) + return -ENOMEM; + + spin_lock(&inode->i_lock); + if (ip->i_qadata == NULL) + ip->i_qadata = tmp; + else + kmem_cache_free(gfs2_qadata_cachep, tmp); } ip->i_qadata->qa_ref++; -out: - up_write(&ip->i_rw_mutex); - return error; + spin_unlock(&inode->i_lock); + return 0; } void gfs2_qa_put(struct gfs2_inode *ip) { - down_write(&ip->i_rw_mutex); + struct inode *inode = &ip->i_inode; + + spin_lock(&inode->i_lock); if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) { kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata); ip->i_qadata = NULL; } - up_write(&ip->i_rw_mutex); + spin_unlock(&inode->i_lock); } int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index eb775e93de97..c5bde789a16d 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -664,13 +664,14 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs) /** * gfs2_rs_delete - delete a multi-block reservation * @ip: The inode for this reservation - * @wcount: The inode's write count, or NULL * */ -void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount) +void gfs2_rs_delete(struct gfs2_inode *ip) { + struct inode *inode = &ip->i_inode; + down_write(&ip->i_rw_mutex); - if ((wcount == NULL) || (atomic_read(wcount) <= 1)) + if (atomic_read(&inode->i_writecount) <= 1) gfs2_rs_deltree(&ip->i_res); up_write(&ip->i_rw_mutex); } @@ -905,15 +906,15 @@ static int read_rindex_entry(struct gfs2_inode *ip) rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes); spin_lock_init(&rgd->rd_rsspin); - error = compute_bitstructs(rgd); - if (error) - goto fail; - error = gfs2_glock_get(sdp, rgd->rd_addr, &gfs2_rgrp_glops, CREATE, &rgd->rd_gl); if (error) goto fail; + error = compute_bitstructs(rgd); + if (error) + goto fail_glock; + rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr; rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED); if (rgd->rd_data > sdp->sd_max_rg_data) @@ -927,6 +928,7 @@ static int read_rindex_entry(struct gfs2_inode *ip) } error = 0; /* someone else read in the rgrp; free it and ignore it */ +fail_glock: gfs2_glock_put(rgd->rd_gl); fail: diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h index 9a587ada51ed..2d3c150c55bd 100644 --- a/fs/gfs2/rgrp.h +++ b/fs/gfs2/rgrp.h @@ -45,7 +45,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n, bool dinode, u64 *generation); extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs); -extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount); +extern void gfs2_rs_delete(struct gfs2_inode *ip); extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, u64 bstart, u32 blen, int meta); extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index d2b7ecbd1b15..d14b98aa1c3e 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -1434,7 +1434,7 @@ out: truncate_inode_pages_final(&inode->i_data); if 
(ip->i_qadata) gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0); - gfs2_rs_delete(ip, NULL); + gfs2_rs_deltree(&ip->i_res); gfs2_ordered_del_inode(ip); clear_inode(inode); gfs2_dir_hash_inval(ip); diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 5fc9ccab907c..a2f43f1a85f8 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -206,7 +206,7 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr, info.flags = 0; info.length = len; info.low_limit = current->mm->mmap_base; - info.high_limit = TASK_SIZE; + info.high_limit = arch_get_mmap_end(addr); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; return vm_unmapped_area(&info); @@ -222,7 +222,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = max(PAGE_SIZE, mmap_min_addr); - info.high_limit = current->mm->mmap_base; + info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; addr = vm_unmapped_area(&info); @@ -237,7 +237,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = current->mm->mmap_base; - info.high_limit = TASK_SIZE; + info.high_limit = arch_get_mmap_end(addr); addr = vm_unmapped_area(&info); } @@ -251,6 +251,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct hstate *h = hstate_file(file); + const unsigned long mmap_end = arch_get_mmap_end(addr); if (len & ~huge_page_mask(h)) return -EINVAL; @@ -266,7 +267,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && + if (mmap_end - len >= addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/fs/incfs/vfs.c b/fs/incfs/vfs.c index 776640451f6f..342998f24090 100644 --- a/fs/incfs/vfs.c +++ b/fs/incfs/vfs.c @@ -1592,6 +1592,10 @@ static int incfs_setattr(struct dentry *dentry, struct iattr *ia) if (ia->ia_valid & ATTR_SIZE) return -EINVAL; + if ((ia->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) && + (ia->ia_valid & ATTR_MODE)) + return -EINVAL; + if (!di) return -EINVAL; backing_dentry = di->backing_path.dentry; diff --git a/fs/inode.c b/fs/inode.c index 9246236bcdca..d9c748b0fb89 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -168,8 +168,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode) inode->i_wb_frn_history = 0; #endif - if (security_inode_alloc(inode)) - goto out; spin_lock_init(&inode->i_lock); lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); @@ -202,11 +200,12 @@ int inode_init_always(struct super_block *sb, struct inode *inode) inode->i_fsnotify_mask = 0; #endif inode->i_flctx = NULL; + + if (unlikely(security_inode_alloc(inode))) + return -ENOMEM; this_cpu_inc(nr_inodes); return 0; -out: - return -ENOMEM; } EXPORT_SYMBOL(inode_init_always); diff --git a/fs/internal.h b/fs/internal.h index 5155f6ce95c7..06d313b9beec 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -77,6 +77,8 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *, long do_rmdir(int dfd, struct filename *name); long do_unlinkat(int dfd, struct filename *name); int may_linkat(struct path *link); +int do_renameat2(int olddfd, struct filename *oldname, int newdfd, + struct filename *newname, unsigned int flags); /* 
* namespace.c @@ -132,6 +134,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *, const char *, const struct open_flags *); extern struct open_how build_open_how(int flags, umode_t mode); extern int build_open_flags(const struct open_how *how, struct open_flags *op); +extern int __close_fd_get_file(unsigned int fd, struct file **res); long do_sys_ftruncate(unsigned int fd, loff_t length, int small); int chmod_common(const struct path *path, umode_t mode); diff --git a/fs/io-wq.c b/fs/io-wq.c deleted file mode 100644 index 3d5fc76b92d0..000000000000 --- a/fs/io-wq.c +++ /dev/null @@ -1,1242 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Basic worker thread pool for io_uring - * - * Copyright (C) 2019 Jens Axboe - * - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../kernel/sched/sched.h" -#include "io-wq.h" - -#define WORKER_IDLE_TIMEOUT (5 * HZ) - -enum { - IO_WORKER_F_UP = 1, /* up and active */ - IO_WORKER_F_RUNNING = 2, /* account as running */ - IO_WORKER_F_FREE = 4, /* worker on free list */ - IO_WORKER_F_FIXED = 8, /* static idle worker */ - IO_WORKER_F_BOUND = 16, /* is doing bounded work */ -}; - -enum { - IO_WQ_BIT_EXIT = 0, /* wq exiting */ - IO_WQ_BIT_CANCEL = 1, /* cancel work on list */ - IO_WQ_BIT_ERROR = 2, /* error on setup */ -}; - -enum { - IO_WQE_FLAG_STALLED = 1, /* stalled on hash */ -}; - -/* - * One for each thread in a wqe pool - */ -struct io_worker { - refcount_t ref; - unsigned flags; - struct hlist_nulls_node nulls_node; - struct list_head all_list; - struct task_struct *task; - struct io_wqe *wqe; - - struct io_wq_work *cur_work; - spinlock_t lock; - - struct rcu_head rcu; - struct mm_struct *mm; -#ifdef CONFIG_BLK_CGROUP - struct cgroup_subsys_state *blkcg_css; -#endif - const struct cred *cur_creds; - const struct cred *saved_creds; - struct files_struct *restore_files; - struct nsproxy *restore_nsproxy; - struct fs_struct *restore_fs; -}; - -#if BITS_PER_LONG == 64 -#define IO_WQ_HASH_ORDER 6 -#else -#define IO_WQ_HASH_ORDER 5 -#endif - -#define IO_WQ_NR_HASH_BUCKETS (1u << IO_WQ_HASH_ORDER) - -struct io_wqe_acct { - unsigned nr_workers; - unsigned max_workers; - atomic_t nr_running; -}; - -enum { - IO_WQ_ACCT_BOUND, - IO_WQ_ACCT_UNBOUND, -}; - -/* - * Per-node worker thread pool - */ -struct io_wqe { - struct { - raw_spinlock_t lock; - struct io_wq_work_list work_list; - unsigned long hash_map; - unsigned flags; - } ____cacheline_aligned_in_smp; - - int node; - struct io_wqe_acct acct[2]; - - struct hlist_nulls_head free_list; - struct list_head all_list; - - struct io_wq *wq; - struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS]; -}; - -/* - * Per io_wq state - */ -struct io_wq { - struct io_wqe **wqes; - unsigned long state; - - free_work_fn *free_work; - io_wq_work_fn *do_work; - - struct task_struct *manager; - struct user_struct *user; - refcount_t refs; - struct completion done; - - struct hlist_node cpuhp_node; - - refcount_t use_refs; -}; - -static enum cpuhp_state io_wq_online; - -static bool io_worker_get(struct io_worker *worker) -{ - return refcount_inc_not_zero(&worker->ref); -} - -static void io_worker_release(struct io_worker *worker) -{ - if (refcount_dec_and_test(&worker->ref)) - wake_up_process(worker->task); -} - -/* - * Note: drops the wqe->lock if returning true! The caller must re-acquire - * the lock in that case. 
Some callers need to restart handling if this - * happens, so we can't just re-acquire the lock on behalf of the caller. - */ -static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker) -{ - bool dropped_lock = false; - - if (worker->saved_creds) { - revert_creds(worker->saved_creds); - worker->cur_creds = worker->saved_creds = NULL; - } - - if (current->files != worker->restore_files) { - __acquire(&wqe->lock); - raw_spin_unlock_irq(&wqe->lock); - dropped_lock = true; - - task_lock(current); - current->files = worker->restore_files; - current->nsproxy = worker->restore_nsproxy; - task_unlock(current); - } - - if (current->fs != worker->restore_fs) - current->fs = worker->restore_fs; - - /* - * If we have an active mm, we need to drop the wq lock before unusing - * it. If we do, return true and let the caller retry the idle loop. - */ - if (worker->mm) { - if (!dropped_lock) { - __acquire(&wqe->lock); - raw_spin_unlock_irq(&wqe->lock); - dropped_lock = true; - } - __set_current_state(TASK_RUNNING); - kthread_unuse_mm(worker->mm); - mmput(worker->mm); - worker->mm = NULL; - } - -#ifdef CONFIG_BLK_CGROUP - if (worker->blkcg_css) { - kthread_associate_blkcg(NULL); - worker->blkcg_css = NULL; - } -#endif - if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY) - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; - return dropped_lock; -} - -static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe, - struct io_wq_work *work) -{ - if (work->flags & IO_WQ_WORK_UNBOUND) - return &wqe->acct[IO_WQ_ACCT_UNBOUND]; - - return &wqe->acct[IO_WQ_ACCT_BOUND]; -} - -static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe, - struct io_worker *worker) -{ - if (worker->flags & IO_WORKER_F_BOUND) - return &wqe->acct[IO_WQ_ACCT_BOUND]; - - return &wqe->acct[IO_WQ_ACCT_UNBOUND]; -} - -static void io_worker_exit(struct io_worker *worker) -{ - struct io_wqe *wqe = worker->wqe; - struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); - - /* - * If we're not at zero, someone else is holding a brief reference - * to the worker. Wait for that to go away. - */ - set_current_state(TASK_INTERRUPTIBLE); - if (!refcount_dec_and_test(&worker->ref)) - schedule(); - __set_current_state(TASK_RUNNING); - - preempt_disable(); - current->flags &= ~PF_IO_WORKER; - if (worker->flags & IO_WORKER_F_RUNNING) - atomic_dec(&acct->nr_running); - if (!(worker->flags & IO_WORKER_F_BOUND)) - atomic_dec(&wqe->wq->user->processes); - worker->flags = 0; - preempt_enable(); - - raw_spin_lock_irq(&wqe->lock); - hlist_nulls_del_rcu(&worker->nulls_node); - list_del_rcu(&worker->all_list); - if (__io_worker_unuse(wqe, worker)) { - __release(&wqe->lock); - raw_spin_lock_irq(&wqe->lock); - } - acct->nr_workers--; - raw_spin_unlock_irq(&wqe->lock); - - kfree_rcu(worker, rcu); - if (refcount_dec_and_test(&wqe->wq->refs)) - complete(&wqe->wq->done); -} - -static inline bool io_wqe_run_queue(struct io_wqe *wqe) - __must_hold(wqe->lock) -{ - if (!wq_list_empty(&wqe->work_list) && - !(wqe->flags & IO_WQE_FLAG_STALLED)) - return true; - return false; -} - -/* - * Check head of free list for an available worker. If one isn't available, - * caller must wake up the wq manager to create one. 
- */ -static bool io_wqe_activate_free_worker(struct io_wqe *wqe) - __must_hold(RCU) -{ - struct hlist_nulls_node *n; - struct io_worker *worker; - - n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list)); - if (is_a_nulls(n)) - return false; - - worker = hlist_nulls_entry(n, struct io_worker, nulls_node); - if (io_worker_get(worker)) { - wake_up_process(worker->task); - io_worker_release(worker); - return true; - } - - return false; -} - -/* - * We need a worker. If we find a free one, we're good. If not, and we're - * below the max number of workers, wake up the manager to create one. - */ -static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) -{ - bool ret; - - /* - * Most likely an attempt to queue unbounded work on an io_wq that - * wasn't setup with any unbounded workers. - */ - if (unlikely(!acct->max_workers)) - pr_warn_once("io-wq is not configured for unbound workers"); - - rcu_read_lock(); - ret = io_wqe_activate_free_worker(wqe); - rcu_read_unlock(); - - if (!ret && acct->nr_workers < acct->max_workers) - wake_up_process(wqe->wq->manager); -} - -static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker) -{ - struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); - - atomic_inc(&acct->nr_running); -} - -static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker) - __must_hold(wqe->lock) -{ - struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker); - - if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe)) - io_wqe_wake_worker(wqe, acct); -} - -static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker) -{ - allow_kernel_signal(SIGINT); - - current->flags |= PF_IO_WORKER; - - worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING); - worker->restore_files = current->files; - worker->restore_nsproxy = current->nsproxy; - worker->restore_fs = current->fs; - io_wqe_inc_running(wqe, worker); -} - -/* - * Worker will start processing some work. Move it to the busy list, if - * it's currently on the freelist - */ -static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker, - struct io_wq_work *work) - __must_hold(wqe->lock) -{ - bool worker_bound, work_bound; - - if (worker->flags & IO_WORKER_F_FREE) { - worker->flags &= ~IO_WORKER_F_FREE; - hlist_nulls_del_init_rcu(&worker->nulls_node); - } - - /* - * If worker is moving from bound to unbound (or vice versa), then - * ensure we update the running accounting. - */ - worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0; - work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0; - if (worker_bound != work_bound) { - io_wqe_dec_running(wqe, worker); - if (work_bound) { - worker->flags |= IO_WORKER_F_BOUND; - wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--; - wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++; - atomic_dec(&wqe->wq->user->processes); - } else { - worker->flags &= ~IO_WORKER_F_BOUND; - wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++; - wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--; - atomic_inc(&wqe->wq->user->processes); - } - io_wqe_inc_running(wqe, worker); - } -} - -/* - * No work, worker going to sleep. Move to freelist, and unuse mm if we - * have one attached. Dropping the mm may potentially sleep, so we drop - * the lock in that case and return success. Since the caller has to - * retry the loop in that case (we changed task state), we don't regrab - * the lock if we return success. 
- */ -static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) - __must_hold(wqe->lock) -{ - if (!(worker->flags & IO_WORKER_F_FREE)) { - worker->flags |= IO_WORKER_F_FREE; - hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); - } - - return __io_worker_unuse(wqe, worker); -} - -static inline unsigned int io_get_work_hash(struct io_wq_work *work) -{ - return work->flags >> IO_WQ_HASH_SHIFT; -} - -static struct io_wq_work *io_get_next_work(struct io_wqe *wqe) - __must_hold(wqe->lock) -{ - struct io_wq_work_node *node, *prev; - struct io_wq_work *work, *tail; - unsigned int hash; - - wq_list_for_each(node, prev, &wqe->work_list) { - work = container_of(node, struct io_wq_work, list); - - /* not hashed, can run anytime */ - if (!io_wq_is_hashed(work)) { - wq_list_del(&wqe->work_list, node, prev); - return work; - } - - /* hashed, can run if not already running */ - hash = io_get_work_hash(work); - if (!(wqe->hash_map & BIT(hash))) { - wqe->hash_map |= BIT(hash); - /* all items with this hash lie in [work, tail] */ - tail = wqe->hash_tail[hash]; - wqe->hash_tail[hash] = NULL; - wq_list_cut(&wqe->work_list, &tail->list, prev); - return work; - } - } - - return NULL; -} - -static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work) -{ - if (worker->mm) { - kthread_unuse_mm(worker->mm); - mmput(worker->mm); - worker->mm = NULL; - } - - if (mmget_not_zero(work->identity->mm)) { - kthread_use_mm(work->identity->mm); - worker->mm = work->identity->mm; - return; - } - - /* failed grabbing mm, ensure work gets cancelled */ - work->flags |= IO_WQ_WORK_CANCEL; -} - -static inline void io_wq_switch_blkcg(struct io_worker *worker, - struct io_wq_work *work) -{ -#ifdef CONFIG_BLK_CGROUP - if (!(work->flags & IO_WQ_WORK_BLKCG)) - return; - if (work->identity->blkcg_css != worker->blkcg_css) { - kthread_associate_blkcg(work->identity->blkcg_css); - worker->blkcg_css = work->identity->blkcg_css; - } -#endif -} - -static void io_wq_switch_creds(struct io_worker *worker, - struct io_wq_work *work) -{ - const struct cred *old_creds = override_creds(work->identity->creds); - - worker->cur_creds = work->identity->creds; - if (worker->saved_creds) - put_cred(old_creds); /* creds set by previous switch */ - else - worker->saved_creds = old_creds; -} - -static void io_impersonate_work(struct io_worker *worker, - struct io_wq_work *work) -{ - if ((work->flags & IO_WQ_WORK_FILES) && - current->files != work->identity->files) { - task_lock(current); - current->files = work->identity->files; - current->nsproxy = work->identity->nsproxy; - task_unlock(current); - if (!work->identity->files) { - /* failed grabbing files, ensure work gets cancelled */ - work->flags |= IO_WQ_WORK_CANCEL; - } - } - if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs) - current->fs = work->identity->fs; - if ((work->flags & IO_WQ_WORK_MM) && work->identity->mm != worker->mm) - io_wq_switch_mm(worker, work); - if ((work->flags & IO_WQ_WORK_CREDS) && - worker->cur_creds != work->identity->creds) - io_wq_switch_creds(worker, work); - if (work->flags & IO_WQ_WORK_FSIZE) - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->identity->fsize; - else if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY) - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; - io_wq_switch_blkcg(worker, work); -#ifdef CONFIG_AUDIT - current->loginuid = work->identity->loginuid; - current->sessionid = work->identity->sessionid; -#endif -} - -static void 
io_assign_current_work(struct io_worker *worker, - struct io_wq_work *work) -{ - if (work) { - /* flush pending signals before assigning new work */ - if (signal_pending(current)) - flush_signals(current); - cond_resched(); - } - -#ifdef CONFIG_AUDIT - current->loginuid = KUIDT_INIT(AUDIT_UID_UNSET); - current->sessionid = AUDIT_SID_UNSET; -#endif - - spin_lock_irq(&worker->lock); - worker->cur_work = work; - spin_unlock_irq(&worker->lock); -} - -static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work); - -static void io_worker_handle_work(struct io_worker *worker) - __releases(wqe->lock) -{ - struct io_wqe *wqe = worker->wqe; - struct io_wq *wq = wqe->wq; - - do { - struct io_wq_work *work; -get_next: - /* - * If we got some work, mark us as busy. If we didn't, but - * the list isn't empty, it means we stalled on hashed work. - * Mark us stalled so we don't keep looking for work when we - * can't make progress, any work completion or insertion will - * clear the stalled flag. - */ - work = io_get_next_work(wqe); - if (work) - __io_worker_busy(wqe, worker, work); - else if (!wq_list_empty(&wqe->work_list)) - wqe->flags |= IO_WQE_FLAG_STALLED; - - raw_spin_unlock_irq(&wqe->lock); - if (!work) - break; - io_assign_current_work(worker, work); - - /* handle a whole dependent link */ - do { - struct io_wq_work *old_work, *next_hashed, *linked; - unsigned int hash = io_get_work_hash(work); - - next_hashed = wq_next_work(work); - io_impersonate_work(worker, work); - /* - * OK to set IO_WQ_WORK_CANCEL even for uncancellable - * work, the worker function will do the right thing. - */ - if (test_bit(IO_WQ_BIT_CANCEL, &wq->state)) - work->flags |= IO_WQ_WORK_CANCEL; - - old_work = work; - linked = wq->do_work(work); - - work = next_hashed; - if (!work && linked && !io_wq_is_hashed(linked)) { - work = linked; - linked = NULL; - } - io_assign_current_work(worker, work); - wq->free_work(old_work); - - if (linked) - io_wqe_enqueue(wqe, linked); - - if (hash != -1U && !next_hashed) { - raw_spin_lock_irq(&wqe->lock); - wqe->hash_map &= ~BIT_ULL(hash); - wqe->flags &= ~IO_WQE_FLAG_STALLED; - /* skip unnecessary unlock-lock wqe->lock */ - if (!work) - goto get_next; - raw_spin_unlock_irq(&wqe->lock); - } - } while (work); - - raw_spin_lock_irq(&wqe->lock); - } while (1); -} - -static int io_wqe_worker(void *data) -{ - struct io_worker *worker = data; - struct io_wqe *wqe = worker->wqe; - struct io_wq *wq = wqe->wq; - - io_worker_start(wqe, worker); - - while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) { - set_current_state(TASK_INTERRUPTIBLE); -loop: - raw_spin_lock_irq(&wqe->lock); - if (io_wqe_run_queue(wqe)) { - __set_current_state(TASK_RUNNING); - io_worker_handle_work(worker); - goto loop; - } - /* drops the lock on success, retry */ - if (__io_worker_idle(wqe, worker)) { - __release(&wqe->lock); - goto loop; - } - raw_spin_unlock_irq(&wqe->lock); - if (signal_pending(current)) - flush_signals(current); - if (schedule_timeout(WORKER_IDLE_TIMEOUT)) - continue; - /* timed out, exit unless we're the fixed worker */ - if (test_bit(IO_WQ_BIT_EXIT, &wq->state) || - !(worker->flags & IO_WORKER_F_FIXED)) - break; - } - - if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) { - raw_spin_lock_irq(&wqe->lock); - if (!wq_list_empty(&wqe->work_list)) - io_worker_handle_work(worker); - else - raw_spin_unlock_irq(&wqe->lock); - } - - io_worker_exit(worker); - return 0; -} - -/* - * Called when a worker is scheduled in. Mark us as currently running. 
- */ -void io_wq_worker_running(struct task_struct *tsk) -{ - struct io_worker *worker = kthread_data(tsk); - struct io_wqe *wqe = worker->wqe; - - if (!(worker->flags & IO_WORKER_F_UP)) - return; - if (worker->flags & IO_WORKER_F_RUNNING) - return; - worker->flags |= IO_WORKER_F_RUNNING; - io_wqe_inc_running(wqe, worker); -} - -/* - * Called when worker is going to sleep. If there are no workers currently - * running and we have work pending, wake up a free one or have the manager - * set one up. - */ -void io_wq_worker_sleeping(struct task_struct *tsk) -{ - struct io_worker *worker = kthread_data(tsk); - struct io_wqe *wqe = worker->wqe; - - if (!(worker->flags & IO_WORKER_F_UP)) - return; - if (!(worker->flags & IO_WORKER_F_RUNNING)) - return; - - worker->flags &= ~IO_WORKER_F_RUNNING; - - raw_spin_lock_irq(&wqe->lock); - io_wqe_dec_running(wqe, worker); - raw_spin_unlock_irq(&wqe->lock); -} - -static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) -{ - struct io_wqe_acct *acct = &wqe->acct[index]; - struct io_worker *worker; - - worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node); - if (!worker) - return false; - - refcount_set(&worker->ref, 1); - worker->nulls_node.pprev = NULL; - worker->wqe = wqe; - spin_lock_init(&worker->lock); - - worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node, - "io_wqe_worker-%d/%d", index, wqe->node); - if (IS_ERR(worker->task)) { - kfree(worker); - return false; - } - kthread_bind_mask(worker->task, cpumask_of_node(wqe->node)); - - raw_spin_lock_irq(&wqe->lock); - hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); - list_add_tail_rcu(&worker->all_list, &wqe->all_list); - worker->flags |= IO_WORKER_F_FREE; - if (index == IO_WQ_ACCT_BOUND) - worker->flags |= IO_WORKER_F_BOUND; - if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND)) - worker->flags |= IO_WORKER_F_FIXED; - acct->nr_workers++; - raw_spin_unlock_irq(&wqe->lock); - - if (index == IO_WQ_ACCT_UNBOUND) - atomic_inc(&wq->user->processes); - - refcount_inc(&wq->refs); - wake_up_process(worker->task); - return true; -} - -static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index) - __must_hold(wqe->lock) -{ - struct io_wqe_acct *acct = &wqe->acct[index]; - - /* if we have available workers or no work, no need */ - if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe)) - return false; - return acct->nr_workers < acct->max_workers; -} - -static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data) -{ - send_sig(SIGINT, worker->task, 1); - return false; -} - -/* - * Iterate the passed in list and call the specific function for each - * worker that isn't exiting - */ -static bool io_wq_for_each_worker(struct io_wqe *wqe, - bool (*func)(struct io_worker *, void *), - void *data) -{ - struct io_worker *worker; - bool ret = false; - - list_for_each_entry_rcu(worker, &wqe->all_list, all_list) { - if (io_worker_get(worker)) { - /* no task if node is/was offline */ - if (worker->task) - ret = func(worker, data); - io_worker_release(worker); - if (ret) - break; - } - } - - return ret; -} - -static bool io_wq_worker_wake(struct io_worker *worker, void *data) -{ - wake_up_process(worker->task); - return false; -} - -/* - * Manager thread. Tasked with creating new workers, if we need them. 
- */ -static int io_wq_manager(void *data) -{ - struct io_wq *wq = data; - int node; - - /* create fixed workers */ - refcount_set(&wq->refs, 1); - for_each_node(node) { - if (!node_online(node)) - continue; - if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND)) - continue; - set_bit(IO_WQ_BIT_ERROR, &wq->state); - set_bit(IO_WQ_BIT_EXIT, &wq->state); - goto out; - } - - complete(&wq->done); - - while (!kthread_should_stop()) { - if (current->task_works) - task_work_run(); - - for_each_node(node) { - struct io_wqe *wqe = wq->wqes[node]; - bool fork_worker[2] = { false, false }; - - if (!node_online(node)) - continue; - - raw_spin_lock_irq(&wqe->lock); - if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND)) - fork_worker[IO_WQ_ACCT_BOUND] = true; - if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND)) - fork_worker[IO_WQ_ACCT_UNBOUND] = true; - raw_spin_unlock_irq(&wqe->lock); - if (fork_worker[IO_WQ_ACCT_BOUND]) - create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND); - if (fork_worker[IO_WQ_ACCT_UNBOUND]) - create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND); - } - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ); - } - - if (current->task_works) - task_work_run(); - -out: - if (refcount_dec_and_test(&wq->refs)) { - complete(&wq->done); - return 0; - } - /* if ERROR is set and we get here, we have workers to wake */ - if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) { - rcu_read_lock(); - for_each_node(node) - io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL); - rcu_read_unlock(); - } - return 0; -} - -static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct, - struct io_wq_work *work) -{ - bool free_worker; - - if (!(work->flags & IO_WQ_WORK_UNBOUND)) - return true; - if (atomic_read(&acct->nr_running)) - return true; - - rcu_read_lock(); - free_worker = !hlist_nulls_empty(&wqe->free_list); - rcu_read_unlock(); - if (free_worker) - return true; - - if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers && - !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN))) - return false; - - return true; -} - -static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe) -{ - struct io_wq *wq = wqe->wq; - - do { - struct io_wq_work *old_work = work; - - work->flags |= IO_WQ_WORK_CANCEL; - work = wq->do_work(work); - wq->free_work(old_work); - } while (work); -} - -static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work) -{ - unsigned int hash; - struct io_wq_work *tail; - - if (!io_wq_is_hashed(work)) { -append: - wq_list_add_tail(&work->list, &wqe->work_list); - return; - } - - hash = io_get_work_hash(work); - tail = wqe->hash_tail[hash]; - wqe->hash_tail[hash] = work; - if (!tail) - goto append; - - wq_list_add_after(&work->list, &tail->list, &wqe->work_list); -} - -static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) -{ - struct io_wqe_acct *acct = io_work_get_acct(wqe, work); - bool do_wake; - unsigned long flags; - - /* - * Do early check to see if we need a new unbound worker, and if we do, - * if we're allowed to do so. This isn't 100% accurate as there's a - * gap between this check and incrementing the value, but that's OK. - * It's close enough to not be an issue, fork() has the same delay. 
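
The helper deleted above documents a deliberately tolerated race: the worker count may change between the unlocked check and the later increment, and briefly overshooting max_workers is accepted the same way fork() accepts a racy RLIMIT_NPROC check. A minimal user-space sketch of that optimistic-check idea (types and names are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct acct {
        atomic_int nr_running;  /* updated by workers as they start/stop */
        int max_workers;        /* soft cap; may be overshot briefly */
    };

    /* Unlocked capacity check: the answer can be stale by the time the
     * caller acts on it, and that is fine here: the worst case is a few
     * extra workers, which later accounting corrects. */
    static bool can_queue(struct acct *a)
    {
        return atomic_load(&a->nr_running) < a->max_workers;
    }

    int main(void)
    {
        struct acct a = { .nr_running = 3, .max_workers = 4 };
        printf("%d\n", can_queue(&a)); /* 1: below the cap right now */
        return 0;
    }
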
- */ - if (unlikely(!io_wq_can_queue(wqe, acct, work))) { - io_run_cancel(work, wqe); - return; - } - - raw_spin_lock_irqsave(&wqe->lock, flags); - io_wqe_insert_work(wqe, work); - wqe->flags &= ~IO_WQE_FLAG_STALLED; - do_wake = (work->flags & IO_WQ_WORK_CONCURRENT) || - !atomic_read(&acct->nr_running); - raw_spin_unlock_irqrestore(&wqe->lock, flags); - - if (do_wake) - io_wqe_wake_worker(wqe, acct); -} - -void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) -{ - struct io_wqe *wqe = wq->wqes[numa_node_id()]; - - io_wqe_enqueue(wqe, work); -} - -/* - * Work items that hash to the same value will not be done in parallel. - * Used to limit concurrent writes, generally hashed by inode. - */ -void io_wq_hash_work(struct io_wq_work *work, void *val) -{ - unsigned int bit; - - bit = hash_ptr(val, IO_WQ_HASH_ORDER); - work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT)); -} - -void io_wq_cancel_all(struct io_wq *wq) -{ - int node; - - set_bit(IO_WQ_BIT_CANCEL, &wq->state); - - rcu_read_lock(); - for_each_node(node) { - struct io_wqe *wqe = wq->wqes[node]; - - io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL); - } - rcu_read_unlock(); -} - -struct io_cb_cancel_data { - work_cancel_fn *fn; - void *data; - int nr_running; - int nr_pending; - bool cancel_all; -}; - -static bool io_wq_worker_cancel(struct io_worker *worker, void *data) -{ - struct io_cb_cancel_data *match = data; - unsigned long flags; - - /* - * Hold the lock to avoid ->cur_work going out of scope, caller - * may dereference the passed in work. - */ - spin_lock_irqsave(&worker->lock, flags); - if (worker->cur_work && - !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) && - match->fn(worker->cur_work, match->data)) { - send_sig(SIGINT, worker->task, 1); - match->nr_running++; - } - spin_unlock_irqrestore(&worker->lock, flags); - - return match->nr_running && !match->cancel_all; -} - -static inline void io_wqe_remove_pending(struct io_wqe *wqe, - struct io_wq_work *work, - struct io_wq_work_node *prev) -{ - unsigned int hash = io_get_work_hash(work); - struct io_wq_work *prev_work = NULL; - - if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) { - if (prev) - prev_work = container_of(prev, struct io_wq_work, list); - if (prev_work && io_get_work_hash(prev_work) == hash) - wqe->hash_tail[hash] = prev_work; - else - wqe->hash_tail[hash] = NULL; - } - wq_list_del(&wqe->work_list, &work->list, prev); -} - -static void io_wqe_cancel_pending_work(struct io_wqe *wqe, - struct io_cb_cancel_data *match) -{ - struct io_wq_work_node *node, *prev; - struct io_wq_work *work; - unsigned long flags; - -retry: - raw_spin_lock_irqsave(&wqe->lock, flags); - wq_list_for_each(node, prev, &wqe->work_list) { - work = container_of(node, struct io_wq_work, list); - if (!match->fn(work, match->data)) - continue; - io_wqe_remove_pending(wqe, work, prev); - raw_spin_unlock_irqrestore(&wqe->lock, flags); - io_run_cancel(work, wqe); - match->nr_pending++; - if (!match->cancel_all) - return; - - /* not safe to continue after unlock */ - goto retry; - } - raw_spin_unlock_irqrestore(&wqe->lock, flags); -} - -static void io_wqe_cancel_running_work(struct io_wqe *wqe, - struct io_cb_cancel_data *match) -{ - rcu_read_lock(); - io_wq_for_each_worker(wqe, io_wq_worker_cancel, match); - rcu_read_unlock(); -} - -enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, - void *data, bool cancel_all) -{ - struct io_cb_cancel_data match = { - .fn = cancel, - .data = data, - .cancel_all = cancel_all, - }; - int node; - - /* - 
* First check pending list, if we're lucky we can just remove it - * from there. CANCEL_OK means that the work is returned as-new, - * no completion will be posted for it. - */ - for_each_node(node) { - struct io_wqe *wqe = wq->wqes[node]; - - io_wqe_cancel_pending_work(wqe, &match); - if (match.nr_pending && !match.cancel_all) - return IO_WQ_CANCEL_OK; - } - - /* - * Now check if a free (going busy) or busy worker has the work - * currently running. If we find it there, we'll return CANCEL_RUNNING - * as an indication that we attempt to signal cancellation. The - * completion will run normally in this case. - */ - for_each_node(node) { - struct io_wqe *wqe = wq->wqes[node]; - - io_wqe_cancel_running_work(wqe, &match); - if (match.nr_running && !match.cancel_all) - return IO_WQ_CANCEL_RUNNING; - } - - if (match.nr_running) - return IO_WQ_CANCEL_RUNNING; - if (match.nr_pending) - return IO_WQ_CANCEL_OK; - return IO_WQ_CANCEL_NOTFOUND; -} - -struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) -{ - int ret = -ENOMEM, node; - struct io_wq *wq; - - if (WARN_ON_ONCE(!data->free_work || !data->do_work)) - return ERR_PTR(-EINVAL); - if (WARN_ON_ONCE(!bounded)) - return ERR_PTR(-EINVAL); - - wq = kzalloc(sizeof(*wq), GFP_KERNEL); - if (!wq) - return ERR_PTR(-ENOMEM); - - wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL); - if (!wq->wqes) - goto err_wq; - - ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node); - if (ret) - goto err_wqes; - - wq->free_work = data->free_work; - wq->do_work = data->do_work; - - /* caller must already hold a reference to this */ - wq->user = data->user; - - ret = -ENOMEM; - for_each_node(node) { - struct io_wqe *wqe; - int alloc_node = node; - - if (!node_online(alloc_node)) - alloc_node = NUMA_NO_NODE; - wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node); - if (!wqe) - goto err; - wq->wqes[node] = wqe; - wqe->node = alloc_node; - wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; - atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0); - if (wq->user) { - wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = - task_rlimit(current, RLIMIT_NPROC); - } - atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0); - wqe->wq = wq; - raw_spin_lock_init(&wqe->lock); - INIT_WQ_LIST(&wqe->work_list); - INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0); - INIT_LIST_HEAD(&wqe->all_list); - } - - init_completion(&wq->done); - - wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager"); - if (!IS_ERR(wq->manager)) { - wake_up_process(wq->manager); - wait_for_completion(&wq->done); - if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) { - ret = -ENOMEM; - goto err; - } - refcount_set(&wq->use_refs, 1); - reinit_completion(&wq->done); - return wq; - } - - ret = PTR_ERR(wq->manager); - complete(&wq->done); -err: - cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); - for_each_node(node) - kfree(wq->wqes[node]); -err_wqes: - kfree(wq->wqes); -err_wq: - kfree(wq); - return ERR_PTR(ret); -} - -bool io_wq_get(struct io_wq *wq, struct io_wq_data *data) -{ - if (data->free_work != wq->free_work || data->do_work != wq->do_work) - return false; - - return refcount_inc_not_zero(&wq->use_refs); -} - -static void __io_wq_destroy(struct io_wq *wq) -{ - int node; - - cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); - - set_bit(IO_WQ_BIT_EXIT, &wq->state); - if (wq->manager) - kthread_stop(wq->manager); - - rcu_read_lock(); - for_each_node(node) - io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL); - 
rcu_read_unlock(); - - wait_for_completion(&wq->done); - - for_each_node(node) - kfree(wq->wqes[node]); - kfree(wq->wqes); - kfree(wq); -} - -void io_wq_destroy(struct io_wq *wq) -{ - if (refcount_dec_and_test(&wq->use_refs)) - __io_wq_destroy(wq); -} - -struct task_struct *io_wq_get_task(struct io_wq *wq) -{ - return wq->manager; -} - -static bool io_wq_worker_affinity(struct io_worker *worker, void *data) -{ - struct task_struct *task = worker->task; - struct rq_flags rf; - struct rq *rq; - - rq = task_rq_lock(task, &rf); - do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node)); - task->flags |= PF_NO_SETAFFINITY; - task_rq_unlock(rq, task, &rf); - return false; -} - -static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node) -{ - struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node); - int i; - - rcu_read_lock(); - for_each_node(i) - io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL); - rcu_read_unlock(); - return 0; -} - -static __init int io_wq_init(void) -{ - int ret; - - ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online", - io_wq_cpu_online, NULL); - if (ret < 0) - return ret; - io_wq_online = ret; - return 0; -} -subsys_initcall(io_wq_init); diff --git a/fs/ioctl.c b/fs/ioctl.c index 32d8bd3f958b..ac3b386d99d5 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -170,7 +170,7 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo, if (*len == 0) return -EINVAL; - if (start > maxbytes) + if (start >= maxbytes) return -EFBIG; /* diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index caed9d98c64a..d88dc9da040e 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -528,7 +528,8 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) * write started inside the existing inode size. */ if (pos + len > i_size) - truncate_pagecache_range(inode, max(pos, i_size), pos + len); + truncate_pagecache_range(inode, max(pos, i_size), + pos + len - 1); } static int @@ -1458,13 +1459,6 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data) PF_MEMALLOC)) goto redirty; - /* - * Given that we do not allow direct reclaim to call us, we should - * never be called in a recursive filesystem reclaim context. - */ - if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS)) - goto redirty; - /* * Is this page beyond the end of the file? * diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index b121d7d434c6..fa24b407a9dc 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -501,7 +501,6 @@ void jbd2_journal_commit_transaction(journal_t *journal) } spin_unlock(&commit_transaction->t_handle_lock); commit_transaction->t_state = T_SWITCH; - write_unlock(&journal->j_state_lock); J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <= journal->j_max_transaction_buffers); @@ -521,6 +520,8 @@ void jbd2_journal_commit_transaction(journal_t *journal) * has reserved. This is consistent with the existing behaviour * that multiple jbd2_journal_get_write_access() calls to the same * buffer are perfectly permissible. + * We use journal->j_state_lock here to serialize processing of + * t_reserved_list with eviction of buffers from journal_unmap_buffer(). 
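
The comment being added here captures a lock-scope fix: by keeping j_state_lock held across the whole t_reserved_list walk, the commit path can no longer interleave with journal_unmap_buffer() evicting buffers from the middle of the list. A rough user-space sketch of the pattern, with a pthread rwlock standing in for j_state_lock (all names hypothetical):

    #include <pthread.h>
    #include <stddef.h>

    struct node { struct node *next; };

    static pthread_rwlock_t state_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct node *reserved_list;

    /* Commit side: hold the lock for the entire drain, so an evictor
     * can never see (or unlink from) a half-processed list. This is
     * the effect of moving the unlock below the loop. */
    void drain_reserved(void)
    {
        pthread_rwlock_wrlock(&state_lock);
        while (reserved_list)
            reserved_list = reserved_list->next; /* "refile" one entry */
        pthread_rwlock_unlock(&state_lock);
    }

    /* Eviction side: takes the same lock before unlinking a node. */
    void evict(struct node *n)
    {
        struct node **pp = &reserved_list;

        pthread_rwlock_wrlock(&state_lock);
        while (*pp && *pp != n)
            pp = &(*pp)->next;
        if (*pp)
            *pp = n->next;
        pthread_rwlock_unlock(&state_lock);
    }
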
*/ while (commit_transaction->t_reserved_list) { jh = commit_transaction->t_reserved_list; @@ -540,6 +541,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) jbd2_journal_refile_buffer(journal, jh); } + write_unlock(&journal->j_state_lock); /* * Now try to drop any written-back buffers from the journal's * checkpoint lists. We do this *before* commit because it potentially @@ -562,13 +564,13 @@ */ jbd2_journal_switch_revoke_table(journal); + write_lock(&journal->j_state_lock); /* * Reserved credits cannot be claimed anymore, free them */ atomic_sub(atomic_read(&journal->j_reserved_credits), &commit_transaction->t_outstanding_credits); - write_lock(&journal->j_state_lock); trace_jbd2_commit_flushing(journal, commit_transaction); stats.run.rs_flushing = jiffies; stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked, @@ -579,7 +581,7 @@ journal->j_running_transaction = NULL; start_time = ktime_get(); commit_transaction->t_log_start = journal->j_head; - wake_up(&journal->j_wait_transaction_locked); + wake_up_all(&journal->j_wait_transaction_locked); write_unlock(&journal->j_state_lock); jbd_debug(3, "JBD2: commit phase 2a\n"); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 099e4319851d..a07621321465 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -924,10 +924,16 @@ int jbd2_fc_wait_bufs(journal_t *journal, int num_blks) for (i = j_fc_off - 1; i >= j_fc_off - num_blks; i--) { bh = journal->j_fc_wbuf[i]; wait_on_buffer(bh); + /* + * Update j_fc_off so jbd2_fc_release_bufs can release the + * remaining buffer heads. + */ + if (unlikely(!buffer_uptodate(bh))) { + journal->j_fc_off = i + 1; + return -EIO; + } put_bh(bh); journal->j_fc_wbuf[i] = NULL; - if (unlikely(!buffer_uptodate(bh))) - return -EIO; } return 0; diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 1e07dfac4d81..1ae1697fe99b 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -256,6 +256,7 @@ static int fc_do_one_pass(journal_t *journal, err = journal->j_fc_replay_callback(journal, bh, pass, next_fc_block - journal->j_fc_first, expected_commit_id); + brelse(bh); next_fc_block++; if (err < 0 || err == JBD2_FC_REPLAY_STOP) break; diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index e8fc45fd751f..86472212cce1 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -173,7 +173,7 @@ static void wait_transaction_locked(journal_t *journal) int need_to_start; tid_t tid = journal->j_running_transaction->t_tid; - prepare_to_wait(&journal->j_wait_transaction_locked, &wait, + prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE); need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); @@ -199,7 +199,7 @@ static void wait_transaction_switching(journal_t *journal) read_unlock(&journal->j_state_lock); return; } - prepare_to_wait(&journal->j_wait_transaction_locked, &wait, + prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE); read_unlock(&journal->j_state_lock); /* @@ -894,7 +894,7 @@ void jbd2_journal_unlock_updates (journal_t *journal) write_lock(&journal->j_state_lock); --journal->j_barrier_count; write_unlock(&journal->j_state_lock); - wake_up(&journal->j_wait_transaction_locked); + wake_up_all(&journal->j_wait_transaction_locked); } static void warn_dirty_buffer(struct buffer_head *bh) @@ -1460,8 +1460,6 @@ int jbd2_journal_dirty_metadata(handle_t
*handle, struct buffer_head *bh) struct journal_head *jh; int ret = 0; - if (is_handle_aborted(handle)) - return -EROFS; if (!buffer_jbd(bh)) return -EUCLEAN; @@ -1508,6 +1506,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) journal = transaction->t_journal; spin_lock(&jh->b_state_lock); + if (is_handle_aborted(handle)) { + /* + * Check for journal abort with @jh->b_state_lock held, + * since 'jh->b_transaction' could be replaced with + * 'jh->b_next_transaction' while the old transaction is + * committing if the journal is aborted, which may trip the + * assertion that 'jh->b_frozen_data == NULL'. + */ + ret = -EROFS; + goto out_unlock_bh; + } + if (jh->b_modified == 0) { /* * This buffer's got modified and becoming part diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 7170de78cd26..db210989784d 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -603,6 +603,7 @@ out_root: jffs2_free_raw_node_refs(c); kvfree(c->blocks); jffs2_clear_xattr_subsystem(c); + jffs2_sum_exit(c); out_inohash: kfree(c->inocache_list); out_wbuf: diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index b0eb9c85eea0..980aa3300f10 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -146,12 +146,13 @@ void jfs_evict_inode(struct inode *inode) dquot_initialize(inode); if (JFS_IP(inode)->fileset == FILESYSTEM_I) { + struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap; truncate_inode_pages_final(&inode->i_data); if (test_cflag(COMMIT_Freewmap, inode)) jfs_free_zero_link(inode); - if (JFS_SBI(inode->i_sb)->ipimap) + if (ipimap && JFS_IP(ipimap)->i_imap) diFree(inode); /* diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index e58ae29a223d..0ce17ea8fa8a 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -385,7 +385,8 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks) } /* write the last buffer. */ - write_metapage(mp); + if (mp) + write_metapage(mp); IREAD_UNLOCK(ipbmap); diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 9aec80b9d7c6..8b3c86a502da 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -19,7 +19,15 @@ DEFINE_MUTEX(kernfs_mutex); static DEFINE_SPINLOCK(kernfs_rename_lock); /* kn->parent and ->name */ -static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by rename_lock */ +/* + * Don't use rename_lock to piggyback on pr_cont_buf. We don't want to + * call pr_cont() while holding rename_lock, because pr_cont() may + * perform wakeups when releasing console_sem, and holding rename_lock + * would then deadlock if the scheduler reads the kernfs_name in the + * wakeup path.
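
The comment above states the general rule this hunk enforces: don't call into the console while holding a lock that wakeup paths may also take, since releasing console_sem can itself perform wakeups. Giving the scratch buffer its own lock breaks the potential cycle. A compact user-space sketch of the safe shape (hypothetical names; printf() stands in for pr_cont()):

    #include <pthread.h>
    #include <stdio.h>

    /* Dedicated lock: it protects only the print buffer and is never
     * taken from scheduler or wakeup paths, so holding it across the
     * print call cannot close a deadlock cycle. */
    static pthread_mutex_t pr_buf_lock = PTHREAD_MUTEX_INITIALIZER;
    static char pr_buf[4096];

    void pr_name(const char *name)
    {
        pthread_mutex_lock(&pr_buf_lock);
        snprintf(pr_buf, sizeof(pr_buf), "%s", name);
        printf("%s", pr_buf);
        pthread_mutex_unlock(&pr_buf_lock);
    }
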
+ */ +static DEFINE_SPINLOCK(kernfs_pr_cont_lock); +static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by pr_cont_lock */ static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */ #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb) @@ -230,12 +238,12 @@ void pr_cont_kernfs_name(struct kernfs_node *kn) { unsigned long flags; - spin_lock_irqsave(&kernfs_rename_lock, flags); + spin_lock_irqsave(&kernfs_pr_cont_lock, flags); - kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf)); + kernfs_name(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf)); pr_cont("%s", kernfs_pr_cont_buf); - spin_unlock_irqrestore(&kernfs_rename_lock, flags); + spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags); } /** @@ -249,10 +257,10 @@ void pr_cont_kernfs_path(struct kernfs_node *kn) unsigned long flags; int sz; - spin_lock_irqsave(&kernfs_rename_lock, flags); + spin_lock_irqsave(&kernfs_pr_cont_lock, flags); - sz = kernfs_path_from_node_locked(kn, NULL, kernfs_pr_cont_buf, - sizeof(kernfs_pr_cont_buf)); + sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf, + sizeof(kernfs_pr_cont_buf)); if (sz < 0) { pr_cont("(error)"); goto out; @@ -266,7 +274,7 @@ void pr_cont_kernfs_path(struct kernfs_node *kn) pr_cont("%s", kernfs_pr_cont_buf); out: - spin_unlock_irqrestore(&kernfs_rename_lock, flags); + spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags); } /** @@ -864,13 +872,12 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent, lockdep_assert_held(&kernfs_mutex); - /* grab kernfs_rename_lock to piggy back on kernfs_pr_cont_buf */ - spin_lock_irq(&kernfs_rename_lock); + spin_lock_irq(&kernfs_pr_cont_lock); len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf)); if (len >= sizeof(kernfs_pr_cont_buf)) { - spin_unlock_irq(&kernfs_rename_lock); + spin_unlock_irq(&kernfs_pr_cont_lock); return NULL; } @@ -882,7 +889,7 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent, parent = kernfs_find_ns(parent, name, ns); } - spin_unlock_irq(&kernfs_rename_lock); + spin_unlock_irq(&kernfs_pr_cont_lock); return parent; } @@ -1512,8 +1519,11 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, mutex_lock(&kernfs_mutex); kn = kernfs_find_ns(parent, name, ns); - if (kn) + if (kn) { + kernfs_get(kn); __kernfs_remove(kn); + kernfs_put(kn); + } mutex_unlock(&kernfs_mutex); diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 8a9d4a28290d..09222efe77d2 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -446,7 +446,8 @@ static const struct address_space_operations minix_aops = { .writepage = minix_writepage, .write_begin = minix_write_begin, .write_end = generic_write_end, - .bmap = minix_bmap + .bmap = minix_bmap, + .direct_IO = noop_direct_IO }; static const struct inode_operations minix_symlink_inode_operations = { diff --git a/fs/namei.c b/fs/namei.c index 342bc7d4cbc7..887bc1f8be97 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -532,6 +532,8 @@ static void set_nameidata(struct nameidata *p, int dfd, struct filename *name) p->stack = p->internal; p->dfd = dfd; p->name = name; + p->path.mnt = NULL; + p->path.dentry = NULL; p->total_link_count = old ? 
old->total_link_count : 0; p->saved = old; current->nameidata = p; } @@ -605,6 +607,8 @@ static void terminate_walk(struct nameidata *nd) rcu_read_unlock(); } nd->depth = 0; + nd->path.mnt = NULL; + nd->path.dentry = NULL; } /* path_put is needed afterwards regardless of success or failure */ @@ -633,6 +637,11 @@ static inline bool legitimize_path(struct nameidata *nd, static bool legitimize_links(struct nameidata *nd) { int i; + if (unlikely(nd->flags & LOOKUP_CACHED)) { + drop_links(nd); + nd->depth = 0; + return false; + } for (i = 0; i < nd->depth; i++) { struct saved *last = nd->stack + i; if (unlikely(!legitimize_path(nd, &last->link, last->seq))) { @@ -708,19 +717,19 @@ out: } /** - * unlazy_child - try to switch to ref-walk mode. + * try_to_unlazy_next - try to switch to ref-walk mode. * @nd: nameidata pathwalk data - * @dentry: child of nd->path.dentry - * @seq: seq number to check dentry against - * Returns: 0 on success, -ECHILD on failure + * @dentry: next dentry to step into + * @seq: seq number to check @dentry against + * Returns: true on success, false on failure * - * unlazy_child attempts to legitimize the current nd->path, nd->root and dentry - * for ref-walk mode. @dentry must be a path found by a do_lookup call on - * @nd. Must be called from rcu-walk context. - * Nothing should touch nameidata between unlazy_child() failure and + * Similar to try_to_unlazy(), but here we have the next dentry already + * picked by rcu-walk and want to legitimize that in addition to the current + * nd->path and nd->root for ref-walk mode. Must be called from rcu-walk context. + * Nothing should touch nameidata between try_to_unlazy_next() failure and * terminate_walk(). */ -static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq) +static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry, unsigned seq) { BUG_ON(!(nd->flags & LOOKUP_RCU)); @@ -750,7 +759,7 @@ static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned se if (unlikely(!legitimize_root(nd))) goto out_dput; rcu_read_unlock(); - return 0; + return true; out2: nd->path.mnt = NULL; @@ -758,11 +767,11 @@ out1: nd->path.dentry = NULL; out: rcu_read_unlock(); - return -ECHILD; + return false; out_dput: rcu_read_unlock(); dput(dentry); - return -ECHILD; + return false; } static inline int d_revalidate(struct dentry *dentry, unsigned int flags) @@ -870,6 +879,7 @@ static int complete_walk(struct nameidata *nd) */ if (!(nd->flags & (LOOKUP_ROOT | LOOKUP_IS_SCOPED))) nd->root.mnt = NULL; + nd->flags &= ~LOOKUP_CACHED; if (!try_to_unlazy(nd)) return -ECHILD; } @@ -1432,6 +1442,8 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, * becoming unpinned.
*/ flags = dentry->d_flags; + if (read_seqretry(&mount_lock, nd->m_seq)) + return false; continue; } if (read_seqretry(&mount_lock, nd->m_seq)) @@ -1456,7 +1468,7 @@ static inline int handle_mounts(struct nameidata *nd, struct dentry *dentry, return -ENOENT; if (likely(__follow_mount_rcu(nd, path, inode, seqp))) return 0; - if (unlazy_child(nd, dentry, seq)) + if (!try_to_unlazy_next(nd, dentry, seq)) return -ECHILD; // *path might've been clobbered by __follow_mount_rcu() path->mnt = nd->path.mnt; @@ -1577,7 +1589,7 @@ static struct dentry *lookup_fast(struct nameidata *nd, status = d_revalidate(dentry, nd->flags); if (likely(status > 0)) return dentry; - if (unlazy_child(nd, dentry, seq)) + if (!try_to_unlazy_next(nd, dentry, seq)) return ERR_PTR(-ECHILD); if (unlikely(status == -ECHILD)) /* we'd been told to redo it in non-rcu mode */ @@ -2286,6 +2298,10 @@ static const char *path_init(struct nameidata *nd, unsigned flags) int error; const char *s = nd->name->name; + /* LOOKUP_CACHED requires RCU, ask caller to retry */ + if ((flags & (LOOKUP_RCU | LOOKUP_CACHED)) == LOOKUP_CACHED) + return ERR_PTR(-EAGAIN); + if (!*s) flags &= ~LOOKUP_RCU; if (flags & LOOKUP_RCU) @@ -2315,8 +2331,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags) } nd->root.mnt = NULL; - nd->path.mnt = NULL; - nd->path.dentry = NULL; /* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */ if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) { @@ -3356,6 +3370,8 @@ struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag) child = d_alloc(dentry, &slash_name); if (unlikely(!child)) goto out_err; + if (!IS_POSIXACL(dir)) + mode &= ~current_umask(); error = dir->i_op->tmpfile(dir, child, mode); if (error) goto out_err; @@ -4421,8 +4437,8 @@ out: } EXPORT_SYMBOL_NS(vfs_rename, ANDROID_GKI_VFS_EXPORT_ONLY); -static int do_renameat2(int olddfd, const char __user *oldname, int newdfd, - const char __user *newname, unsigned int flags) +int do_renameat2(int olddfd, struct filename *from, int newdfd, + struct filename *to, unsigned int flags) { struct dentry *old_dentry, *new_dentry; struct dentry *trap; @@ -4430,32 +4446,30 @@ static int do_renameat2(int olddfd, const char __user *oldname, int newdfd, struct qstr old_last, new_last; int old_type, new_type; struct inode *delegated_inode = NULL; - struct filename *from; - struct filename *to; unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET; bool should_retry = false; - int error; + int error = -EINVAL; if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) - return -EINVAL; + goto put_both; if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) && (flags & RENAME_EXCHANGE)) - return -EINVAL; + goto put_both; if (flags & RENAME_EXCHANGE) target_flags = 0; retry: - from = filename_parentat(olddfd, getname(oldname), lookup_flags, - &old_path, &old_last, &old_type); + from = filename_parentat(olddfd, from, lookup_flags, &old_path, + &old_last, &old_type); if (IS_ERR(from)) { error = PTR_ERR(from); - goto exit; + goto put_new; } - to = filename_parentat(newdfd, getname(newname), lookup_flags, - &new_path, &new_last, &new_type); + to = filename_parentat(newdfd, to, lookup_flags, &new_path, &new_last, + &new_type); if (IS_ERR(to)) { error = PTR_ERR(to); goto exit1; @@ -4548,34 +4562,40 @@ exit2: if (retry_estale(error, lookup_flags)) should_retry = true; path_put(&new_path); - putname(to); exit1: path_put(&old_path); - putname(from); if (should_retry) { should_retry = false; lookup_flags |= LOOKUP_REVAL; goto 
retry; } -exit: +put_both: + if (!IS_ERR(from)) + putname(from); +put_new: + if (!IS_ERR(to)) + putname(to); return error; } SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags) { - return do_renameat2(olddfd, oldname, newdfd, newname, flags); + return do_renameat2(olddfd, getname(oldname), newdfd, getname(newname), + flags); } SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname) { - return do_renameat2(olddfd, oldname, newdfd, newname, 0); + return do_renameat2(olddfd, getname(oldname), newdfd, getname(newname), + 0); } SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname) { - return do_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0); + return do_renameat2(AT_FDCWD, getname(oldname), AT_FDCWD, + getname(newname), 0); } int readlink_copy(char __user *buffer, int buflen, const char *link) @@ -4713,7 +4733,7 @@ int __page_symlink(struct inode *inode, const char *symname, int len, int nofs) { struct address_space *mapping = inode->i_mapping; struct page *page; - void *fsdata; + void *fsdata = NULL; int err; unsigned int flags = 0; if (nofs) diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index a5209643ac36..bfdd21224073 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -283,6 +283,7 @@ static u32 initiate_file_draining(struct nfs_client *clp, rv = NFS4_OK; break; case -ENOENT: + set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags); /* Embrace your forgetfulness! */ rv = NFS4ERR_NOMATCHING_LAYOUT; diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index d6ac2c4f88b6..1eb6c7a142ff 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -228,8 +228,7 @@ again: * */ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred, - fmode_t type, - const nfs4_stateid *stateid, + fmode_t type, const nfs4_stateid *stateid, unsigned long pagemod_limit) { struct nfs_delegation *delegation; @@ -239,25 +238,24 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred, delegation = rcu_dereference(NFS_I(inode)->delegation); if (delegation != NULL) { spin_lock(&delegation->lock); - if (nfs4_is_valid_delegation(delegation, 0)) { - nfs4_stateid_copy(&delegation->stateid, stateid); - delegation->type = type; - delegation->pagemod_limit = pagemod_limit; - oldcred = delegation->cred; - delegation->cred = get_cred(cred); - clear_bit(NFS_DELEGATION_NEED_RECLAIM, - &delegation->flags); - spin_unlock(&delegation->lock); - rcu_read_unlock(); - put_cred(oldcred); - trace_nfs4_reclaim_delegation(inode, type); - return; - } - /* We appear to have raced with a delegation return. 
*/ + nfs4_stateid_copy(&delegation->stateid, stateid); + delegation->type = type; + delegation->pagemod_limit = pagemod_limit; + oldcred = delegation->cred; + delegation->cred = get_cred(cred); + clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); + if (test_and_clear_bit(NFS_DELEGATION_REVOKED, + &delegation->flags)) + atomic_long_inc(&nfs_active_delegations); spin_unlock(&delegation->lock); + rcu_read_unlock(); + put_cred(oldcred); + trace_nfs4_reclaim_delegation(inode, type); + } else { + rcu_read_unlock(); + nfs_inode_set_delegation(inode, cred, type, stateid, + pagemod_limit); } - rcu_read_unlock(); - nfs_inode_set_delegation(inode, cred, type, stateid, pagemod_limit); } static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 2ad56ff4752c..9f88ca7b2001 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1628,16 +1628,6 @@ const struct dentry_operations nfs4_dentry_operations = { }; EXPORT_SYMBOL_GPL(nfs4_dentry_operations); -static fmode_t flags_to_mode(int flags) -{ - fmode_t res = (__force fmode_t)flags & FMODE_EXEC; - if ((flags & O_ACCMODE) != O_WRONLY) - res |= FMODE_READ; - if ((flags & O_ACCMODE) != O_RDONLY) - res |= FMODE_WRITE; - return res; -} - static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp) { return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 3c0335c15a73..c220810c61d1 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -172,8 +172,8 @@ ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE); if (iov_iter_rw(iter) == READ) - return nfs_file_direct_read(iocb, iter); - return nfs_file_direct_write(iocb, iter); + return nfs_file_direct_read(iocb, iter, true); + return nfs_file_direct_write(iocb, iter, true); } static void nfs_direct_release_pages(struct page **pages, unsigned int npages) @@ -424,6 +424,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * nfs_file_direct_read - file direct read operation for NFS files * @iocb: target I/O control block * @iter: vector of user buffers into which to read data + * @swap: flag indicating this is swap IO, not O_DIRECT IO * * We use this function for direct reads instead of calling * generic_file_aio_read() in order to avoid gfar's check to see if @@ -439,7 +440,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * client must read the updated atime from the server back into its * cache. 
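
In the direct-read path below, the new swap argument lets swap-over-NFS skip nfs_start_io_direct()/nfs_end_io_direct(): swap IO is issued under memory reclaim and must not sleep on the inode's direct-IO serialisation lock. A minimal sketch of that conditional-locking shape (user-space stand-ins; names hypothetical):

    #include <pthread.h>

    static pthread_rwlock_t io_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Ordinary O_DIRECT reads serialise against buffered IO through
     * io_lock; swap reads, issued from reclaim, must never block on
     * it, so they bypass the lock entirely. */
    long direct_read(long (*issue)(void *), void *req, int is_swap)
    {
        long ret;

        if (!is_swap)
            pthread_rwlock_rdlock(&io_lock);
        ret = issue(req);
        if (!is_swap)
            pthread_rwlock_unlock(&io_lock);
        return ret;
    }
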
*/ -ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) +ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, + bool swap) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -481,12 +483,14 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) if (iter_is_iovec(iter)) dreq->flags = NFS_ODIRECT_SHOULD_DIRTY; - nfs_start_io_direct(inode); + if (!swap) + nfs_start_io_direct(inode); NFS_I(inode)->read_io += count; requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos); - nfs_end_io_direct(inode); + if (!swap) + nfs_end_io_direct(inode); if (requested > 0) { result = nfs_direct_wait(dreq); @@ -789,7 +793,7 @@ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = { */ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, struct iov_iter *iter, - loff_t pos) + loff_t pos, int ioflags) { struct nfs_pageio_descriptor desc; struct inode *inode = dreq->inode; @@ -797,7 +801,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, size_t requested_bytes = 0; size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE); - nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false, + nfs_pageio_init_write(&desc, inode, ioflags, false, &nfs_direct_write_completion_ops); desc.pg_dreq = dreq; get_dreq(dreq); @@ -875,6 +879,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * nfs_file_direct_write - file direct write operation for NFS files * @iocb: target I/O control block * @iter: vector of user buffers from which to write data + * @swap: flag indicating this is swap IO, not O_DIRECT IO * * We use this function for direct writes instead of calling * generic_file_aio_write() in order to avoid taking the inode @@ -891,7 +896,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * Note that O_APPEND is not supported for NFS direct writes, as there * is no atomic O_APPEND write facility in the NFS protocol. 
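
The write side below makes two further swap-only adjustments: generic_write_checks() is bypassed, since the usual file-size and rlimit checks do not apply to swap extents, and the pageio descriptor is set up with FLUSH_STABLE rather than FLUSH_COND_STABLE, because a swapped-out page must reach stable storage before it can be dropped from memory. A small decision sketch (the constants are illustrative, not the kernel's values):

    #include <stdbool.h>
    #include <stdio.h>

    enum flush_mode { FLUSH_COND_STABLE, FLUSH_STABLE };

    /* Swap writes need stable-storage semantics on every WRITE; normal
     * O_DIRECT writes may let the server defer the commit until a
     * later COMMIT request. */
    static enum flush_mode dwrite_flush_mode(bool is_swap)
    {
        return is_swap ? FLUSH_STABLE : FLUSH_COND_STABLE;
    }

    int main(void)
    {
        printf("%d %d\n", dwrite_flush_mode(false), dwrite_flush_mode(true));
        return 0;
    }
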
*/ -ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) +ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, + bool swap) { ssize_t result, requested; size_t count; @@ -905,7 +911,11 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n", file, iov_iter_count(iter), (long long) iocb->ki_pos); - result = generic_write_checks(iocb, iter); + if (swap) + /* bypass generic checks */ + result = iov_iter_count(iter); + else + result = generic_write_checks(iocb, iter); if (result <= 0) return result; count = result; @@ -936,17 +946,23 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) dreq->iocb = iocb; pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode); - nfs_start_io_direct(inode); + if (swap) { + requested = nfs_direct_write_schedule_iovec(dreq, iter, pos, + FLUSH_STABLE); + } else { + nfs_start_io_direct(inode); - requested = nfs_direct_write_schedule_iovec(dreq, iter, pos); + requested = nfs_direct_write_schedule_iovec(dreq, iter, pos, + FLUSH_COND_STABLE); - if (mapping->nrpages) { - invalidate_inode_pages2_range(mapping, - pos >> PAGE_SHIFT, end); + if (mapping->nrpages) { + invalidate_inode_pages2_range(mapping, + pos >> PAGE_SHIFT, end); + } + + nfs_end_io_direct(inode); } - nfs_end_io_direct(inode); - if (requested > 0) { result = nfs_direct_wait(dreq); if (result > 0) { diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 63940a7a70be..ad856b7b9a46 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -161,7 +161,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to) ssize_t result; if (iocb->ki_flags & IOCB_DIRECT) - return nfs_file_direct_read(iocb, to); + return nfs_file_direct_read(iocb, to, false); dprintk("NFS: read(%pD2, %zu@%lu)\n", iocb->ki_filp, @@ -208,15 +208,16 @@ static int nfs_file_fsync_commit(struct file *file, int datasync) { struct inode *inode = file_inode(file); - int ret; + int ret, ret2; dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync); nfs_inc_stats(inode, NFSIOS_VFSFSYNC); ret = nfs_commit_inode(inode, FLUSH_SYNC); - if (ret < 0) - return ret; - return file_check_and_advance_wb_err(file); + ret2 = file_check_and_advance_wb_err(file); + if (ret2 < 0) + return ret2; + return ret; } int @@ -389,11 +390,8 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, return status; NFS_I(mapping->host)->write_io += copied; - if (nfs_ctx_key_to_expire(ctx, mapping->host)) { - status = nfs_wb_all(mapping->host); - if (status < 0) - return status; - } + if (nfs_ctx_key_to_expire(ctx, mapping->host)) + nfs_wb_all(mapping->host); return copied; } @@ -616,7 +614,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) return result; if (iocb->ki_flags & IOCB_DIRECT) - return nfs_file_direct_write(iocb, from); + return nfs_file_direct_write(iocb, from, false); dprintk("NFS: write(%pD2, %zu@%Ld)\n", file, iov_iter_count(from), (long long) iocb->ki_pos); diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c index 05b39e8f97b9..d60c086c6e9c 100644 --- a/fs/nfs/fs_context.c +++ b/fs/nfs/fs_context.c @@ -476,7 +476,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc, if (result.negated) ctx->flags &= ~NFS_MOUNT_SOFTREVAL; else - ctx->flags &= NFS_MOUNT_SOFTREVAL; + ctx->flags |= NFS_MOUNT_SOFTREVAL; break; case Opt_posix: if (result.negated) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index f17ad75d0afd..62e361c8fcd8 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1139,7 +1139,6 
@@ int nfs_open(struct inode *inode, struct file *filp) nfs_fscache_open_file(inode, filp); return 0; } -EXPORT_SYMBOL_GPL(nfs_open); /* * This function is called whenever some part of NFS notices that diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 98554dd18a71..a7e0970b5bfe 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -42,6 +42,16 @@ static inline bool nfs_lookup_is_soft_revalidate(const struct dentry *dentry) return true; } +static inline fmode_t flags_to_mode(int flags) +{ + fmode_t res = (__force fmode_t)flags & FMODE_EXEC; + if ((flags & O_ACCMODE) != O_WRONLY) + res |= FMODE_READ; + if ((flags & O_ACCMODE) != O_RDONLY) + res |= FMODE_WRITE; + return res; +} + /* * Note: RFC 1813 doesn't limit the number of auth flavors that * a server can return, so make something up. @@ -578,6 +588,13 @@ nfs_write_match_verf(const struct nfs_writeverf *verf, !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier); } +static inline gfp_t nfs_io_gfp_mask(void) +{ + if (current->flags & PF_WQ_WORKER) + return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; + return GFP_KERNEL; +} + /* unlink.c */ extern struct rpc_task * nfs_async_rename(struct inode *old_dir, struct inode *new_dir, @@ -815,6 +832,7 @@ static inline bool nfs_error_is_fatal_on_server(int err) case 0: case -ERESTARTSYS: case -EINTR: + case -ENOMEM: return false; } return nfs_error_is_fatal(err); diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c index 5601e47360c2..b49359afac88 100644 --- a/fs/nfs/nfs3client.c +++ b/fs/nfs/nfs3client.c @@ -108,7 +108,6 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv, if (mds_srv->flags & NFS_MOUNT_NORESVPORT) __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); - __set_bit(NFS_CS_NOPING, &cl_init.init_flags); __set_bit(NFS_CS_DS, &cl_init.init_flags); /* Use the MDS nfs_client cl_ipaddr. 
*/ diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 2587b1b8e2ef..dad32b171e67 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -567,8 +567,10 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst, ctx = get_nfs_open_context(nfs_file_open_context(src)); l_ctx = nfs_get_lock_context(ctx); - if (IS_ERR(l_ctx)) - return PTR_ERR(l_ctx); + if (IS_ERR(l_ctx)) { + status = PTR_ERR(l_ctx); + goto out; + } status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx, FMODE_READ); @@ -576,7 +578,7 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst, if (status) { if (status == -EAGAIN) status = -NFS4ERR_BAD_STATEID; - return status; + goto out; } status = nfs4_call_sync(src_server->client, src_server, &msg, @@ -584,6 +586,7 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst, if (status == -ENOTSUPP) src_server->caps &= ~NFS_CAP_COPY_NOTIFY; +out: put_nfs_open_context(nfs_file_open_context(src)); return status; } diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 0e6437b08a3a..252c99c76a42 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -346,6 +346,7 @@ int nfs40_init_client(struct nfs_client *clp) ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE, "NFSv4.0 transport Slot table"); if (ret) { + nfs4_shutdown_slot_table(tbl); kfree(tbl); return ret; } diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index a1e5c6b85ded..70cd0d764c44 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -32,6 +32,7 @@ nfs4_file_open(struct inode *inode, struct file *filp) struct dentry *parent = NULL; struct inode *dir; unsigned openflags = filp->f_flags; + fmode_t f_mode; struct iattr attr; int err; @@ -50,8 +51,9 @@ nfs4_file_open(struct inode *inode, struct file *filp) if (err) return err; + f_mode = filp->f_mode; if ((openflags & O_ACCMODE) == 3) - return nfs_open(inode, filp); + f_mode |= flags_to_mode(openflags); /* We can't create new files here */ openflags &= ~(O_CREAT|O_EXCL); @@ -59,7 +61,7 @@ nfs4_file_open(struct inode *inode, struct file *filp) parent = dget_parent(dentry); dir = d_inode(parent); - ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp); + ctx = alloc_nfs_open_context(file_dentry(filp), f_mode, filp); err = PTR_ERR(ctx); if (IS_ERR(ctx)) goto out; @@ -319,7 +321,7 @@ static int read_name_gen = 1; static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt, struct nfs_fh *src_fh, nfs4_stateid *stateid) { - struct nfs_fattr fattr; + struct nfs_fattr *fattr = nfs_alloc_fattr(); struct file *filep, *res; struct nfs_server *server; struct inode *r_ino = NULL; @@ -330,14 +332,20 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt, server = NFS_SERVER(ss_mnt->mnt_root->d_inode); - nfs_fattr_init(&fattr); + if (!fattr) + return ERR_PTR(-ENOMEM); - status = nfs4_proc_getattr(server, src_fh, &fattr, NULL, NULL); + status = nfs4_proc_getattr(server, src_fh, fattr, NULL, NULL); if (status < 0) { res = ERR_PTR(status); goto out; } + if (!S_ISREG(fattr->mode)) { + res = ERR_PTR(-EBADF); + goto out; + } + res = ERR_PTR(-ENOMEM); len = strlen(SSC_READ_NAME_BODY) + 16; read_name = kzalloc(len, GFP_NOFS); @@ -345,7 +353,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt, goto out; snprintf(read_name, len, SSC_READ_NAME_BODY, read_name_gen++); - r_ino = nfs_fhget(ss_mnt->mnt_root->d_inode->i_sb, src_fh, &fattr, + r_ino = nfs_fhget(ss_mnt->mnt_root->d_inode->i_sb, src_fh, fattr, NULL); if (IS_ERR(r_ino)) { res = ERR_CAST(r_ino); @@ -356,6 +364,7 @@ 
static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt, r_ino->i_fop); if (IS_ERR(filep)) { res = ERR_CAST(filep); + iput(r_ino); goto out_free_name; } filep->f_mode |= FMODE_READ; @@ -390,6 +399,7 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt, out_free_name: kfree(read_name); out: + nfs_free_fattr(fattr); return res; out_stateowner: nfs4_put_state_owner(sp); diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index f331866dd418..ec6afd3c4bca 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -561,22 +561,20 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap, return true; } -static void -nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret) +static void nfs_idmap_complete_pipe_upcall(struct idmap_legacy_upcalldata *data, + int ret) { - struct key *authkey = idmap->idmap_upcall_data->authkey; - - kfree(idmap->idmap_upcall_data); - idmap->idmap_upcall_data = NULL; - complete_request_key(authkey, ret); - key_put(authkey); + complete_request_key(data->authkey, ret); + key_put(data->authkey); + kfree(data); } -static void -nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret) +static void nfs_idmap_abort_pipe_upcall(struct idmap *idmap, + struct idmap_legacy_upcalldata *data, + int ret) { - if (idmap->idmap_upcall_data != NULL) - nfs_idmap_complete_pipe_upcall_locked(idmap, ret); + if (cmpxchg(&idmap->idmap_upcall_data, data, NULL) == data) + nfs_idmap_complete_pipe_upcall(data, ret); } static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux) @@ -613,7 +611,7 @@ static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux) ret = rpc_queue_upcall(idmap->idmap_pipe, msg); if (ret < 0) - nfs_idmap_abort_pipe_upcall(idmap, ret); + nfs_idmap_abort_pipe_upcall(idmap, data, ret); return ret; out2: @@ -669,6 +667,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) struct request_key_auth *rka; struct rpc_inode *rpci = RPC_I(file_inode(filp)); struct idmap *idmap = (struct idmap *)rpci->private; + struct idmap_legacy_upcalldata *data; struct key *authkey; struct idmap_msg im; size_t namelen_in; @@ -678,10 +677,11 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) * will have been woken up and someone else may now have used * idmap_key_cons - so after this point we may no longer touch it. 
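 * Ownership of idmap->idmap_upcall_data is therefore taken with an * atomic xchg(): a non-NULL return value means this caller now owns * the upcall data, while NULL means another thread has already claimed * and completed it.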
*/ - if (idmap->idmap_upcall_data == NULL) + data = xchg(&idmap->idmap_upcall_data, NULL); + if (data == NULL) goto out_noupcall; - authkey = idmap->idmap_upcall_data->authkey; + authkey = data->authkey; rka = get_request_key_auth(authkey); if (mlen != sizeof(im)) { @@ -703,18 +703,17 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) if (namelen_in == 0 || namelen_in == IDMAP_NAMESZ) { ret = -EINVAL; goto out; -} + } - ret = nfs_idmap_read_and_verify_message(&im, - &idmap->idmap_upcall_data->idmap_msg, - rka->target_key, authkey); + ret = nfs_idmap_read_and_verify_message(&im, &data->idmap_msg, + rka->target_key, authkey); if (ret >= 0) { key_set_timeout(rka->target_key, nfs_idmap_cache_timeout); ret = mlen; } out: - nfs_idmap_complete_pipe_upcall_locked(idmap, ret); + nfs_idmap_complete_pipe_upcall(data, ret); out_noupcall: return ret; } @@ -728,7 +727,7 @@ idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg) struct idmap *idmap = data->idmap; if (msg->errno) - nfs_idmap_abort_pipe_upcall(idmap, msg->errno); + nfs_idmap_abort_pipe_upcall(idmap, data, msg->errno); } static void @@ -736,8 +735,11 @@ idmap_release_pipe(struct inode *inode) { struct rpc_inode *rpci = RPC_I(inode); struct idmap *idmap = (struct idmap *)rpci->private; + struct idmap_legacy_upcalldata *data; - nfs_idmap_abort_pipe_upcall(idmap, -EPIPE); + data = xchg(&idmap->idmap_upcall_data, NULL); + if (data) + nfs_idmap_complete_pipe_upcall(data, -EPIPE); } int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, kuid_t *uid) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4d4ce7fd4e70..a576b0e28711 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -369,6 +369,14 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent kunmap_atomic(start); } +static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version) +{ + if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) { + fattr->pre_change_attr = version; + fattr->valid |= NFS_ATTR_FATTR_PRECHANGE; + } +} + static void nfs4_test_and_free_stateid(struct nfs_server *server, nfs4_stateid *stateid, const struct cred *cred) @@ -782,10 +790,9 @@ static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot, if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0) slot->seq_nr_highest_sent = seqnr; } -static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, - u32 seqnr) +static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr) { - slot->seq_nr_highest_sent = seqnr; + nfs4_slot_sequence_record_sent(slot, seqnr); slot->seq_nr_last_acked = seqnr; } @@ -852,7 +859,6 @@ static int nfs41_sequence_process(struct rpc_task *task, __func__, slot->slot_nr, slot->seq_nr); - nfs4_slot_sequence_acked(slot, slot->seq_nr); goto out_retry; case -NFS4ERR_RETRY_UNCACHED_REP: case -NFS4ERR_SEQ_FALSE_RETRY: @@ -3078,8 +3084,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, } out: - if (!opendata->cancelled) + if (!opendata->cancelled) { + if (opendata->lgp) { + nfs4_lgopen_release(opendata->lgp); + opendata->lgp = NULL; + } nfs4_sequence_free_slot(&opendata->o_res.seq_res); + } return ret; } @@ -6464,7 +6475,9 @@ static void nfs4_delegreturn_release(void *calldata) pnfs_roc_release(&data->lr.arg, &data->lr.res, data->res.lr_ret); if (inode) { - nfs_post_op_update_inode_force_wcc(inode, &data->fattr); + nfs4_fattr_set_prechange(&data->fattr, + inode_peek_iversion_raw(inode)); + nfs_refresh_inode(inode, &data->fattr); nfs_iput_and_deactive(inode); } 
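/* * Seeding the pre-change attribute above with the inode's current * change attribute lets nfs_refresh_inode() apply the DELEGRETURN * attributes as an ordinary update instead of forcing the attribute * cache to be invalidated. */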
kfree(calldata); @@ -7001,6 +7014,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) { struct nfs4_lockdata *data = calldata; struct nfs4_lock_state *lsp = data->lsp; + struct nfs_server *server = NFS_SERVER(d_inode(data->ctx->dentry)); dprintk("%s: begin!\n", __func__); @@ -7010,8 +7024,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) data->rpc_status = task->tk_status; switch (task->tk_status) { case 0: - renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), - data->timestamp); + renew_lease(server, data->timestamp); if (data->arg.new_lock && !data->cancelled) { data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) @@ -7032,6 +7045,8 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) if (!nfs4_stateid_match(&data->arg.open_stateid, &lsp->ls_state->open_stateid)) goto out_restart; + else if (nfs4_async_handle_error(task, server, lsp->ls_state, NULL) == -EAGAIN) + goto out_restart; } else if (!nfs4_stateid_match(&data->arg.lock_stateid, &lsp->ls_stateid)) goto out_restart; @@ -9264,6 +9279,9 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf rpc_delay(task, NFS4_POLL_RETRY_MAX); fallthrough; case -NFS4ERR_RETRY_UNCACHED_REP: + case -EACCES: + dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n", + __func__, task->tk_status, clp->cl_hostname); return -EAGAIN; case -NFS4ERR_BADSESSION: case -NFS4ERR_DEADSESSION: diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index cbeec29e9f21..a77a3d8c0b3f 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -49,6 +49,7 @@ #include #include #include +#include #include @@ -1776,6 +1777,7 @@ static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp) { + set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); /* Mark all delegations for reclaim */ nfs_delegation_mark_reclaim(clp); nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot); @@ -2557,9 +2559,17 @@ static void nfs4_layoutreturn_any_run(struct nfs_client *clp) static void nfs4_state_manager(struct nfs_client *clp) { + unsigned int memflags; int status = 0; const char *section = "", *section_sep = ""; + /* + * State recovery can deadlock if the direct reclaim code tries + * to start NFS writeback. So ensure memory allocations are all + * GFP_NOFS. + */ + memflags = memalloc_nofs_save(); + /* Ensure exclusive access to NFSv4 state */ do { trace_nfs4_state_mgr(clp); @@ -2633,6 +2643,7 @@ static void nfs4_state_manager(struct nfs_client *clp) if (status < 0) goto out_error; nfs4_state_end_reclaim_reboot(clp); + continue; } /* Detect expired delegations...
*/ @@ -2654,6 +2665,7 @@ static void nfs4_state_manager(struct nfs_client *clp) clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); } + memalloc_nofs_restore(memflags); nfs4_end_drain_session(clp); nfs4_clear_state_manager_bit(clp); @@ -2671,6 +2683,7 @@ static void nfs4_state_manager(struct nfs_client *clp) return; if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) return; + memflags = memalloc_nofs_save(); } while (refcount_read(&clp->cl_count) > 1 && !signalled()); goto out_drain; @@ -2683,6 +2696,7 @@ out_error: clp->cl_hostname, -status); ssleep(1); out_drain: + memalloc_nofs_restore(memflags); nfs4_end_drain_session(clp); nfs4_clear_state_manager_bit(clp); } diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 98b9c1ed366e..17fef6eb490c 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -90,10 +90,10 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos) } } -static inline struct nfs_page * -nfs_page_alloc(void) +static inline struct nfs_page *nfs_page_alloc(void) { - struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL); + struct nfs_page *p = + kmem_cache_zalloc(nfs_page_cachep, nfs_io_gfp_mask()); if (p) INIT_LIST_HEAD(&p->wb_list); return p; @@ -901,7 +901,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, struct nfs_commit_info cinfo; struct nfs_page_array *pg_array = &hdr->page_array; unsigned int pagecount, pageused; - gfp_t gfp_flags = GFP_KERNEL; + gfp_t gfp_flags = nfs_io_gfp_mask(); pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count); pg_array->npages = pagecount; @@ -984,7 +984,7 @@ nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc, desc->pg_mirrors_dynamic = NULL; if (mirror_count == 1) return desc->pg_mirrors_static; - ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_KERNEL); + ret = kmalloc_array(mirror_count, sizeof(*ret), nfs_io_gfp_mask()); if (ret != NULL) { for (i = 0; i < mirror_count; i++) nfs_pageio_mirror_init(&ret[i], desc->pg_bsize); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index b3b9eff5d572..21436721745b 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -469,6 +469,7 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo, pnfs_clear_lseg_state(lseg, lseg_list); pnfs_clear_layoutreturn_info(lo); pnfs_free_returned_lsegs(lo, lseg_list, &range, 0); + set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags); if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) && !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) pnfs_clear_layoutreturn_waitbit(lo); @@ -1923,8 +1924,9 @@ static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo) static void nfs_layoutget_end(struct pnfs_layout_hdr *lo) { - if (atomic_dec_and_test(&lo->plh_outstanding)) - wake_up_var(&lo->plh_outstanding); + if (atomic_dec_and_test(&lo->plh_outstanding) && + test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags)) + wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN); } static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo) @@ -2006,6 +2008,7 @@ lookup_again: lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags); if (lo == NULL) { spin_unlock(&ino->i_lock); + lseg = ERR_PTR(-ENOMEM); trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, PNFS_UPDATE_LAYOUT_NOMEM); goto out; @@ -2030,11 +2033,11 @@ lookup_again: * If the layout segment list is empty, but there are outstanding * layoutget calls, then they might be subject to a layoutrecall. 
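 * Draining is keyed on the NFS_LAYOUT_DRAIN flag, which is set when * the layout stateid is invalidated, so only callers racing with an * invalidation are made to wait for the outstanding layoutgets to * complete.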
*/ - if ((list_empty(&lo->plh_segs) || !pnfs_layout_is_valid(lo)) && + if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) && atomic_read(&lo->plh_outstanding) != 0) { spin_unlock(&ino->i_lock); - lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding, - !atomic_read(&lo->plh_outstanding))); + lseg = ERR_PTR(wait_on_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN, + TASK_KILLABLE)); if (IS_ERR(lseg)) goto out_put_layout_hdr; pnfs_put_layout_hdr(lo); @@ -2134,6 +2137,7 @@ lookup_again: lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags); if (!lgp) { + lseg = ERR_PTR(-ENOMEM); trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL, PNFS_UPDATE_LAYOUT_NOMEM); nfs_layoutget_end(lo); @@ -2153,6 +2157,12 @@ lookup_again: case -ERECALLCONFLICT: case -EAGAIN: break; + case -ENODATA: + /* The server returned NFS4ERR_LAYOUTUNAVAILABLE */ + pnfs_layout_set_fail_bit( + lo, pnfs_iomode_to_fail_bit(iomode)); + lseg = NULL; + goto out_put_layout_hdr; default: if (!nfs_error_is_fatal(PTR_ERR(lseg))) { pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode)); @@ -2406,7 +2416,8 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) goto out_forget; } - if (!pnfs_layout_is_valid(lo) && !pnfs_is_first_layoutget(lo)) + if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) && + !pnfs_is_first_layoutget(lo)) goto out_forget; if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) { diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 11d9ed9addc0..a7cf84a6673b 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -107,6 +107,7 @@ enum { NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */ NFS_LAYOUT_INODE_FREEING, /* The inode is being freed */ NFS_LAYOUT_HASHED, /* The layout visible */ + NFS_LAYOUT_DRAIN, }; enum layoutdriver_policy_flags { diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 7b9d701bef01..a2ad8bb87e2d 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -419,7 +419,7 @@ static struct nfs_commit_data * pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket, struct nfs_commit_info *cinfo) { - struct nfs_commit_data *data = nfs_commitdata_alloc(false); + struct nfs_commit_data *data = nfs_commitdata_alloc(); if (!data) return NULL; @@ -515,7 +515,11 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages, unsigned int nreq = 0; if (!list_empty(mds_pages)) { - data = nfs_commitdata_alloc(true); + data = nfs_commitdata_alloc(); + if (!data) { + nfs_retry_commit(mds_pages, NULL, cinfo, -1); + return -ENOMEM; + } data->ds_commit_index = -1; list_splice_init(mds_pages, &data->pages); list_add_tail(&data->list, &list); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 4034102010f0..b3fcc27b9564 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1029,22 +1029,31 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx) if (ctx && ctx->bsize) sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits); - if (server->nfs_client->rpc_ops->version != 2) { - /* The VFS shouldn't apply the umask to mode bits. We will do - * so ourselves when necessary. + switch (server->nfs_client->rpc_ops->version) { + case 2: + sb->s_time_gran = 1000; + sb->s_time_min = 0; + sb->s_time_max = U32_MAX; + break; + case 3: + /* + * The VFS shouldn't apply the umask to mode bits. + * We will do so ourselves when necessary. 
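+ * Setting SB_POSIXACL below is what tells the VFS to leave the + * umask handling to the filesystem.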
*/ sb->s_flags |= SB_POSIXACL; sb->s_time_gran = 1; - sb->s_export_op = &nfs_export_ops; - } else - sb->s_time_gran = 1000; - - if (server->nfs_client->rpc_ops->version != 4) { sb->s_time_min = 0; sb->s_time_max = U32_MAX; - } else { + sb->s_export_op = &nfs_export_ops; + break; + case 4: + sb->s_flags |= SB_POSIXACL; + sb->s_time_gran = 1; sb->s_time_min = S64_MIN; sb->s_time_max = S64_MAX; + if (server->caps & NFS_CAP_ATOMIC_OPEN_V1) + sb->s_export_op = &nfs_export_ops; + break; } sb->s_magic = NFS_SUPER_MAGIC; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index cc926e69ee9b..dc08a0c02f09 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -70,27 +70,17 @@ static mempool_t *nfs_wdata_mempool; static struct kmem_cache *nfs_cdata_cachep; static mempool_t *nfs_commit_mempool; -struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail) +struct nfs_commit_data *nfs_commitdata_alloc(void) { struct nfs_commit_data *p; - if (never_fail) - p = mempool_alloc(nfs_commit_mempool, GFP_NOIO); - else { - /* It is OK to do some reclaim, not no safe to wait - * for anything to be returned to the pool. - * mempool_alloc() cannot handle that particular combination, - * so we need two separate attempts. - */ + p = kmem_cache_zalloc(nfs_cdata_cachep, nfs_io_gfp_mask()); + if (!p) { p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT); - if (!p) - p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO | - __GFP_NOWARN | __GFP_NORETRY); if (!p) return NULL; + memset(p, 0, sizeof(*p)); } - - memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&p->pages); return p; } @@ -104,9 +94,15 @@ EXPORT_SYMBOL_GPL(nfs_commit_free); static struct nfs_pgio_header *nfs_writehdr_alloc(void) { - struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL); + struct nfs_pgio_header *p; - memset(p, 0, sizeof(*p)); + p = kmem_cache_zalloc(nfs_wdata_cachep, nfs_io_gfp_mask()); + if (!p) { + p = mempool_alloc(nfs_wdata_mempool, GFP_NOWAIT); + if (!p) + return NULL; + memset(p, 0, sizeof(*p)); + } p->rw_mode = FMODE_WRITE; return p; } @@ -679,11 +675,7 @@ static int nfs_writepage_locked(struct page *page, err = nfs_do_writepage(page, wbc, &pgio); pgio.pg_error = 0; nfs_pageio_complete(&pgio); - if (err < 0) - return err; - if (nfs_error_is_fatal(pgio.pg_error)) - return pgio.pg_error; - return 0; + return err; } int nfs_writepage(struct page *page, struct writeback_control *wbc) @@ -734,9 +726,6 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) if (err < 0) goto out_err; - err = pgio.pg_error; - if (nfs_error_is_fatal(err)) - goto out_err; return 0; out_err: return err; @@ -1415,7 +1404,7 @@ static void nfs_async_write_error(struct list_head *head, int error) while (!list_empty(head)) { req = nfs_list_entry(head->next); nfs_list_remove_request(req); - if (nfs_error_is_fatal(error)) + if (nfs_error_is_fatal_on_server(error)) nfs_write_error(req, error); else nfs_redirty_request(req); @@ -1800,7 +1789,11 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how, if (list_empty(head)) return 0; - data = nfs_commitdata_alloc(true); + data = nfs_commitdata_alloc(); + if (!data) { + nfs_retry_commit(head, NULL, cinfo, -1); + return -ENOMEM; + } /* Set up the argument struct */ nfs_init_commit(data, head, NULL, cinfo); diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index acd0898e3866..e30e1ddc1ace 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -194,7 +194,6 @@ nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval, __set_bit(NFSD_FILE_BREAK_READ, 
&nf->nf_flags); } nf->nf_mark = NULL; - init_rwsem(&nf->nf_rwsem); trace_nfsd_file_alloc(nf); } return nf; diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h index 7872df5a0fe3..435ceab27897 100644 --- a/fs/nfsd/filecache.h +++ b/fs/nfsd/filecache.h @@ -46,7 +46,6 @@ struct nfsd_file { refcount_t nf_ref; unsigned char nf_may; struct nfsd_file_mark *nf_mark; - struct rw_semaphore nf_rwsem; }; int nfsd_file_cache_init(void); diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 7850d141c762..735ee8a79870 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1380,6 +1380,8 @@ static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync) static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy) { + struct file *dst = copy->nf_dst->nf_file; + struct file *src = copy->nf_src->nf_file; ssize_t bytes_copied = 0; size_t bytes_total = copy->cp_count; u64 src_pos = copy->cp_src_pos; @@ -1388,9 +1390,8 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy) do { if (kthread_should_stop()) break; - bytes_copied = nfsd_copy_file_range(copy->nf_src->nf_file, - src_pos, copy->nf_dst->nf_file, dst_pos, - bytes_total); + bytes_copied = nfsd_copy_file_range(src, src_pos, dst, dst_pos, + bytes_total); if (bytes_copied <= 0) break; bytes_total -= bytes_copied; diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index f9b730c43192..83c4e6883953 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -815,8 +815,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg, princhash.data = memdup_user( &ci->cc_princhash.cp_data, princhashlen); - if (IS_ERR_OR_NULL(princhash.data)) + if (IS_ERR_OR_NULL(princhash.data)) { + kfree(name.data); return -EFAULT; + } princhash.len = princhashlen; } else princhash.len = 0; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 84dd68091f42..665d0eaeb8db 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -843,6 +843,7 @@ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) static void nfs4_free_deleg(struct nfs4_stid *stid) { + WARN_ON(!list_empty(&stid->sc_cp_list)); kmem_cache_free(deleg_slab, stid); atomic_long_dec(&num_delegations); } @@ -1358,6 +1359,7 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid) release_all_access(stp); if (stp->st_stateowner) nfs4_put_stateowner(stp->st_stateowner); + WARN_ON(!list_empty(&stid->sc_cp_list)); kmem_cache_free(stateid_slab, stid); } @@ -6207,6 +6209,7 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) struct nfs4_client *clp = s->st_stid.sc_client; bool unhashed; LIST_HEAD(reaplist); + struct nfs4_ol_stateid *stp; spin_lock(&clp->cl_lock); unhashed = unhash_open_stateid(s, &reaplist); @@ -6215,6 +6218,8 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) if (unhashed) put_ol_stateid_locked(s, &reaplist); spin_unlock(&clp->cl_lock); + list_for_each_entry(stp, &reaplist, st_locks) + nfs4_free_cpntf_statelist(clp->net, &stp->st_stid); free_ol_stateid_reaplist(&reaplist); } else { spin_unlock(&clp->cl_lock); @@ -7122,16 +7127,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp, if (sop->so_is_open_owner || !same_owner_str(sop, owner)) continue; - /* see if there are still any locks associated with it */ - lo = lockowner(sop); - list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) { - if (check_for_locks(stp->st_stid.sc_file, lo)) { - status = nfserr_locks_held; - spin_unlock(&clp->cl_lock); - return status; - } + if (atomic_read(&sop->so_count) != 1) { + 
spin_unlock(&clp->cl_lock); + return nfserr_locks_held; } + lo = lockowner(sop); nfs4_get_stateowner(sop); break; } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 46f825cf53f4..cc605ee0b2fa 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3871,7 +3871,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, if (resp->xdr.buf->page_len && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags)) { WARN_ON_ONCE(1); - return nfserr_resource; + return nfserr_serverfault; } xdr_commit_encode(xdr); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 011cd570b50d..b09ead06a249 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -535,10 +535,11 @@ __be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos, { struct file *src = nf_src->nf_file; struct file *dst = nf_dst->nf_file; + errseq_t since; loff_t cloned; __be32 ret = 0; - down_write(&nf_dst->nf_rwsem); + since = READ_ONCE(dst->f_wb_err); cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0); if (cloned < 0) { ret = nfserrno(cloned); @@ -552,6 +553,8 @@ __be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos, loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX; int status = vfs_fsync_range(dst, dst_pos, dst_end, 0); + if (!status) + status = filemap_check_wb_err(dst->f_mapping, since); if (!status) status = commit_inode_metadata(file_inode(src)); if (status < 0) { @@ -561,13 +564,13 @@ __be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos, } } out_err: - up_write(&nf_dst->nf_rwsem); return ret; } ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, u64 dst_pos, u64 count) { + ssize_t ret; /* * Limit copy to 4MB to prevent indefinitely blocking an nfsd @@ -578,7 +581,12 @@ ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, * limit like this and pipeline multiple COPY requests. 
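 * * If the plain copy fails with -EOPNOTSUPP or -EXDEV, it is retried * once below with COPY_FILE_SPLICE so that the data can still be moved * by an in-kernel splice.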
*/ count = min_t(u64, count, 1 << 22); - return vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0); + ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0); + + if (ret == -EOPNOTSUPP || ret == -EXDEV) + ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, + COPY_FILE_SPLICE); + return ret; } __be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp, @@ -980,6 +988,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf, struct file *file = nf->nf_file; struct svc_export *exp; struct iov_iter iter; + errseq_t since; __be32 nfserr; int host_err; int use_wgather; @@ -1009,21 +1018,22 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf, flags |= RWF_SYNC; iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt); + since = READ_ONCE(file->f_wb_err); if (flags & RWF_SYNC) { - down_write(&nf->nf_rwsem); - host_err = vfs_iter_write(file, &iter, &pos, flags); - if (host_err < 0) - nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp), - nfsd_net_id)); - up_write(&nf->nf_rwsem); - } else { - down_read(&nf->nf_rwsem); if (verf) nfsd_copy_boot_verifier(verf, net_generic(SVC_NET(rqstp), nfsd_net_id)); host_err = vfs_iter_write(file, &iter, &pos, flags); - up_read(&nf->nf_rwsem); + if (host_err < 0) + nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp), + nfsd_net_id)); + } else { + if (verf) + nfsd_copy_boot_verifier(verf, + net_generic(SVC_NET(rqstp), + nfsd_net_id)); + host_err = vfs_iter_write(file, &iter, &pos, flags); } if (host_err < 0) { nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp), @@ -1033,6 +1043,9 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf, *cnt = host_err; nfsdstats.io_write += *cnt; fsnotify_modify(file); + host_err = filemap_check_wb_err(file->f_mapping, since); + if (host_err < 0) + goto out_nfserr; if (stable && use_wgather) { host_err = wait_for_concurrent_writes(file); @@ -1113,19 +1126,6 @@ out: } #ifdef CONFIG_NFSD_V3 -static int -nfsd_filemap_write_and_wait_range(struct nfsd_file *nf, loff_t offset, - loff_t end) -{ - struct address_space *mapping = nf->nf_file->f_mapping; - int ret = filemap_fdatawrite_range(mapping, offset, end); - - if (ret) - return ret; - filemap_fdatawait_range_keep_errors(mapping, offset, end); - return 0; -} - /* * Commit all pending writes to stable storage. 
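 * * Writeback errors are detected by sampling the file's errseq_t * before vfs_fsync_range() and re-checking it afterwards with * filemap_check_wb_err(); this replaces the old nf_rwsem serialization * of writers.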
* @@ -1156,25 +1156,26 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, if (err) goto out; if (EX_ISSYNC(fhp->fh_export)) { - int err2 = nfsd_filemap_write_and_wait_range(nf, offset, end); + errseq_t since = READ_ONCE(nf->nf_file->f_wb_err); + int err2; - down_write(&nf->nf_rwsem); - if (!err2) - err2 = vfs_fsync_range(nf->nf_file, offset, end, 0); + err2 = vfs_fsync_range(nf->nf_file, offset, end, 0); switch (err2) { case 0: nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net, nfsd_net_id)); + err2 = filemap_check_wb_err(nf->nf_file->f_mapping, + since); + err = nfserrno(err2); break; case -EINVAL: err = nfserr_notsupp; break; default: - err = nfserrno(err2); nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id)); + err = nfserrno(err2); } - up_write(&nf->nf_rwsem); } else nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net, nfsd_net_id)); diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 4391fd3abd8f..e00e184b1261 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -20,6 +20,23 @@ #include "page.h" #include "btnode.h" + +/** + * nilfs_init_btnc_inode - initialize B-tree node cache inode + * @btnc_inode: inode to be initialized + * + * nilfs_init_btnc_inode() sets up an inode for B-tree node cache. + */ +void nilfs_init_btnc_inode(struct inode *btnc_inode) +{ + struct nilfs_inode_info *ii = NILFS_I(btnc_inode); + + btnc_inode->i_mode = S_IFREG; + ii->i_flags = 0; + memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap)); + mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS); +} + void nilfs_btnode_cache_clear(struct address_space *btnc) { invalidate_mapping_pages(btnc, 0, -1); @@ -29,7 +46,7 @@ void nilfs_btnode_cache_clear(struct address_space *btnc) struct buffer_head * nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) { - struct inode *inode = NILFS_BTNC_I(btnc); + struct inode *inode = btnc->host; struct buffer_head *bh; bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node)); @@ -57,7 +74,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, struct buffer_head **pbh, sector_t *submit_ptr) { struct buffer_head *bh; - struct inode *inode = NILFS_BTNC_I(btnc); + struct inode *inode = btnc->host; struct page *page; int err; @@ -157,7 +174,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc, struct nilfs_btnode_chkey_ctxt *ctxt) { struct buffer_head *obh, *nbh; - struct inode *inode = NILFS_BTNC_I(btnc); + struct inode *inode = btnc->host; __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; int err; diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h index 0f88dbc9bcb3..05ab64d354dc 100644 --- a/fs/nilfs2/btnode.h +++ b/fs/nilfs2/btnode.h @@ -30,6 +30,7 @@ struct nilfs_btnode_chkey_ctxt { struct buffer_head *newbh; }; +void nilfs_init_btnc_inode(struct inode *btnc_inode); void nilfs_btnode_cache_clear(struct address_space *); struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr); diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index f42ab57201e7..77efd69213a3 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -58,7 +58,8 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path) static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree, __u64 ptr, struct buffer_head **bhp) { - struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache; + struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode; + struct address_space *btnc = btnc_inode->i_mapping; struct buffer_head *bh; bh = nilfs_btnode_create_block(btnc, 
ptr); @@ -470,7 +471,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, struct buffer_head **bhp, const struct nilfs_btree_readahead_info *ra) { - struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache; + struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode; + struct address_space *btnc = btnc_inode->i_mapping; struct buffer_head *bh, *ra_bh; sector_t submit_ptr = 0; int ret; @@ -1742,6 +1744,10 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key, dat = nilfs_bmap_get_dat(btree); } + ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode); + if (ret < 0) + return ret; + ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat); if (ret < 0) return ret; @@ -1914,7 +1920,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree, path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr; path[level].bp_ctxt.bh = path[level].bp_bh; ret = nilfs_btnode_prepare_change_key( - &NILFS_BMAP_I(btree)->i_btnode_cache, + NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, &path[level].bp_ctxt); if (ret < 0) { nilfs_dat_abort_update(dat, @@ -1940,7 +1946,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree, if (buffer_nilfs_node(path[level].bp_bh)) { nilfs_btnode_commit_change_key( - &NILFS_BMAP_I(btree)->i_btnode_cache, + NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, &path[level].bp_ctxt); path[level].bp_bh = path[level].bp_ctxt.bh; } @@ -1959,7 +1965,7 @@ static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree, &path[level].bp_newreq.bpr_req); if (buffer_nilfs_node(path[level].bp_bh)) nilfs_btnode_abort_change_key( - &NILFS_BMAP_I(btree)->i_btnode_cache, + NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, &path[level].bp_ctxt); } @@ -2135,7 +2141,8 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree, static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree, struct list_head *listp) { - struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache; + struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode; + struct address_space *btcache = btnc_inode->i_mapping; struct list_head lists[NILFS_BTREE_LEVEL_MAX]; struct pagevec pvec; struct buffer_head *bh, *head; @@ -2189,12 +2196,12 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree, path[level].bp_ctxt.newkey = blocknr; path[level].bp_ctxt.bh = *bh; ret = nilfs_btnode_prepare_change_key( - &NILFS_BMAP_I(btree)->i_btnode_cache, + NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, &path[level].bp_ctxt); if (ret < 0) return ret; nilfs_btnode_commit_change_key( - &NILFS_BMAP_I(btree)->i_btnode_cache, + NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping, &path[level].bp_ctxt); *bh = path[level].bp_ctxt.bh; } @@ -2399,6 +2406,10 @@ int nilfs_btree_init(struct nilfs_bmap *bmap) if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode)) ret = -EIO; + else + ret = nilfs_attach_btree_node_cache( + &NILFS_BMAP_I(bmap)->vfs_inode); + return ret; } diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index 8bccdf1158fc..8fedc7104320 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -111,6 +111,13 @@ static void nilfs_dat_commit_free(struct inode *dat, kunmap_atomic(kaddr); nilfs_dat_commit_entry(dat, req); + + if (unlikely(req->pr_desc_bh == NULL || req->pr_bitmap_bh == NULL)) { + nilfs_error(dat->i_sb, + "state inconsistency probably due to duplicate use of vblocknr = %llu", + (unsigned long long)req->pr_entry_nr); + return; + } nilfs_palloc_commit_free_entry(dat, req); } @@ -497,7 +504,9 @@ int 
nilfs_dat_read(struct super_block *sb, size_t entry_size, di = NILFS_DAT_I(dat); lockdep_set_class(&di->mi.mi_sem, &dat_lock_key); nilfs_palloc_setup_cache(dat, &di->palloc_cache); - nilfs_mdt_setup_shadow_map(dat, &di->shadow); + err = nilfs_mdt_setup_shadow_map(dat, &di->shadow); + if (err) + goto failed; err = nilfs_read_inode_common(dat, raw_inode); if (err) diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 448320496856..aadea660c66c 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -126,9 +126,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) { + struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode; int ret; - ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache, + ret = nilfs_btnode_submit_block(btnc_inode->i_mapping, vbn ? : pbn, pbn, REQ_OP_READ, 0, out_bh, &pbn); if (ret == -EEXIST) /* internal code (cache hit) */ @@ -170,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode) ii->i_flags = 0; nilfs_bmap_init_gc(ii->i_bmap); - return 0; + return nilfs_attach_btree_node_cache(inode); } /** @@ -185,7 +186,7 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs) ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); list_del_init(&ii->i_dirty); truncate_inode_pages(&ii->vfs_inode.i_data, 0); - nilfs_btnode_cache_clear(&ii->i_btnode_cache); + nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping); iput(&ii->vfs_inode); } } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 745d371d6fea..fb594edc0837 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -29,12 +29,16 @@ * @cno: checkpoint number * @root: pointer on NILFS root object (mounted checkpoint) * @for_gc: inode for GC flag + * @for_btnc: inode for B-tree node cache flag + * @for_shadow: inode for shadowed page cache flag */ struct nilfs_iget_args { u64 ino; __u64 cno; struct nilfs_root *root; - int for_gc; + bool for_gc; + bool for_btnc; + bool for_shadow; }; static int nilfs_iget_test(struct inode *inode, void *opaque); @@ -314,7 +318,8 @@ static int nilfs_insert_inode_locked(struct inode *inode, unsigned long ino) { struct nilfs_iget_args args = { - .ino = ino, .root = root, .cno = 0, .for_gc = 0 + .ino = ino, .root = root, .cno = 0, .for_gc = false, + .for_btnc = false, .for_shadow = false }; return insert_inode_locked4(inode, ino, nilfs_iget_test, &args); @@ -327,6 +332,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) struct inode *inode; struct nilfs_inode_info *ii; struct nilfs_root *root; + struct buffer_head *bh; int err = -ENOMEM; ino_t ino; @@ -342,11 +348,25 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) ii->i_state = BIT(NILFS_I_NEW); ii->i_root = root; - err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh); + err = nilfs_ifile_create_inode(root->ifile, &ino, &bh); if (unlikely(err)) goto failed_ifile_create_inode; /* reference count of i_bh inherits from nilfs_mdt_read_block() */ + if (unlikely(ino < NILFS_USER_INO)) { + nilfs_warn(sb, + "inode bitmap is inconsistent for reserved inodes"); + do { + brelse(bh); + err = nilfs_ifile_create_inode(root->ifile, &ino, &bh); + if (unlikely(err)) + goto failed_ifile_create_inode; + } while (ino < NILFS_USER_INO); + + nilfs_info(sb, "repaired inode bitmap for reserved inodes"); + } + ii->i_bh = bh; + atomic64_inc(&root->inodes_count); inode_init_owner(inode, dir, mode); inode->i_ino = ino; @@ -439,6 +459,8 @@ int 
nilfs_read_inode_common(struct inode *inode, inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec); inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); + if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode)) + return -EIO; /* this inode is for metadata and corrupted */ if (inode->i_nlink == 0) return -ESTALE; /* this inode is deleted */ @@ -527,6 +549,19 @@ static int nilfs_iget_test(struct inode *inode, void *opaque) return 0; ii = NILFS_I(inode); + if (test_bit(NILFS_I_BTNC, &ii->i_state)) { + if (!args->for_btnc) + return 0; + } else if (args->for_btnc) { + return 0; + } + if (test_bit(NILFS_I_SHADOW, &ii->i_state)) { + if (!args->for_shadow) + return 0; + } else if (args->for_shadow) { + return 0; + } + if (!test_bit(NILFS_I_GCINODE, &ii->i_state)) return !args->for_gc; @@ -538,15 +573,17 @@ static int nilfs_iget_set(struct inode *inode, void *opaque) struct nilfs_iget_args *args = opaque; inode->i_ino = args->ino; - if (args->for_gc) { + NILFS_I(inode)->i_cno = args->cno; + NILFS_I(inode)->i_root = args->root; + if (args->root && args->ino == NILFS_ROOT_INO) + nilfs_get_root(args->root); + + if (args->for_gc) NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE); - NILFS_I(inode)->i_cno = args->cno; - NILFS_I(inode)->i_root = NULL; - } else { - if (args->root && args->ino == NILFS_ROOT_INO) - nilfs_get_root(args->root); - NILFS_I(inode)->i_root = args->root; - } + if (args->for_btnc) + NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC); + if (args->for_shadow) + NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW); return 0; } @@ -554,7 +591,8 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root, unsigned long ino) { struct nilfs_iget_args args = { - .ino = ino, .root = root, .cno = 0, .for_gc = 0 + .ino = ino, .root = root, .cno = 0, .for_gc = false, + .for_btnc = false, .for_shadow = false }; return ilookup5(sb, ino, nilfs_iget_test, &args); @@ -564,7 +602,8 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root, unsigned long ino) { struct nilfs_iget_args args = { - .ino = ino, .root = root, .cno = 0, .for_gc = 0 + .ino = ino, .root = root, .cno = 0, .for_gc = false, + .for_btnc = false, .for_shadow = false }; return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args); @@ -595,7 +634,8 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino, __u64 cno) { struct nilfs_iget_args args = { - .ino = ino, .root = NULL, .cno = cno, .for_gc = 1 + .ino = ino, .root = NULL, .cno = cno, .for_gc = true, + .for_btnc = false, .for_shadow = false }; struct inode *inode; int err; @@ -615,6 +655,113 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino, return inode; } +/** + * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode + * @inode: inode object + * + * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode, + * or does nothing if the inode already has it. This function allocates + * an additional inode to maintain page cache of B-tree nodes one-on-one. + * + * Return Value: On success, 0 is returned. On errors, one of the following + * negative error code is returned. + * + * %-ENOMEM - Insufficient memory available. 
+ */ +int nilfs_attach_btree_node_cache(struct inode *inode) +{ + struct nilfs_inode_info *ii = NILFS_I(inode); + struct inode *btnc_inode; + struct nilfs_iget_args args; + + if (ii->i_assoc_inode) + return 0; + + args.ino = inode->i_ino; + args.root = ii->i_root; + args.cno = ii->i_cno; + args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0; + args.for_btnc = true; + args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0; + + btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test, + nilfs_iget_set, &args); + if (unlikely(!btnc_inode)) + return -ENOMEM; + if (btnc_inode->i_state & I_NEW) { + nilfs_init_btnc_inode(btnc_inode); + unlock_new_inode(btnc_inode); + } + NILFS_I(btnc_inode)->i_assoc_inode = inode; + NILFS_I(btnc_inode)->i_bmap = ii->i_bmap; + ii->i_assoc_inode = btnc_inode; + + return 0; +} + +/** + * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode + * @inode: inode object + * + * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its + * holder inode bound to @inode, or does nothing if @inode doesn't have it. + */ +void nilfs_detach_btree_node_cache(struct inode *inode) +{ + struct nilfs_inode_info *ii = NILFS_I(inode); + struct inode *btnc_inode = ii->i_assoc_inode; + + if (btnc_inode) { + NILFS_I(btnc_inode)->i_assoc_inode = NULL; + ii->i_assoc_inode = NULL; + iput(btnc_inode); + } +} + +/** + * nilfs_iget_for_shadow - obtain inode for shadow mapping + * @inode: inode object that uses shadow mapping + * + * nilfs_iget_for_shadow() allocates a pair of inodes that holds page + * caches for shadow mapping. The page cache for data pages is set up + * in one inode and the one for b-tree node pages is set up in the + * other inode, which is attached to the former inode. + * + * Return Value: On success, a pointer to the inode for data pages is + * returned. On errors, one of the following negative error code is returned + * in a pointer type. + * + * %-ENOMEM - Insufficient memory available. 
+ */ +struct inode *nilfs_iget_for_shadow(struct inode *inode) +{ + struct nilfs_iget_args args = { + .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false, + .for_btnc = false, .for_shadow = true + }; + struct inode *s_inode; + int err; + + s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test, + nilfs_iget_set, &args); + if (unlikely(!s_inode)) + return ERR_PTR(-ENOMEM); + if (!(s_inode->i_state & I_NEW)) + return inode; + + NILFS_I(s_inode)->i_flags = 0; + memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap)); + mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS); + + err = nilfs_attach_btree_node_cache(s_inode); + if (unlikely(err)) { + iget_failed(s_inode); + return ERR_PTR(err); + } + unlock_new_inode(s_inode); + return s_inode; +} + void nilfs_write_inode_common(struct inode *inode, struct nilfs_inode *raw_inode, int has_bmap) { @@ -762,7 +909,8 @@ static void nilfs_clear_inode(struct inode *inode) if (test_bit(NILFS_I_BMAP, &ii->i_state)) nilfs_bmap_clear(ii->i_bmap); - nilfs_btnode_cache_clear(&ii->i_btnode_cache); + if (!test_bit(NILFS_I_BTNC, &ii->i_state)) + nilfs_detach_btree_node_cache(inode); if (ii->i_root && inode->i_ino == NILFS_ROOT_INO) nilfs_put_root(ii->i_root); diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index c0361ce45f62..e80ef2c0a785 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -469,9 +469,18 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz) void nilfs_mdt_clear(struct inode *inode) { struct nilfs_mdt_info *mdi = NILFS_MDT(inode); + struct nilfs_shadow_map *shadow = mdi->mi_shadow; if (mdi->mi_palloc_cache) nilfs_palloc_destroy_cache(inode); + + if (shadow) { + struct inode *s_inode = shadow->inode; + + shadow->inode = NULL; + iput(s_inode); + mdi->mi_shadow = NULL; + } } /** @@ -505,12 +514,15 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, struct nilfs_shadow_map *shadow) { struct nilfs_mdt_info *mi = NILFS_MDT(inode); + struct inode *s_inode; INIT_LIST_HEAD(&shadow->frozen_buffers); - address_space_init_once(&shadow->frozen_data); - nilfs_mapping_init(&shadow->frozen_data, inode); - address_space_init_once(&shadow->frozen_btnodes); - nilfs_mapping_init(&shadow->frozen_btnodes, inode); + + s_inode = nilfs_iget_for_shadow(inode); + if (IS_ERR(s_inode)) + return PTR_ERR(s_inode); + + shadow->inode = s_inode; mi->mi_shadow = shadow; return 0; } @@ -524,14 +536,15 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode) struct nilfs_mdt_info *mi = NILFS_MDT(inode); struct nilfs_inode_info *ii = NILFS_I(inode); struct nilfs_shadow_map *shadow = mi->mi_shadow; + struct inode *s_inode = shadow->inode; int ret; - ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping); + ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping); if (ret) goto out; - ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes, - &ii->i_btnode_cache); + ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping, + ii->i_assoc_inode->i_mapping); if (ret) goto out; @@ -547,7 +560,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh) struct page *page; int blkbits = inode->i_blkbits; - page = grab_cache_page(&shadow->frozen_data, bh->b_page->index); + page = grab_cache_page(shadow->inode->i_mapping, bh->b_page->index); if (!page) return -ENOMEM; @@ -579,7 +592,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh) struct page *page; int n; - page = find_lock_page(&shadow->frozen_data, bh->b_page->index); + page = 
find_lock_page(shadow->inode->i_mapping, bh->b_page->index); if (page) { if (page_has_buffers(page)) { n = bh_offset(bh) >> inode->i_blkbits; @@ -620,10 +633,11 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode) nilfs_palloc_clear_cache(inode); nilfs_clear_dirty_pages(inode->i_mapping, true); - nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data); + nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping); - nilfs_clear_dirty_pages(&ii->i_btnode_cache, true); - nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes); + nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true); + nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping, + NILFS_I(shadow->inode)->i_assoc_inode->i_mapping); nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store); @@ -638,10 +652,11 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode) { struct nilfs_mdt_info *mi = NILFS_MDT(inode); struct nilfs_shadow_map *shadow = mi->mi_shadow; + struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode; down_write(&mi->mi_sem); nilfs_release_frozen_buffers(shadow); - truncate_inode_pages(&shadow->frozen_data, 0); - truncate_inode_pages(&shadow->frozen_btnodes, 0); + truncate_inode_pages(shadow->inode->i_mapping, 0); + truncate_inode_pages(shadow_btnc_inode->i_mapping, 0); up_write(&mi->mi_sem); } diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h index e77aea4bb921..9d8ac0d27c16 100644 --- a/fs/nilfs2/mdt.h +++ b/fs/nilfs2/mdt.h @@ -18,14 +18,12 @@ /** * struct nilfs_shadow_map - shadow mapping of meta data file * @bmap_store: shadow copy of bmap state - * @frozen_data: shadowed dirty data pages - * @frozen_btnodes: shadowed dirty b-tree nodes' pages + * @inode: holder of page caches used in shadow mapping * @frozen_buffers: list of frozen buffers */ struct nilfs_shadow_map { struct nilfs_bmap_store bmap_store; - struct address_space frozen_data; - struct address_space frozen_btnodes; + struct inode *inode; struct list_head frozen_buffers; }; diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index f8450ee3fd06..ace27a89fbb0 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -28,7 +28,7 @@ * @i_xattr: * @i_dir_start_lookup: page index of last successful search * @i_cno: checkpoint number for GC inode - * @i_btnode_cache: cached pages of b-tree nodes + * @i_assoc_inode: associated inode (B-tree node cache holder or back pointer) * @i_dirty: list for connecting dirty files * @xattr_sem: semaphore for extended attributes processing * @i_bh: buffer contains disk inode @@ -43,7 +43,7 @@ struct nilfs_inode_info { __u64 i_xattr; /* sector_t ??? 
*/ __u32 i_dir_start_lookup; __u64 i_cno; /* check point number for GC inode */ - struct address_space i_btnode_cache; + struct inode *i_assoc_inode; struct list_head i_dirty; /* List for connecting dirty files */ #ifdef CONFIG_NILFS_XATTR @@ -75,13 +75,6 @@ NILFS_BMAP_I(const struct nilfs_bmap *bmap) return container_of(bmap, struct nilfs_inode_info, i_bmap_data); } -static inline struct inode *NILFS_BTNC_I(struct address_space *btnc) -{ - struct nilfs_inode_info *ii = - container_of(btnc, struct nilfs_inode_info, i_btnode_cache); - return &ii->vfs_inode; -} - /* * Dynamic state flags of NILFS on-memory inode (i_state) */ @@ -98,6 +91,8 @@ enum { NILFS_I_INODE_SYNC, /* dsync is not allowed for inode */ NILFS_I_BMAP, /* has bmap and btnode_cache */ NILFS_I_GCINODE, /* inode for GC, on memory only */ + NILFS_I_BTNC, /* inode for btree node cache */ + NILFS_I_SHADOW, /* inode for shadowed page cache */ }; /* @@ -203,6 +198,9 @@ static inline int nilfs_acl_chmod(struct inode *inode) static inline int nilfs_init_acl(struct inode *inode, struct inode *dir) { + if (S_ISLNK(inode->i_mode)) + return 0; + inode->i_mode &= ~current_umask(); return 0; } @@ -264,6 +262,9 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root, unsigned long ino); extern struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino, __u64 cno); +int nilfs_attach_btree_node_cache(struct inode *inode); +void nilfs_detach_btree_node_cache(struct inode *inode); +struct inode *nilfs_iget_for_shadow(struct inode *inode); extern void nilfs_update_inode(struct inode *, struct buffer_head *, int); extern void nilfs_truncate(struct inode *); extern void nilfs_evict_inode(struct inode *); diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 171fb5cd427f..d1a148f0cae3 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -448,10 +448,9 @@ void nilfs_mapping_init(struct address_space *mapping, struct inode *inode) /* * NILFS2 needs clear_page_dirty() in the following two cases: * - * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears - * page dirty flags when it copies back pages from the shadow cache - * (gcdat->{i_mapping,i_btnode_cache}) to its original cache - * (dat->{i_mapping,i_btnode_cache}). + * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty + * flag of pages when it copies back pages from shadow cache to the + * original cache. * * 2) Some B-tree operations like insertion or deletion may dispose buffers * in dirty state, and this needs to cancel the dirty state of their pages. 
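The nilfs2 hunks above replace the address_space that used to be embedded in struct nilfs_inode_info with a separate holder inode reached through i_assoc_inode. The following is a minimal userspace model of that ownership pattern, not the kernel code; the names (struct cache_owner, attach_cache, detach_cache) are invented for illustration:

#include <stdlib.h>

/* Toy stand-in for an inode; "assoc" mirrors i_assoc_inode: it points
 * at the cache-holder object from the owner, and back at the owner
 * from the holder. */
struct cache_owner {
	struct cache_owner *assoc;
	int refcount;
};

static struct cache_owner *owner_alloc(void)
{
	struct cache_owner *o = calloc(1, sizeof(*o));

	if (o)
		o->refcount = 1;
	return o;
}

static void owner_put(struct cache_owner *o)
{
	if (o && --o->refcount == 0)
		free(o);
}

/* Idempotent attach, like nilfs_attach_btree_node_cache(): a second
 * call sees the holder already in place and does nothing. */
static int attach_cache(struct cache_owner *o)
{
	struct cache_owner *holder;

	if (o->assoc)
		return 0;
	holder = owner_alloc();
	if (!holder)
		return -1;
	holder->assoc = o;	/* back pointer */
	o->assoc = holder;
	return 0;
}

/* Detach breaks both links and drops the holder's reference, as
 * iput() does in nilfs_detach_btree_node_cache(). */
static void detach_cache(struct cache_owner *o)
{
	struct cache_owner *holder = o->assoc;

	if (holder) {
		holder->assoc = NULL;
		o->assoc = NULL;
		owner_put(holder);
	}
}

int main(void)
{
	struct cache_owner *o = owner_alloc();

	if (!o || attach_cache(o) < 0 || attach_cache(o) < 0)
		return 1;
	detach_cache(o);
	owner_put(o);
	return 0;
}

In the kernel version the holder is a real inode obtained via iget5_locked(), so the page cache, locking and lifetime rules of ordinary inodes apply to the B-tree node cache unchanged.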
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index e3726aca28ed..5ee4973525f0 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -322,7 +322,7 @@ void nilfs_relax_pressure_in_lock(struct super_block *sb) struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci = nilfs->ns_writer; - if (!sci || !sci->sc_flush_request) + if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request) return; set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags); @@ -738,15 +738,18 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode, struct list_head *listp) { struct nilfs_inode_info *ii = NILFS_I(inode); - struct address_space *mapping = &ii->i_btnode_cache; + struct inode *btnc_inode = ii->i_assoc_inode; struct pagevec pvec; struct buffer_head *bh, *head; unsigned int i; pgoff_t index = 0; + if (!btnc_inode) + return; + pagevec_init(&pvec); - while (pagevec_lookup_tag(&pvec, mapping, &index, + while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index, PAGECACHE_TAG_DIRTY)) { for (i = 0; i < pagevec_count(&pvec); i++) { bh = head = page_buffers(pvec.pages[i]); @@ -877,9 +880,11 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci) nilfs_mdt_mark_dirty(nilfs->ns_cpfile); nilfs_cpfile_put_checkpoint( nilfs->ns_cpfile, nilfs->ns_cno, bh_cp); - } else - WARN_ON(err == -EINVAL || err == -ENOENT); - + } else if (err == -EINVAL || err == -ENOENT) { + nilfs_error(sci->sc_super, + "checkpoint creation failed due to metadata corruption."); + err = -EIO; + } return err; } @@ -893,7 +898,11 @@ static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci) err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0, &raw_cp, &bh_cp); if (unlikely(err)) { - WARN_ON(err == -EINVAL || err == -ENOENT); + if (err == -EINVAL || err == -ENOENT) { + nilfs_error(sci->sc_super, + "checkpoint finalization failed due to metadata corruption."); + err = -EIO; + } goto failed_ibh; } raw_cp->cp_snapshot_list.ssl_next = 0; @@ -2239,7 +2248,7 @@ int nilfs_construct_segment(struct super_block *sb) struct nilfs_transaction_info *ti; int err; - if (!sci) + if (sb_rdonly(sb) || unlikely(!sci)) return -EROFS; /* A call inside transactions causes a deadlock. */ @@ -2278,7 +2287,7 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, struct nilfs_transaction_info ti; int err = 0; - if (!sci) + if (sb_rdonly(sb) || unlikely(!sci)) return -EROFS; nilfs_transaction_lock(sb, &ti, 0); @@ -2415,7 +2424,7 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head) continue; list_del_init(&ii->i_dirty); truncate_inode_pages(&ii->vfs_inode.i_data, 0); - nilfs_btnode_cache_clear(&ii->i_btnode_cache); + nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping); iput(&ii->vfs_inode); } } @@ -2774,11 +2783,12 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) if (nilfs->ns_writer) { /* - * This happens if the filesystem was remounted - * read/write after nilfs_error degenerated it into a - * read-only mount. + * This happens if the filesystem is made read-only by + * __nilfs_error or nilfs_remount and then remounted + * read/write. In these cases, reuse the existing + * writer. 
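+ * Detaching the writer here instead could free ns_writer while + * the log writer thread is still using it, leading to a + * use-after-free.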
*/ - nilfs_detach_log_writer(sb); + return 0; } nilfs->ns_writer = nilfs_segctor_new(sb, root); @@ -2788,10 +2798,9 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); err = nilfs_segctor_start_thread(nilfs->ns_writer); - if (err) { - kfree(nilfs->ns_writer); - nilfs->ns_writer = NULL; - } + if (unlikely(err)) + nilfs_detach_log_writer(sb); + return err; } diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index 63722475e17e..51f4cb060231 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c @@ -495,14 +495,22 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum) { struct buffer_head *bh; + void *kaddr; + struct nilfs_segment_usage *su; int ret; + down_write(&NILFS_MDT(sufile)->mi_sem); ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh); if (!ret) { mark_buffer_dirty(bh); nilfs_mdt_mark_dirty(sufile); + kaddr = kmap_atomic(bh->b_page); + su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr); + nilfs_segment_usage_set_dirty(su); + kunmap_atomic(kaddr); brelse(bh); } + up_write(&NILFS_MDT(sufile)->mi_sem); return ret; } diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index ab1a5e8467f2..b3c094c59254 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -158,7 +158,8 @@ struct inode *nilfs_alloc_inode(struct super_block *sb) ii->i_bh = NULL; ii->i_state = 0; ii->i_cno = 0; - nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode); + ii->i_assoc_inode = NULL; + ii->i_bmap = &ii->i_bmap_data; return &ii->vfs_inode; } @@ -1133,8 +1134,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb)) goto out; if (*flags & SB_RDONLY) { - /* Shutting down log writer */ - nilfs_detach_log_writer(sb); sb->s_flags |= SB_RDONLY; /* @@ -1378,8 +1377,6 @@ static void nilfs_inode_init_once(void *obj) #ifdef CONFIG_NILFS_XATTR init_rwsem(&ii->xattr_sem); #endif - address_space_init_once(&ii->i_btnode_cache); - ii->i_bmap = &ii->i_bmap_data; inode_init_once(&ii->vfs_inode); } diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index c20ebecd7bc2..ce103dd39b89 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -690,9 +690,7 @@ int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks) { unsigned long ncleansegs; - down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile); - up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment; return 0; } diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c index f0d6b54be412..765b50aeadd2 100644 --- a/fs/notify/fdinfo.c +++ b/fs/notify/fdinfo.c @@ -83,16 +83,9 @@ static void inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark) inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark); inode = igrab(fsnotify_conn_inode(mark->connector)); if (inode) { - /* - * IN_ALL_EVENTS represents all of the mask bits - * that we expose to userspace. There is at - * least one bit (FS_EVENT_ON_CHILD) which is - * used only internally to the kernel. 
- */ - u32 mask = mark->mask & IN_ALL_EVENTS; - seq_printf(m, "inotify wd:%x ino:%lx sdev:%x mask:%x ignored_mask:%x ", + seq_printf(m, "inotify wd:%x ino:%lx sdev:%x mask:%x ignored_mask:0 ", inode_mark->wd, inode->i_ino, inode->i_sb->s_dev, - mask, mark->ignored_mask); + inotify_mark_user_mask(mark)); show_mark_fhandle(m, inode); seq_putc(m, '\n'); iput(inode); diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h index 2007e3711916..8f00151eb731 100644 --- a/fs/notify/inotify/inotify.h +++ b/fs/notify/inotify/inotify.h @@ -22,6 +22,18 @@ static inline struct inotify_event_info *INOTIFY_E(struct fsnotify_event *fse) return container_of(fse, struct inotify_event_info, fse); } +/* + * INOTIFY_USER_MASK represents all of the mask bits that we expose to + * userspace. There is at least one bit (FS_EVENT_ON_CHILD) which is + * used only internally to the kernel. + */ +#define INOTIFY_USER_MASK (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK) + +static inline __u32 inotify_mark_user_mask(struct fsnotify_mark *fsn_mark) +{ + return fsn_mark->mask & INOTIFY_USER_MASK; +} + extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group); extern int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index ca7b8185e260..9ea915e9d2a1 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -88,7 +88,7 @@ static inline __u32 inotify_arg_to_mask(struct inode *inode, u32 arg) mask |= FS_EVENT_ON_CHILD; /* mask off the flags used to open the fd */ - mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK)); + mask |= (arg & INOTIFY_USER_MASK); return mask; } diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 8387937b9d01..5b44be5f93dd 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -430,7 +430,7 @@ void fsnotify_free_mark(struct fsnotify_mark *mark) void fsnotify_destroy_mark(struct fsnotify_mark *mark, struct fsnotify_group *group) { - mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); + mutex_lock(&group->mark_mutex); fsnotify_detach_mark(mark); mutex_unlock(&group->mark_mutex); fsnotify_free_mark(mark); @@ -742,7 +742,7 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group, * move marks to be freed to the to_free list in one go and then free marks in * the to_free list one by one.
*/ - mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); + mutex_lock(&group->mark_mutex); list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { if ((1U << mark->connector->type) & type_mask) list_move(&mark->g_list, &to_free); @@ -751,7 +751,7 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group, clear: while (1) { - mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); + mutex_lock(&group->mark_mutex); if (list_empty(head)) { mutex_unlock(&group->mark_mutex); break; diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index d563abc3e136..c0881d39d36a 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c @@ -592,15 +592,39 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name, a = (ATTR_RECORD*)((u8*)ctx->attr + le32_to_cpu(ctx->attr->length)); for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) { - if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec + - le32_to_cpu(ctx->mrec->bytes_allocated)) + u8 *mrec_end = (u8 *)ctx->mrec + + le32_to_cpu(ctx->mrec->bytes_allocated); + u8 *name_end; + + /* check whether the ATTR_RECORD wraps */ + if ((u8 *)a < (u8 *)ctx->mrec) break; + + /* check whether the Attribute Record Header is within bounds */ + if ((u8 *)a > mrec_end || + (u8 *)a + sizeof(ATTR_RECORD) > mrec_end) + break; + + /* check whether the ATTR_RECORD's name is within bounds */ + name_end = (u8 *)a + le16_to_cpu(a->name_offset) + + a->name_length * sizeof(ntfschar); + if (name_end > mrec_end) + break; + ctx->attr = a; if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) || a->type == AT_END)) return -ENOENT; if (unlikely(!a->length)) break; + + /* check whether the ATTR_RECORD's length wraps */ + if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a) + break; + /* check whether the ATTR_RECORD's length is within bounds */ + if ((u8 *)a + le32_to_cpu(a->length) > mrec_end) + break; + if (a->type != type) continue; /* diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index cf222c9225d6..645c4b1b23de 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -1829,6 +1829,13 @@ int ntfs_read_inode_mount(struct inode *vi) goto err_out; } + /* Sanity check the offset to the first attribute */ + if (le16_to_cpu(m->attrs_offset) >= le32_to_cpu(m->bytes_allocated)) { + ntfs_error(sb, "Incorrect mft offset to the first attribute %u in superblock.", + le16_to_cpu(m->attrs_offset)); + goto err_out; + } + /* Need this to sanity check attribute list references to $MFT. */ vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number); diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 56edd625cf4d..3ded93309139 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -2092,7 +2092,8 @@ get_ctx_vol_failed: // TODO: Initialize security. /* Get the extended system files' directory inode. */ vol->extend_ino = ntfs_iget(sb, FILE_Extend); - if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino)) { + if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino) || + !S_ISDIR(vol->extend_ino->i_mode)) { if (!IS_ERR(vol->extend_ino)) iput(vol->extend_ino); ntfs_error(sb, "Failed to load $Extend."); diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c index 339f098d9592..8fa289de3939 100644 --- a/fs/ocfs2/dlmfs/userdlm.c +++ b/fs/ocfs2/dlmfs/userdlm.c @@ -435,6 +435,11 @@ again: } spin_lock(&lockres->l_lock); + if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { + spin_unlock(&lockres->l_lock); + status = -EAGAIN; + goto bail; + } /* We only compare against the currently granted level * here.
If the lock is blocked waiting on a downconvert, @@ -597,7 +602,7 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres) spin_lock(&lockres->l_lock); if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { spin_unlock(&lockres->l_lock); - return 0; + goto bail; } lockres->l_flags |= USER_LOCK_IN_TEARDOWN; @@ -611,12 +616,17 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres) } if (lockres->l_ro_holders || lockres->l_ex_holders) { + lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN; spin_unlock(&lockres->l_lock); goto bail; } status = 0; if (!(lockres->l_flags & USER_LOCK_ATTACHED)) { + /* + * The lock was never requested; leave USER_LOCK_IN_TEARDOWN set + * to prevent new lock requests from coming in. + */ spin_unlock(&lockres->l_lock); goto bail; } @@ -627,6 +637,10 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres) status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK); if (status) { + spin_lock(&lockres->l_lock); + lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN; + lockres->l_flags &= ~USER_LOCK_BUSY; + spin_unlock(&lockres->l_lock); user_log_dlm_error("ocfs2_dlm_unlock", status, lockres); goto bail; } diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index c46bf7f581a1..856474b0a1ae 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -231,6 +231,7 @@ static int ocfs2_mknod(struct inode *dir, handle_t *handle = NULL; struct ocfs2_super *osb; struct ocfs2_dinode *dirfe; + struct ocfs2_dinode *fe = NULL; struct buffer_head *new_fe_bh = NULL; struct inode *inode = NULL; struct ocfs2_alloc_context *inode_ac = NULL; @@ -381,6 +382,7 @@ static int ocfs2_mknod(struct inode *dir, goto leave; } + fe = (struct ocfs2_dinode *) new_fe_bh->b_data; if (S_ISDIR(mode)) { status = ocfs2_fill_new_dir(osb, handle, dir, inode, new_fe_bh, data_ac, meta_ac); @@ -453,8 +455,11 @@ roll_back: leave: if (status < 0 && did_quota_inode) dquot_free_inode(inode); - if (handle) + if (handle) { + if (status < 0 && fe) + ocfs2_set_links_count(fe, 0); ocfs2_commit_trans(osb, handle); + } ocfs2_inode_unlock(dir, 1); if (did_block_signals) @@ -631,18 +636,9 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, return status; } - status = __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, + return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, parent_fe_bh, handle, inode_ac, fe_blkno, suballoc_loc, suballoc_bit); - if (status < 0) { - u64 bg_blkno = ocfs2_which_suballoc_group(fe_blkno, suballoc_bit); - int tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode, - inode_ac->ac_bh, suballoc_bit, bg_blkno, 1); - if (tmp) - mlog_errno(tmp); - } - - return status; } static int ocfs2_mkdir(struct inode *dir, @@ -2023,8 +2019,11 @@ bail: ocfs2_clusters_to_bytes(osb->sb, 1)); if (status < 0 && did_quota_inode) dquot_free_inode(inode); - if (handle) + if (handle) { + if (status < 0 && fe) + ocfs2_set_links_count(fe, 0); ocfs2_commit_trans(osb, handle); + } ocfs2_inode_unlock(dir, 1); if (did_block_signals) diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 7993d527edae..0a8cd8e59a92 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -279,7 +279,6 @@ enum ocfs2_mount_options OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15, /* Journal Async Commit */ OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */ OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */ - OCFS2_MOUNT_NOCLUSTER = 1 << 18, /* No cluster aware filesystem mount */ }; #define OCFS2_OSB_SOFT_RO 0x0001 @@ -675,8 +674,7 @@ static inline int ocfs2_cluster_o2cb_global_heartbeat(struct ocfs2_super *osb)
static inline int ocfs2_mount_local(struct ocfs2_super *osb) { - return ((osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT) - || (osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER)); + return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT); } static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb) diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c index 4da0e4b1e79b..8caeceeaeda7 100644 --- a/fs/ocfs2/slot_map.c +++ b/fs/ocfs2/slot_map.c @@ -254,16 +254,14 @@ static int __ocfs2_find_empty_slot(struct ocfs2_slot_info *si, int i, ret = -ENOSPC; if ((preferred >= 0) && (preferred < si->si_num_slots)) { - if (!si->si_slots[preferred].sl_valid || - !si->si_slots[preferred].sl_node_num) { + if (!si->si_slots[preferred].sl_valid) { ret = preferred; goto out; } } for(i = 0; i < si->si_num_slots; i++) { - if (!si->si_slots[i].sl_valid || - !si->si_slots[i].sl_node_num) { + if (!si->si_slots[i].sl_valid) { ret = i; break; } @@ -458,30 +456,24 @@ int ocfs2_find_slot(struct ocfs2_super *osb) spin_lock(&osb->osb_lock); ocfs2_update_slot_info(si); - if (ocfs2_mount_local(osb)) - /* use slot 0 directly in local mode */ - slot = 0; - else { - /* search for ourselves first and take the slot if it already - * exists. Perhaps we need to mark this in a variable for our - * own journal recovery? Possibly not, though we certainly - * need to warn to the user */ - slot = __ocfs2_node_num_to_slot(si, osb->node_num); + /* search for ourselves first and take the slot if it already + * exists. Perhaps we need to mark this in a variable for our + * own journal recovery? Possibly not, though we certainly + * need to warn to the user */ + slot = __ocfs2_node_num_to_slot(si, osb->node_num); + if (slot < 0) { + /* if no slot yet, then just take 1st available + * one. */ + slot = __ocfs2_find_empty_slot(si, osb->preferred_slot); if (slot < 0) { - /* if no slot yet, then just take 1st available - * one. 
*/ - slot = __ocfs2_find_empty_slot(si, osb->preferred_slot); - if (slot < 0) { - spin_unlock(&osb->osb_lock); - mlog(ML_ERROR, "no free slots available!\n"); - status = -EINVAL; - goto bail; - } - } else - printk(KERN_INFO "ocfs2: Slot %d on device (%s) was " - "already allocated to this node!\n", - slot, osb->dev_str); - } + spin_unlock(&osb->osb_lock); + mlog(ML_ERROR, "no free slots available!\n"); + status = -EINVAL; + goto bail; + } + } else + printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already " + "allocated to this node!\n", slot, osb->dev_str); ocfs2_set_slot(si, slot, osb->node_num); osb->slot_num = slot; diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index d7eb00f1ea8a..9eff7ffeda6e 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -176,7 +176,6 @@ enum { Opt_dir_resv_level, Opt_journal_async_commit, Opt_err_cont, - Opt_nocluster, Opt_err, }; @@ -210,7 +209,6 @@ static const match_table_t tokens = { {Opt_dir_resv_level, "dir_resv_level=%u"}, {Opt_journal_async_commit, "journal_async_commit"}, {Opt_err_cont, "errors=continue"}, - {Opt_nocluster, "nocluster"}, {Opt_err, NULL} }; @@ -622,13 +620,6 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) goto out; } - tmp = OCFS2_MOUNT_NOCLUSTER; - if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) { - ret = -EINVAL; - mlog(ML_ERROR, "Cannot change nocluster option on remount\n"); - goto out; - } - tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | OCFS2_MOUNT_HB_NONE; if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) { @@ -869,7 +860,6 @@ static int ocfs2_verify_userspace_stack(struct ocfs2_super *osb, } if (ocfs2_userspace_stack(osb) && - !(osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) && strncmp(osb->osb_cluster_stack, mopt->cluster_stack, OCFS2_STACK_LABEL_LEN)) { mlog(ML_ERROR, @@ -1150,11 +1140,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? 
"writeback" : "ordered"); - if ((osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) && - !(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT)) - printk(KERN_NOTICE "ocfs2: The shared device (%s) is mounted " - "without cluster aware mode.\n", osb->dev_str); - atomic_set(&osb->vol_state, VOLUME_MOUNTED); wake_up(&osb->osb_mount_event); @@ -1461,9 +1446,6 @@ static int ocfs2_parse_options(struct super_block *sb, case Opt_journal_async_commit: mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT; break; - case Opt_nocluster: - mopt->mount_opt |= OCFS2_MOUNT_NOCLUSTER; - break; default: mlog(ML_ERROR, "Unrecognized mount option \"%s\" " @@ -1575,9 +1557,6 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root) if (opts & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT) seq_printf(s, ",journal_async_commit"); - if (opts & OCFS2_MOUNT_NOCLUSTER) - seq_printf(s, ",nocluster"); - return 0; } diff --git a/fs/open.c b/fs/open.c index b9d55ca3763b..93e85572b165 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1110,6 +1110,12 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op) lookup_flags |= LOOKUP_BENEATH; if (how->resolve & RESOLVE_IN_ROOT) lookup_flags |= LOOKUP_IN_ROOT; + if (how->resolve & RESOLVE_CACHED) { + /* Don't bother even trying for create/truncate/tmpfile open */ + if (flags & (O_TRUNC | O_CREAT | O_TMPFILE)) + return -EAGAIN; + lookup_flags |= LOOKUP_CACHED; + } op->lookup_flags = lookup_flags; return 0; diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c index f469982dcb36..44118f0ab0b3 100644 --- a/fs/overlayfs/export.c +++ b/fs/overlayfs/export.c @@ -257,7 +257,7 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len, return FILEID_INVALID; dentry = d_find_any_alias(inode); - if (WARN_ON(!dentry)) + if (!dentry) return FILEID_INVALID; bytes = ovl_dentry_to_fid(dentry, fid, buflen); diff --git a/fs/pipe.c b/fs/pipe.c index ea77cf5b519f..ea680351ea8d 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1245,30 +1245,33 @@ unsigned int round_pipe_size(unsigned long size) /* * Resize the pipe ring to a number of slots. + * + * Note the pipe can be reduced in capacity, but only if the current + * occupancy doesn't exceed nr_slots; if it does, EBUSY will be + * returned instead. */ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots) { struct pipe_buffer *bufs; unsigned int head, tail, mask, n; - /* - * We can shrink the pipe, if arg is greater than the ring occupancy. - * Since we don't expect a lot of shrink+grow operations, just free and - * allocate again like we would do for growing. If the pipe currently - * contains more buffers than arg, then return busy. - */ - mask = pipe->ring_size - 1; - head = pipe->head; - tail = pipe->tail; - n = pipe_occupancy(pipe->head, pipe->tail); - if (nr_slots < n) - return -EBUSY; - bufs = kcalloc(nr_slots, sizeof(*bufs), GFP_KERNEL_ACCOUNT | __GFP_NOWARN); if (unlikely(!bufs)) return -ENOMEM; + spin_lock_irq(&pipe->rd_wait.lock); + mask = pipe->ring_size - 1; + head = pipe->head; + tail = pipe->tail; + + n = pipe_occupancy(head, tail); + if (nr_slots < n) { + spin_unlock_irq(&pipe->rd_wait.lock); + kfree(bufs); + return -EBUSY; + } + /* * The pipe array wraps around, so just start the new one at zero * and adjust the indices. 
@@ -1300,6 +1303,8 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots) pipe->tail = tail; pipe->head = head; + spin_unlock_irq(&pipe->rd_wait.lock); + /* This might have made more room for writers */ wake_up_interruptible(&pipe->wr_wait); return 0; diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 09e4d8a499a3..5898761698c2 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -453,6 +453,9 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent, proc_set_user(ent, (*parent)->uid, (*parent)->gid); ent->proc_dops = &proc_misc_dentry_ops; + /* Revalidate everything under /proc/${pid}/net */ + if ((*parent)->proc_dops == &proc_net_dentry_ops) + pde_force_lookup(ent); out: return ent; diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index e502414b3556..4d2e64e9016c 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -193,8 +193,6 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg) return 1; p = pfn_to_page(pfn); - if (!memmap_valid_within(pfn, p, page_zone(p))) - return 1; ent = kmalloc(sizeof(*ent), GFP_KERNEL); if (!ent) diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index 1aa9236bf1af..707477e27f83 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -362,6 +362,9 @@ static __net_init int proc_net_ns_init(struct net *net) proc_set_user(netd, uid, gid); + /* Seed dentry revalidation for /proc/${pid}/net */ + pde_force_lookup(netd); + err = -EEXIST; net_statd = proc_net_mkdir(net, "stat", netd); if (!net_statd) diff --git a/fs/proc/self.c b/fs/proc/self.c index a4012154e109..72cd69bcaf4a 100644 --- a/fs/proc/self.c +++ b/fs/proc/self.c @@ -16,13 +16,6 @@ static const char *proc_self_get_link(struct dentry *dentry, pid_t tgid = task_tgid_nr_ns(current, ns); char *name; - /* - * Not currently supported. Once we can inherit all of struct pid, - * we can allow this. - */ - if (current->flags & PF_IO_WORKER) - return ERR_PTR(-EOPNOTSUPP); - if (!tgid) return ERR_PTR(-ENOENT); /* max length of unsigned int in decimal + NULL term */ diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 79caeb9eb728..8aa5fb6e0d41 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -560,10 +560,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, struct vm_area_struct *vma = walk->vma; bool locked = !!(vma->vm_flags & VM_LOCKED); struct page *page = NULL; - bool migration = false; + bool migration = false, young = false, dirty = false; if (pte_present(*pte)) { page = vm_normal_page(vma, addr, *pte); + young = pte_young(*pte); + dirty = pte_dirty(*pte); } else if (is_swap_pte(*pte)) { swp_entry_t swpent = pte_to_swp_entry(*pte); @@ -597,8 +599,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, if (!page) return; - smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), - locked, migration); + smaps_account(mss, page, false, young, dirty, locked, migration); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -1015,7 +1016,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v) vma = vma->vm_next; } - show_vma_header_prefix(m, priv->mm->mmap->vm_start, + show_vma_header_prefix(m, priv->mm->mmap ? 
priv->mm->mmap->vm_start : 0, last_vma_end, 0, 0, 0, 0); seq_pad(m, ' '); seq_puts(m, "[rollup]\n"); diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c index d56681d86d28..a553273fbd41 100644 --- a/fs/proc/thread_self.c +++ b/fs/proc/thread_self.c @@ -17,13 +17,6 @@ static const char *proc_thread_self_get_link(struct dentry *dentry, pid_t pid = task_pid_nr_ns(current, ns); char *name; - /* - * Not currently supported. Once we can inherit all of struct pid, - * we can allow this. - */ - if (current->flags & PF_IO_WORKER) - return ERR_PTR(-EOPNOTSUPP); - if (!pid) return ERR_PTR(-ENOENT); name = kmalloc(10 + 6 + 10 + 1, dentry ? GFP_KERNEL : GFP_ATOMIC); diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 143e94395987..19f3e3f36aa3 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -737,7 +737,7 @@ static int ramoops_parse_dt(struct platform_device *pdev, field = value; \ } - parse_u32("mem-type", pdata->record_size, pdata->mem_type); + parse_u32("mem-type", pdata->mem_type, pdata->mem_type); parse_u32("record-size", pdata->record_size, 0); parse_u32("console-size", pdata->console_size, 0); parse_u32("ftrace-size", pdata->ftrace_size, 0); diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 09fb8459bb5c..65f123d5809b 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -79,6 +79,7 @@ #include #include #include +#include #include "../internal.h" /* ugh */ #include @@ -427,9 +428,11 @@ EXPORT_SYMBOL(mark_info_dirty); int dquot_acquire(struct dquot *dquot) { int ret = 0, ret2 = 0; + unsigned int memalloc; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); + memalloc = memalloc_nofs_save(); if (!test_bit(DQ_READ_B, &dquot->dq_flags)) { ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot); if (ret < 0) @@ -460,6 +463,7 @@ int dquot_acquire(struct dquot *dquot) smp_mb__before_atomic(); set_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_iolock: + memalloc_nofs_restore(memalloc); mutex_unlock(&dquot->dq_lock); return ret; } @@ -471,9 +475,11 @@ EXPORT_SYMBOL(dquot_acquire); int dquot_commit(struct dquot *dquot) { int ret = 0; + unsigned int memalloc; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); + memalloc = memalloc_nofs_save(); if (!clear_dquot_dirty(dquot)) goto out_lock; /* Inactive dquot can be only if there was error during read/init @@ -483,6 +489,7 @@ int dquot_commit(struct dquot *dquot) else ret = -EIO; out_lock: + memalloc_nofs_restore(memalloc); mutex_unlock(&dquot->dq_lock); return ret; } @@ -494,9 +501,11 @@ EXPORT_SYMBOL(dquot_commit); int dquot_release(struct dquot *dquot) { int ret = 0, ret2 = 0; + unsigned int memalloc; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); + memalloc = memalloc_nofs_save(); /* Check whether we are not racing with some other dqget() */ if (dquot_is_busy(dquot)) goto out_dqlock; @@ -512,6 +521,7 @@ int dquot_release(struct dquot *dquot) } clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_dqlock: + memalloc_nofs_restore(memalloc); mutex_unlock(&dquot->dq_lock); return ret; } diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index 1a188fbdf34e..07948f6ac84e 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -80,6 +80,35 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) return ret; } +static inline int do_check_range(struct super_block *sb, const char *val_name, + uint val, uint min_val, uint max_val) +{ + if (val < min_val || val > max_val) { + quota_error(sb, "Getting %s %u out of range %u-%u", + 
val_name, val, min_val, max_val); + return -EUCLEAN; + } + + return 0; +} + +static int check_dquot_block_header(struct qtree_mem_dqinfo *info, + struct qt_disk_dqdbheader *dh) +{ + int err = 0; + + err = do_check_range(info->dqi_sb, "dqdh_next_free", + le32_to_cpu(dh->dqdh_next_free), 0, + info->dqi_blocks - 1); + if (err) + return err; + err = do_check_range(info->dqi_sb, "dqdh_prev_free", + le32_to_cpu(dh->dqdh_prev_free), 0, + info->dqi_blocks - 1); + + return err; +} + /* Remove empty block from list and return it */ static int get_free_dqblk(struct qtree_mem_dqinfo *info) { @@ -94,6 +123,9 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info) ret = read_blk(info, blk, buf); if (ret < 0) goto out_buf; + ret = check_dquot_block_header(info, dh); + if (ret) + goto out_buf; info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); } else { @@ -241,6 +273,9 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, *err = read_blk(info, blk, buf); if (*err < 0) goto out_buf; + *err = check_dquot_block_header(info, dh); + if (*err) + goto out_buf; } else { blk = get_free_dqblk(info); if ((int)blk < 0) { @@ -433,6 +468,9 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, goto out_buf; } dh = (struct qt_disk_dqdbheader *)buf; + ret = check_dquot_block_header(info, dh); + if (ret) + goto out_buf; le16_add_cpu(&dh->dqdh_entries, -1); if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */ ret = remove_free_dqentry(info, buf, blk); diff --git a/fs/read_write.c b/fs/read_write.c index ae703cb90599..729c02a917ed 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1388,28 +1388,6 @@ ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, } EXPORT_SYMBOL(generic_copy_file_range); -static ssize_t do_copy_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - size_t len, unsigned int flags) -{ - /* - * Although we now allow filesystems to handle cross sb copy, passing - * a file of the wrong filesystem type to filesystem driver can result - * in an attempt to dereference the wrong type of ->private_data, so - * avoid doing that until we really have a good reason. NFS defines - * several different file_system_type structures, but they all end up - * using the same ->copy_file_range() function pointer. - */ - if (file_out->f_op->copy_file_range && - file_out->f_op->copy_file_range == file_in->f_op->copy_file_range) - return file_out->f_op->copy_file_range(file_in, pos_in, - file_out, pos_out, - len, flags); - - return generic_copy_file_range(file_in, pos_in, file_out, pos_out, len, - flags); -} - /* * Performs necessary checks before doing a file copy * @@ -1431,6 +1409,26 @@ static int generic_copy_file_checks(struct file *file_in, loff_t pos_in, if (ret) return ret; + /* + * We allow some filesystems to handle cross sb copy, but passing + * a file of the wrong filesystem type to filesystem driver can result + * in an attempt to dereference the wrong type of ->private_data, so + * avoid doing that until we really have a good reason. + * + * nfs and cifs define several different file_system_type structures + * and several different sets of file_operations, but they all end up + * using the same ->copy_file_range() function pointer. 
+ */ + if (flags & COPY_FILE_SPLICE) { + /* cross sb splice is allowed */ + } else if (file_out->f_op->copy_file_range) { + if (file_in->f_op->copy_file_range != + file_out->f_op->copy_file_range) + return -EXDEV; + } else if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) { + return -EXDEV; + } + /* Don't touch certain kinds of inodes */ if (IS_IMMUTABLE(inode_out)) return -EPERM; @@ -1473,8 +1471,9 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, size_t len, unsigned int flags) { ssize_t ret; + bool splice = flags & COPY_FILE_SPLICE; - if (flags != 0) + if (flags & ~COPY_FILE_SPLICE) return -EINVAL; ret = generic_copy_file_checks(file_in, pos_in, file_out, pos_out, &len, @@ -1496,26 +1495,43 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, file_start_write(file_out); /* - * Try cloning first, this is supported by more file systems, and - * more efficient if both clone and copy are supported (e.g. NFS). + * Cloning is supported by more file systems, so we implement copy on + * same sb using clone, but for filesystems where both clone and copy + * are supported (e.g. nfs,cifs), we only call the copy method. */ - if (file_in->f_op->remap_file_range && - file_inode(file_in)->i_sb == file_inode(file_out)->i_sb) { - loff_t cloned; + if (!splice && file_out->f_op->copy_file_range) { + ret = file_out->f_op->copy_file_range(file_in, pos_in, + file_out, pos_out, + len, flags); + goto done; + } - cloned = file_in->f_op->remap_file_range(file_in, pos_in, + if (!splice && file_in->f_op->remap_file_range && + file_inode(file_in)->i_sb == file_inode(file_out)->i_sb) { + ret = file_in->f_op->remap_file_range(file_in, pos_in, file_out, pos_out, min_t(loff_t, MAX_RW_COUNT, len), REMAP_FILE_CAN_SHORTEN); - if (cloned > 0) { - ret = cloned; + if (ret > 0) goto done; - } } - ret = do_copy_file_range(file_in, pos_in, file_out, pos_out, len, - flags); - WARN_ON_ONCE(ret == -EOPNOTSUPP); + /* + * We can get here for same sb copy of filesystems that do not implement + * ->copy_file_range(), either when the filesystem does not support + * clone or when the filesystem supports clone but rejected the clone + * request (e.g. because it was not block aligned). + * + * In both cases, fall back to kernel copy so we are able to maintain a + * consistent story about which filesystems support copy_file_range() + * and which filesystems do not, which will allow userspace tools to + * make consistent decisions w.r.t. using copy_file_range(). + * + * We also get here if the caller (e.g. nfsd) requested COPY_FILE_SPLICE. + */ + ret = generic_copy_file_range(file_in, pos_in, file_out, pos_out, len, + flags); + done: if (ret > 0) { fsnotify_access(file_in); @@ -1566,6 +1582,10 @@ SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in, pos_out = f_out.file->f_pos; } + ret = -EINVAL; + if (flags != 0) + goto out; + ret = vfs_copy_file_range(f_in.file, pos_in, f_out.file, pos_out, len, flags); if (ret > 0) { diff --git a/fs/remap_range.c b/fs/remap_range.c index e6099beefa97..e8e00e217d6c 100644 --- a/fs/remap_range.c +++ b/fs/remap_range.c @@ -71,7 +71,8 @@ static int generic_remap_checks(struct file *file_in, loff_t pos_in, * Otherwise, make sure the count is also block-aligned, having * already confirmed the starting offsets' block alignment.
*/ - if (pos_in + count == size_in) { + if (pos_in + count == size_in && + (!(remap_flags & REMAP_FILE_DEDUP) || pos_out + count == size_out)) { bcount = ALIGN(size_in, bs) - pos_in; } else { if (!IS_ALIGNED(count, bs)) diff --git a/fs/stat.c b/fs/stat.c index c8a078f3acf8..c6a2e10983f8 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -306,9 +306,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat # define choose_32_64(a,b) b #endif -#define valid_dev(x) choose_32_64(old_valid_dev(x),true) -#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x) - #ifndef INIT_STRUCT_STAT_PADDING # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st)) #endif @@ -317,7 +314,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf) { struct stat tmp; - if (!valid_dev(stat->dev) || !valid_dev(stat->rdev)) + if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev)) + return -EOVERFLOW; + if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev)) return -EOVERFLOW; #if BITS_PER_LONG == 32 if (stat->size > MAX_NON_LFS) @@ -325,7 +324,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf) #endif INIT_STRUCT_STAT_PADDING(tmp); - tmp.st_dev = encode_dev(stat->dev); + tmp.st_dev = new_encode_dev(stat->dev); tmp.st_ino = stat->ino; if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) return -EOVERFLOW; @@ -335,7 +334,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf) return -EOVERFLOW; SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid)); SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid)); - tmp.st_rdev = encode_dev(stat->rdev); + tmp.st_rdev = new_encode_dev(stat->rdev); tmp.st_size = stat->size; tmp.st_atime = stat->atime.tv_sec; tmp.st_mtime = stat->mtime.tv_sec; @@ -616,11 +615,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) { struct compat_stat tmp; - if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev)) + if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev)) + return -EOVERFLOW; + if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev)) return -EOVERFLOW; memset(&tmp, 0, sizeof(tmp)); - tmp.st_dev = old_encode_dev(stat->dev); + tmp.st_dev = new_encode_dev(stat->dev); tmp.st_ino = stat->ino; if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) return -EOVERFLOW; @@ -630,7 +631,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) return -EOVERFLOW; SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid)); SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid)); - tmp.st_rdev = old_encode_dev(stat->rdev); + tmp.st_rdev = new_encode_dev(stat->rdev); if ((u64) stat->size > MAX_NON_LFS) return -EOVERFLOW; tmp.st_size = stat->size; diff --git a/fs/super.c b/fs/super.c index ae2b97dabe31..602d02c50c42 100644 --- a/fs/super.c +++ b/fs/super.c @@ -293,7 +293,7 @@ static void __put_super(struct super_block *s) WARN_ON(s->s_inode_lru.node); WARN_ON(!list_empty(&s->s_mounts)); security_sb_free(s); - fscrypt_sb_free(s); + fscrypt_destroy_keyring(s); put_user_ns(s->s_user_ns); kfree(s->s_subtype); call_rcu(&s->rcu, destroy_super_rcu); @@ -454,6 +454,7 @@ void generic_shutdown_super(struct super_block *sb) evict_inodes(sb); /* only nonzero refcount inodes can have marks */ fsnotify_sb_delete(sb); + fscrypt_destroy_keyring(sb); if (sb->s_dio_done_wq) { destroy_workqueue(sb->s_dio_done_wq); diff --git a/fs/sync.c b/fs/sync.c index 
9bbaa61994fc..ad7a2715ab81 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -21,25 +21,6 @@ #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ SYNC_FILE_RANGE_WAIT_AFTER) -/* - * Do the filesystem syncing work. For simple filesystems - * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to - * submit IO for these buffers via __sync_blockdev(). This also speeds up the - * wait == 1 case since in that case write_inode() functions do - * sync_dirty_buffer() and thus effectively write one block at a time. - */ -static int __sync_filesystem(struct super_block *sb, int wait) -{ - if (wait) - sync_inodes_sb(sb); - else - writeback_inodes_sb(sb, WB_REASON_SYNC); - - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, wait); - return __sync_blockdev(sb->s_bdev, wait); -} - /* * Write out and wait upon all dirty data associated with this * superblock. Filesystem data as well as the underlying block @@ -47,7 +28,7 @@ static int __sync_filesystem(struct super_block *sb, int wait) */ int sync_filesystem(struct super_block *sb) { - int ret; + int ret = 0; /* * We need to be protected against the filesystem going from @@ -61,10 +42,31 @@ int sync_filesystem(struct super_block *sb) if (sb_rdonly(sb)) return 0; - ret = __sync_filesystem(sb, 0); - if (ret < 0) + /* + * Do the filesystem syncing work. For simple filesystems + * writeback_inodes_sb(sb) just dirties buffers with inodes so we have + * to submit I/O for these buffers via __sync_blockdev(). This also + * speeds up the wait == 1 case since in that case write_inode() + * methods call sync_dirty_buffer() and thus effectively write one block + * at a time. + */ + writeback_inodes_sb(sb, WB_REASON_SYNC); + if (sb->s_op->sync_fs) { + ret = sb->s_op->sync_fs(sb, 0); + if (ret) + return ret; + } + ret = __sync_blockdev(sb->s_bdev, 0); + if (ret) return ret; - return __sync_filesystem(sb, 1); + + sync_inodes_sb(sb); + if (sb->s_op->sync_fs) { + ret = sb->s_op->sync_fs(sb, 1); + if (ret) + return ret; + } + return __sync_blockdev(sb->s_bdev, 1); } EXPORT_SYMBOL_NS(sync_filesystem, ANDROID_GKI_VFS_EXPORT_ONLY); diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index 7b126d3a469a..1311e6846272 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -139,6 +139,8 @@ struct tracefs_mount_opts { kuid_t uid; kgid_t gid; umode_t mode; + /* Opt_* bitfield. */ + unsigned int opts; }; enum { @@ -239,6 +241,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) kgid_t gid; char *p; + opts->opts = 0; opts->mode = TRACEFS_DEFAULT_MODE; while ((p = strsep(&data, ",")) != NULL) { @@ -273,24 +276,36 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) * but traditionally tracefs has ignored all mount options */ } + + opts->opts |= BIT(token); } return 0; } -static int tracefs_apply_options(struct super_block *sb) +static int tracefs_apply_options(struct super_block *sb, bool remount) { struct tracefs_fs_info *fsi = sb->s_fs_info; struct inode *inode = sb->s_root->d_inode; struct tracefs_mount_opts *opts = &fsi->mount_opts; - inode->i_mode &= ~S_IALLUGO; - inode->i_mode |= opts->mode; + /* + * On remount, only reset mode/uid/gid if they were provided as mount + * options. 
+ */ - inode->i_uid = opts->uid; + if (!remount || opts->opts & BIT(Opt_mode)) { + inode->i_mode &= ~S_IALLUGO; + inode->i_mode |= opts->mode; + } - /* Set all the group ids to the mount option */ - set_gid(sb->s_root, opts->gid); + if (!remount || opts->opts & BIT(Opt_uid)) + inode->i_uid = opts->uid; + + if (!remount || opts->opts & BIT(Opt_gid)) { + /* Set all the group ids to the mount option */ + set_gid(sb->s_root, opts->gid); + } return 0; } @@ -305,7 +320,7 @@ static int tracefs_remount(struct super_block *sb, int *flags, char *data) if (err) goto fail; - tracefs_apply_options(sb); + tracefs_apply_options(sb, true); fail: return err; @@ -357,7 +372,7 @@ static int trace_fill_super(struct super_block *sb, void *data, int silent) sb->s_op = &tracefs_super_operations; - tracefs_apply_options(sb); + tracefs_apply_options(sb, false); return 0; diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index bd250ed0a26f..e75301feb969 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -354,15 +354,18 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry, { struct inode *inode; struct ubifs_info *c = dir->i_sb->s_fs_info; - struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1}; + struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1, + .dirtied_ino = 1}; struct ubifs_budget_req ino_req = { .dirtied_ino = 1 }; struct ubifs_inode *ui, *dir_ui = ubifs_inode(dir); int err, instantiated = 0; struct fscrypt_name nm; /* - * Budget request settings: new dirty inode, new direntry, - * budget for dirtied inode will be released via writeback. + * Budget request settings: new inode, new direntry, changing the + * parent directory inode. + * Allocate budget separately for new dirtied inode, the budget will + * be released via writeback. */ dbg_gen("dent '%pd', mode %#hx in dir ino %lu", @@ -950,7 +953,8 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) struct ubifs_inode *dir_ui = ubifs_inode(dir); struct ubifs_info *c = dir->i_sb->s_fs_info; int err, sz_change; - struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1 }; + struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1, + .dirtied_ino = 1}; struct fscrypt_name nm; /* diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 9f3aced46c68..aff5ca32e4f6 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -241,7 +241,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir, poffset - lfi); else { if (!copy_name) { - copy_name = kmalloc(UDF_NAME_LEN, + copy_name = kmalloc(UDF_NAME_LEN_CS0, GFP_NOFS); if (!copy_name) { fi = ERR_PTR(-ENOMEM); diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 01cdd2c57b65..e11d1a6732a6 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -987,7 +987,7 @@ static int resolve_userfault_fork(struct userfaultfd_ctx *new, int fd; fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new, - O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode); + O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode); if (fd < 0) return fd; @@ -2101,7 +2101,7 @@ SYSCALL_DEFINE1(userfaultfd, int, flags) mmgrab(ctx->mm); fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx, - O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL); + O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL); if (fd < 0) { mmdrop(ctx->mm); kmem_cache_free(userfaultfd_ctx_cachep, ctx); diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index 96ac7e562b87..fcca36bbd997 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c @@ -876,21 +876,18 @@ 
xfs_attr_node_hasname( state = xfs_da_state_alloc(args); if (statep != NULL) - *statep = NULL; + *statep = state; /* * Search to see if name exists, and get back a pointer to it. */ error = xfs_da3_node_lookup_int(state, &retval); - if (error) { - xfs_da_state_free(state); - return error; - } + if (error) + retval = error; - if (statep != NULL) - *statep = state; - else + if (!statep) xfs_da_state_free(state); + return retval; } diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index d9a692484eae..de9c27ef68d8 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -6229,6 +6229,11 @@ xfs_bmap_validate_extent( xfs_fsblock_t endfsb; bool isrt; + if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock) + return __this_address; + if (irec->br_startoff + irec->br_blockcount <= irec->br_startoff) + return __this_address; + isrt = XFS_IS_REALTIME_INODE(ip); endfsb = irec->br_startblock + irec->br_blockcount - 1; if (isrt && whichfork == XFS_DATA_FORK) { diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index 2d25bab68764..24c7d30e41df 100644 --- a/fs/xfs/libxfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c @@ -353,20 +353,17 @@ xfs_btree_free_block( */ void xfs_btree_del_cursor( - xfs_btree_cur_t *cur, /* btree cursor */ - int error) /* del because of error */ + struct xfs_btree_cur *cur, /* btree cursor */ + int error) /* del because of error */ { - int i; /* btree level */ + int i; /* btree level */ /* - * Clear the buffer pointers, and release the buffers. - * If we're doing this in the face of an error, we - * need to make sure to inspect all of the entries - * in the bc_bufs array for buffers to be unlocked. - * This is because some of the btree code works from - * level n down to 0, and if we get an error along - * the way we won't have initialized all the entries - * down to 0. + * Clear the buffer pointers and release the buffers. If we're doing + * this because of an error, inspect all of the entries in the bc_bufs + * array for buffers to be unlocked. This is because some of the btree + * code works from level n down to 0, and if we get an error along the + * way we won't have initialized all the entries down to 0. */ for (i = 0; i < cur->bc_nlevels; i++) { if (cur->bc_bufs[i]) @@ -374,17 +371,17 @@ xfs_btree_del_cursor( else if (!error) break; } + /* - * Can't free a bmap cursor without having dealt with the - * allocated indirect blocks' accounting. - */ - ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || - cur->bc_ino.allocated == 0); - /* - * Free the cursor. + * If we are doing a BMBT update, the number of unaccounted blocks + * allocated during this cursor life time should be zero. If it's not + * zero, then we should be shut down or on our way to shutdown due to + * cancelling a dirty transaction on error. 
*/ + ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 || + XFS_FORCED_SHUTDOWN(cur->bc_mp) || error != 0); if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) - kmem_free((void *)cur->bc_ops); + kmem_free(cur->bc_ops); kmem_cache_free(xfs_btree_cur_zone, cur); } @@ -2814,7 +2811,7 @@ xfs_btree_split_worker( struct xfs_btree_split_args *args = container_of(work, struct xfs_btree_split_args, work); unsigned long pflags; - unsigned long new_pflags = PF_MEMALLOC_NOFS; + unsigned long new_pflags = 0; /* * we are in a transaction context here, but may also be doing work @@ -2826,12 +2823,20 @@ xfs_btree_split_worker( new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; current_set_flags_nested(&pflags, new_pflags); + xfs_trans_set_context(args->cur->bc_tp); args->result = __xfs_btree_split(args->cur, args->level, args->ptrp, args->key, args->curp, args->stat); + + xfs_trans_clear_context(args->cur->bc_tp); + current_restore_flags_nested(&pflags, new_pflags); + + /* + * Do not access args after complete() has run here. We don't own args + * and the owner may run and free args before we return here. + */ complete(args->done); - current_restore_flags_nested(&pflags, new_pflags); } /* diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h index e55378640b05..d03e6098ded9 100644 --- a/fs/xfs/libxfs/xfs_dir2.h +++ b/fs/xfs/libxfs/xfs_dir2.h @@ -47,8 +47,6 @@ extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp, extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp, struct xfs_name *name, xfs_ino_t ino, xfs_extlen_t tot); -extern bool xfs_dir2_sf_replace_needblock(struct xfs_inode *dp, - xfs_ino_t inum); extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp, struct xfs_name *name, xfs_ino_t inum, xfs_extlen_t tot); diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c index 2463b5d73447..8c4f76bba88b 100644 --- a/fs/xfs/libxfs/xfs_dir2_sf.c +++ b/fs/xfs/libxfs/xfs_dir2_sf.c @@ -1018,7 +1018,7 @@ xfs_dir2_sf_removename( /* * Check whether the sf dir replace operation needs more blocks. */ -bool +static bool xfs_dir2_sf_replace_needblock( struct xfs_inode *dp, xfs_ino_t inum) diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index c667c63f2cb0..fa8aefe6b7ec 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -358,19 +358,36 @@ xfs_dinode_verify_fork( int whichfork) { uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork); + mode_t mode = be16_to_cpu(dip->di_mode); + uint32_t fork_size = XFS_DFORK_SIZE(dip, mp, whichfork); + uint32_t fork_format = XFS_DFORK_FORMAT(dip, whichfork); - switch (XFS_DFORK_FORMAT(dip, whichfork)) { - case XFS_DINODE_FMT_LOCAL: - /* - * no local regular files yet - */ - if (whichfork == XFS_DATA_FORK) { - if (S_ISREG(be16_to_cpu(dip->di_mode))) - return __this_address; - if (be64_to_cpu(dip->di_size) > - XFS_DFORK_SIZE(dip, mp, whichfork)) + /* + * For fork types that can contain local data, check that the fork + * format matches the size of local data contained within the fork. + * + * For all types, check that when the size says the fork should be in + * extent or btree format, the inode isn't claiming it is in local format.
+ */ + if (whichfork == XFS_DATA_FORK) { + if (S_ISDIR(mode) || S_ISLNK(mode)) { + if (be64_to_cpu(dip->di_size) <= fork_size && + fork_format != XFS_DINODE_FMT_LOCAL) return __this_address; } + + if (be64_to_cpu(dip->di_size) > fork_size && + fork_format == XFS_DINODE_FMT_LOCAL) + return __this_address; + } + + switch (fork_format) { + case XFS_DINODE_FMT_LOCAL: + /* + * No local regular files yet. + */ + if (S_ISREG(mode) && whichfork == XFS_DATA_FORK) + return __this_address; if (di_nextents) return __this_address; break; diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h index 8bd00da6d2a4..2f46ef3800aa 100644 --- a/fs/xfs/libxfs/xfs_log_format.h +++ b/fs/xfs/libxfs/xfs_log_format.h @@ -414,7 +414,16 @@ struct xfs_log_dinode { /* start of the extended dinode, writable fields */ uint32_t di_crc; /* CRC of the inode */ uint64_t di_changecount; /* number of attribute changes */ - xfs_lsn_t di_lsn; /* flush sequence */ + + /* + * The LSN we write to this field during formatting is not a reflection + * of the current on-disk LSN. It should never be used for recovery + * sequencing, nor should it be recovered into the on-disk inode at all. + * See xlog_recover_inode_commit_pass2() and xfs_log_dinode_to_disk() + * for details. + */ + xfs_lsn_t di_lsn; + uint64_t di_flags2; /* more random flags */ uint32_t di_cowextsize; /* basic cow extent size for file */ uint8_t di_pad2[12]; /* more padding for future expansion */ diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index 5aeafa59ed27..66e8353da2f3 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c @@ -956,9 +956,19 @@ xfs_log_sb( struct xfs_mount *mp = tp->t_mountp; struct xfs_buf *bp = xfs_trans_getsb(tp); - mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount); - mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree); - mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks); + /* + * Lazy sb counters don't update the in-core superblock so do that now. + * If this is at unmount, the counters will be exactly correct, but at + * any other time they will only be ballpark correct because of + * reservations that have been taken out of the percpu counters. If we + * have an unclean shutdown, this will be corrected by log recovery + * rebuilding the counters from the AGF block counts. + */ + if (xfs_sb_version_haslazysbcount(&mp->m_sb)) { + mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount); + mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree); + mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks); + } xfs_sb_to_disk(bp->b_addr, &mp->m_sb); xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h index 397d94775440..1ce06173c2f5 100644 --- a/fs/xfs/libxfs/xfs_types.h +++ b/fs/xfs/libxfs/xfs_types.h @@ -21,6 +21,7 @@ typedef int32_t xfs_suminfo_t; /* type of bitmap summary info */ typedef uint32_t xfs_rtword_t; /* word type for bitmap manipulations */ typedef int64_t xfs_lsn_t; /* log sequence number */ +typedef int64_t xfs_csn_t; /* CIL sequence number */ typedef uint32_t xfs_dablk_t; /* dir/attr block number (in file) */ typedef uint32_t xfs_dahash_t; /* dir/attr hash value */ diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 4304c6416fbb..953de843d9c3 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -62,7 +62,7 @@ xfs_setfilesize_trans_alloc( * We hand off the transaction to the completion thread now, so * clear the flag here.
*/ - current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); + xfs_trans_clear_context(tp); return 0; } @@ -125,7 +125,7 @@ xfs_setfilesize_ioend( * thus we need to mark ourselves as being in a transaction manually. * Similarly for freeze protection. */ - current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); + xfs_trans_set_context(tp); __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS); /* we abort the update if there was an IO error */ @@ -145,6 +145,7 @@ xfs_end_ioend( struct iomap_ioend *ioend) { struct xfs_inode *ip = XFS_I(ioend->io_inode); + struct xfs_mount *mp = ip->i_mount; xfs_off_t offset = ioend->io_offset; size_t size = ioend->io_size; unsigned int nofs_flag; @@ -160,18 +161,26 @@ xfs_end_ioend( /* * Just clean up the in-memory structures if the fs has been shut down. */ - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { + if (XFS_FORCED_SHUTDOWN(mp)) { error = -EIO; goto done; } /* - * Clean up any COW blocks on an I/O error. + * Clean up all COW blocks and underlying data fork delalloc blocks on + * I/O error. The delalloc punch is required because this ioend was + * mapped to blocks in the COW fork and the associated pages are no + * longer dirty. If we don't remove delalloc blocks here, they become + * stale and can corrupt free space accounting on unmount. */ error = blk_status_to_errno(ioend->io_bio->bi_status); if (unlikely(error)) { - if (ioend->io_flags & IOMAP_F_SHARED) + if (ioend->io_flags & IOMAP_F_SHARED) { + xfs_reflink_cancel_cow_range(ip, offset, size, true); + xfs_bmap_punch_delalloc_range(ip, + XFS_B_TO_FSBT(mp, offset), + XFS_B_TO_FSB(mp, size)); + } goto done; } @@ -568,6 +577,12 @@ xfs_vm_writepage( { struct xfs_writepage_ctx wpc = { }; + if (WARN_ON_ONCE(current->journal_info)) { + redirty_page_for_writepage(wbc, page); + unlock_page(page); + return 0; + } + return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops); } @@ -578,6 +593,13 @@ xfs_vm_writepages( { struct xfs_writepage_ctx wpc = { }; + /* + * Writing back data in a transaction context can result in recursive + * transactions. This is bad, so issue a warning and get out of here. + */ + if (WARN_ON_ONCE(current->journal_info)) + return 0; + xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops); } diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 0356f2e340a1..a3d5ecccfc2c 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -56,14 +56,12 @@ xfs_buf_log_format_size( } /* - * This returns the number of log iovecs needed to log the - * given buf log item. + * Return the number of log iovecs and space needed to log the given buf log + * item segment. * - * It calculates this as 1 iovec for the buf log format structure - * and 1 for each stretch of non-contiguous chunks to be logged. - * Contiguous chunks are logged in a single iovec. - * - * If the XFS_BLI_STALE flag has been set, then log nothing. + * It calculates this as 1 iovec for the buf log format structure and 1 for each + * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged + * in a single iovec.
+ * Return the number of log iovecs and space needed to log the given buf log + * item. * * Discontiguous buffers need a format structure per region that is being * logged. This makes the changes in the buffer appear to log recovery as though @@ -133,7 +128,11 @@ xfs_buf_item_size_segment( * what ends up on disk. * * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log - * format structures. + * format structures. If the item has previously been logged and has dirty + * regions, we do not relog them in stale buffers. This has the effect of + * reducing the size of the relogged item by the amount of dirty data tracked + * by the log item. This can result in the committing transaction reducing the + * amount of space being consumed by the CIL. */ STATIC void xfs_buf_item_size( @@ -147,9 +146,9 @@ xfs_buf_item_size( ASSERT(atomic_read(&bip->bli_refcount) > 0); if (bip->bli_flags & XFS_BLI_STALE) { /* - * The buffer is stale, so all we need to log - * is the buf log format structure with the - * cancel flag in it. + * The buffer is stale, so all we need to log is the buf log + * format structure with the cancel flag in it as we are never + * going to replay the changes tracked in the log item. */ trace_xfs_buf_item_size_stale(bip); ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL); @@ -164,9 +163,9 @@ xfs_buf_item_size( if (bip->bli_flags & XFS_BLI_ORDERED) { /* - * The buffer has been logged just to order it. - * It is not being included in the transaction - * commit, so no vectors are used at all. + * The buffer has been logged just to order it. It is not being + * included in the transaction commit, so no vectors are used at + * all. */ trace_xfs_buf_item_size_ordered(bip); *nvecs = XFS_LOG_VEC_ORDERED; @@ -394,17 +393,8 @@ xfs_buf_item_pin( } /* - * This is called to unpin the buffer associated with the buf log - * item which was previously pinned with a call to xfs_buf_item_pin(). - * - * Also drop the reference to the buf item for the current transaction. - * If the XFS_BLI_STALE flag is set and we are the last reference, - * then free up the buf log item and unlock the buffer. - * - * If the remove flag is set we are called from uncommit in the - * forced-shutdown path. If that is true and the reference count on - * the log item is going to drop to zero we need to free the item's - * descriptor in the transaction. + * This is called to unpin the buffer associated with the buf log item which + * was previously pinned with a call to xfs_buf_item_pin(). */ STATIC void xfs_buf_item_unpin( @@ -421,38 +411,35 @@ xfs_buf_item_unpin( trace_xfs_buf_item_unpin(bip); + /* + * Drop the bli ref associated with the pin and grab the hold required + * for the I/O simulation failure in the abort case. We have to do this + * before the pin count drops because the AIL doesn't acquire a bli + * reference. Therefore if the refcount drops to zero, the bli could + * still be AIL resident and the buffer submitted for I/O (and freed on + * completion) at any point before we return. This can be removed once + * the AIL properly holds a reference on the bli. 
+ */ freed = atomic_dec_and_test(&bip->bli_refcount); - + if (freed && !stale && remove) + xfs_buf_hold(bp); if (atomic_dec_and_test(&bp->b_pin_count)) wake_up_all(&bp->b_waiters); - if (freed && stale) { + /* nothing to do but drop the pin count if the bli is active */ + if (!freed) + return; + + if (stale) { ASSERT(bip->bli_flags & XFS_BLI_STALE); ASSERT(xfs_buf_islocked(bp)); ASSERT(bp->b_flags & XBF_STALE); ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL); + ASSERT(list_empty(&lip->li_trans)); + ASSERT(!bp->b_transp); trace_xfs_buf_item_unpin_stale(bip); - if (remove) { - /* - * If we are in a transaction context, we have to - * remove the log item from the transaction as we are - * about to release our reference to the buffer. If we - * don't, the unlock that occurs later in - * xfs_trans_uncommit() will try to reference the - * buffer which we no longer have a hold on. - */ - if (!list_empty(&lip->li_trans)) - xfs_trans_del_item(lip); - - /* - * Since the transaction no longer refers to the buffer, - * the buffer should no longer refer to the transaction. - */ - bp->b_transp = NULL; - } - /* * If we get called here because of an IO error, we may or may * not have the item on the AIL. xfs_trans_ail_delete() will @@ -469,13 +456,13 @@ xfs_buf_item_unpin( ASSERT(bp->b_log_item == NULL); } xfs_buf_relse(bp); - } else if (freed && remove) { + } else if (remove) { /* * The buffer must be locked and held by the caller to simulate - * an async I/O failure. + * an async I/O failure. We acquired the hold for this case + * before the buffer was unpinned. */ xfs_buf_lock(bp); - xfs_buf_hold(bp); bp->b_flags |= XBF_ASYNC; xfs_buf_ioend_fail(bp); } @@ -633,7 +620,7 @@ xfs_buf_item_release( STATIC void xfs_buf_item_committing( struct xfs_log_item *lip, - xfs_lsn_t commit_lsn) + xfs_csn_t seq) { return xfs_buf_item_release(lip); } diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c index d44e8b4a3391..b374c9cee117 100644 --- a/fs/xfs/xfs_buf_item_recover.c +++ b/fs/xfs/xfs_buf_item_recover.c @@ -796,6 +796,7 @@ xlog_recover_get_buf_lsn( switch (magicda) { case XFS_DIR3_LEAF1_MAGIC: case XFS_DIR3_LEAFN_MAGIC: + case XFS_ATTR3_LEAF_MAGIC: case XFS_DA3_NODE_MAGIC: lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); uuid = &((struct xfs_da3_blkinfo *)blk)->uuid; @@ -805,7 +806,7 @@ xlog_recover_get_buf_lsn( } if (lsn != (xfs_lsn_t)-1) { - if (!uuid_equal(&mp->m_sb.sb_uuid, uuid)) + if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid)) goto recover_immediately; return lsn; } diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 1d95ed387d66..80c4579d6835 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -500,6 +500,42 @@ xfs_dquot_alloc( return dqp; } +/* Check the ondisk dquot's id and type match what the incore dquot expects. */ +static bool +xfs_dquot_check_type( + struct xfs_dquot *dqp, + struct xfs_disk_dquot *ddqp) +{ + uint8_t ddqp_type; + uint8_t dqp_type; + + ddqp_type = ddqp->d_type & XFS_DQTYPE_REC_MASK; + dqp_type = xfs_dquot_type(dqp); + + if (be32_to_cpu(ddqp->d_id) != dqp->q_id) + return false; + + /* + * V5 filesystems always expect an exact type match. V4 filesystems + * expect an exact match for user dquots and for non-root group and + * project dquots. + */ + if (xfs_sb_version_hascrc(&dqp->q_mount->m_sb) || + dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0) + return ddqp_type == dqp_type; + + /* + * V4 filesystems support either group or project quotas, but not both + * at the same time. 
The non-user quota file can be switched between + * group and project quota uses depending on the mount options, which + * means that we can encounter the other type when we try to load quota + * defaults. Quotacheck will soon reset the entire quota file + * (including the root dquot) anyway, but don't log scary corruption + * reports to dmesg. + */ + return ddqp_type == XFS_DQTYPE_GROUP || ddqp_type == XFS_DQTYPE_PROJ; +} + /* Copy the in-core quota fields in from the on-disk buffer. */ STATIC int xfs_dquot_from_disk( @@ -512,8 +548,7 @@ xfs_dquot_from_disk( * Ensure that we got the type and ID we were looking for. * Everything else was checked by the dquot buffer verifier. */ - if ((ddqp->d_type & XFS_DQTYPE_REC_MASK) != xfs_dquot_type(dqp) || - be32_to_cpu(ddqp->d_id) != dqp->q_id) { + if (!xfs_dquot_check_type(dqp, ddqp)) { xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR, "Metadata corruption detected at %pS, quota %u", __this_address, dqp->q_id); diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c index 8c1fdf37ee8f..8ed47b739b6c 100644 --- a/fs/xfs/xfs_dquot_item.c +++ b/fs/xfs/xfs_dquot_item.c @@ -188,7 +188,7 @@ xfs_qm_dquot_logitem_release( STATIC void xfs_qm_dquot_logitem_committing( struct xfs_log_item *lip, - xfs_lsn_t commit_lsn) + xfs_csn_t seq) { return xfs_qm_dquot_logitem_release(lip); } diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index 7f6e20899473..f9e2f606b5b8 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c @@ -293,6 +293,8 @@ xfs_errortag_add( struct xfs_mount *mp, unsigned int error_tag) { + BUILD_BUG_ON(ARRAY_SIZE(xfs_errortag_random_default) != XFS_ERRTAG_MAX); + if (error_tag >= XFS_ERRTAG_MAX) return -EINVAL; diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index 6c11bfc3d452..17aa77fbb5ad 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -482,7 +482,7 @@ xfs_extent_free_finish_item( free->xefi_startblock, free->xefi_blockcount, &free->xefi_oinfo, free->xefi_skip_discard); - kmem_free(free); + kmem_cache_free(xfs_bmap_free_item_zone, free); return error; } @@ -502,7 +502,7 @@ xfs_extent_free_cancel_item( struct xfs_extent_free_item *free; free = container_of(item, struct xfs_extent_free_item, xefi_list); - kmem_free(free); + kmem_cache_free(xfs_bmap_free_item_zone, free); } const struct xfs_defer_op_type xfs_extent_free_defer_type = { @@ -564,7 +564,7 @@ xfs_agfl_free_finish_item( extp->ext_len = free->xefi_blockcount; efdp->efd_next_extent++; - kmem_free(free); + kmem_cache_free(xfs_bmap_free_item_zone, free); return error; } diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 111fe73bb8a7..c9045809b8c4 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -118,6 +118,54 @@ xfs_dir_fsync( return xfs_log_force_inode(ip); } +static xfs_csn_t +xfs_fsync_seq( + struct xfs_inode *ip, + bool datasync) +{ + if (!xfs_ipincount(ip)) + return 0; + if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) + return 0; + return ip->i_itemp->ili_commit_seq; +} + +/* + * All metadata updates are logged, which means that we just have to flush the + * log up to the latest LSN that touched the inode. + * + * If we have concurrent fsync/fdatasync() calls, we need them to all block on + * the log force before we clear the ili_fsync_fields field. This ensures that + * we don't get a racing sync operation that does not wait for the metadata to + * hit the journal before returning.
If we race with clearing ili_fsync_fields, + * then all that will happen is the log force will do nothing as the lsn will + * already be on disk. We can't race with setting ili_fsync_fields because that + * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock + * shared until after the ili_fsync_fields is cleared. + */ +static int +xfs_fsync_flush_log( + struct xfs_inode *ip, + bool datasync, + int *log_flushed) +{ + int error = 0; + xfs_csn_t seq; + + xfs_ilock(ip, XFS_ILOCK_SHARED); + seq = xfs_fsync_seq(ip, datasync); + if (seq) { + error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, + log_flushed); + + spin_lock(&ip->i_itemp->ili_lock); + ip->i_itemp->ili_fsync_fields = 0; + spin_unlock(&ip->i_itemp->ili_lock); + } + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return error; +} + STATIC int xfs_file_fsync( struct file *file, @@ -125,13 +173,10 @@ xfs_file_fsync( loff_t end, int datasync) { - struct inode *inode = file->f_mapping->host; - struct xfs_inode *ip = XFS_I(inode); - struct xfs_inode_log_item *iip = ip->i_itemp; + struct xfs_inode *ip = XFS_I(file->f_mapping->host); struct xfs_mount *mp = ip->i_mount; int error = 0; int log_flushed = 0; - xfs_lsn_t lsn = 0; trace_xfs_file_fsync(ip); @@ -155,33 +200,7 @@ xfs_file_fsync( else if (mp->m_logdev_targp != mp->m_ddev_targp) xfs_blkdev_issue_flush(mp->m_ddev_targp); - /* - * All metadata updates are logged, which means that we just have to - * flush the log up to the latest LSN that touched the inode. If we have - * concurrent fsync/fdatasync() calls, we need them to all block on the - * log force before we clear the ili_fsync_fields field. This ensures - * that we don't get a racing sync operation that does not wait for the - * metadata to hit the journal before returning. If we race with - * clearing the ili_fsync_fields, then all that will happen is the log - * force will do nothing as the lsn will already be on disk. We can't - * race with setting ili_fsync_fields because that is done under - * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared - * until after the ili_fsync_fields is cleared. - */ - xfs_ilock(ip, XFS_ILOCK_SHARED); - if (xfs_ipincount(ip)) { - if (!datasync || - (iip->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) - lsn = iip->ili_last_lsn; - } - - if (lsn) { - error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed); - spin_lock(&iip->ili_lock); - iip->ili_fsync_fields = 0; - spin_unlock(&iip->ili_lock); - } - xfs_iunlock(ip, XFS_ILOCK_SHARED); + error = xfs_fsync_flush_log(ip, datasync, &log_flushed); /* * If we only have a single device, and the log force above was diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index db23e455eb91..bc41ec0c483d 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c @@ -128,11 +128,12 @@ xfs_filestream_pick_ag( if (!pag->pagf_init) { err = xfs_alloc_pagf_init(mp, NULL, ag, trylock); if (err) { - xfs_perag_put(pag); - if (err != -EAGAIN) + if (err != -EAGAIN) { + xfs_perag_put(pag); return err; + } /* Couldn't lock the AGF, skip this AG. */ - continue; + goto next_ag; } } diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index b7c5783a031c..b514db6131ff 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -376,46 +376,36 @@ xfs_reserve_blocks( * If the request is larger than the current reservation, reserve the * blocks before we update the reserve counters. Sample m_fdblocks and * perform a partial reservation if the request exceeds free space.
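The xfs_fsync_flush_log() helper introduced above concentrates the sequence-based force into one place: sample the commit sequence under the shared ilock, force the log to it, then clear the fsync-pending fields. A rough userspace model of that pattern follows, with the log, the locking, and the datasync filtering reduced to stubs; all identifiers are invented for illustration.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t csn_t;

struct inode_log_item {
	pthread_mutex_t lock;
	unsigned fsync_fields;	/* what changed since the last fsync */
	csn_t commit_seq;	/* last checkpoint sequence we joined */
};

static csn_t log_forced_seq;	/* highest sequence flushed so far */

static int log_force_seq(csn_t seq)
{
	if (seq > log_forced_seq)
		log_forced_seq = seq;	/* pretend we wrote the checkpoint */
	return 0;
}

static int fsync_flush_log(struct inode_log_item *iip)
{
	int error = 0;
	csn_t seq;

	pthread_mutex_lock(&iip->lock);
	seq = iip->fsync_fields ? iip->commit_seq : 0;
	if (seq) {
		error = log_force_seq(seq);
		iip->fsync_fields = 0;	/* nothing left to sync */
	}
	pthread_mutex_unlock(&iip->lock);
	return error;
}

int main(void)
{
	struct inode_log_item iip = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.fsync_fields = 0x3,
		.commit_seq = 42,
	};

	fsync_flush_log(&iip);
	printf("forced to seq %llu\n", (unsigned long long)log_forced_seq);
	return 0;
}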
+ * + * The code below estimates how many blocks it can request from + * fdblocks to stash in the reserve pool. This is a classic TOCTOU + * race since fdblocks updates are not always coordinated via + * m_sb_lock. Set the reserve size even if there's not enough free + * space to fill it because mod_fdblocks will refill an undersized + * reserve when it can. */ - error = -ENOSPC; - do { - free = percpu_counter_sum(&mp->m_fdblocks) - - mp->m_alloc_set_aside; - if (free <= 0) - break; - - delta = request - mp->m_resblks; - lcounter = free - delta; - if (lcounter < 0) - /* We can't satisfy the request, just get what we can */ - fdblks_delta = free; - else - fdblks_delta = delta; - + free = percpu_counter_sum(&mp->m_fdblocks) - + xfs_fdblocks_unavailable(mp); + delta = request - mp->m_resblks; + mp->m_resblks = request; + if (delta > 0 && free > 0) { /* * We'll either succeed in getting space from the free block - * count or we'll get an ENOSPC. If we get a ENOSPC, it means - * things changed while we were calculating fdblks_delta and so - * we should try again to see if there is anything left to - * reserve. + * count or we'll get an ENOSPC. Don't set the reserved flag + * here - we don't want to reserve the extra reserve blocks + * from the reserve. * - * Don't set the reserved flag here - we don't want to reserve - * the extra reserve blocks from the reserve..... + * The desired reserve size can change after we drop the lock. + * Use mod_fdblocks to put the space into the reserve or into + * fdblocks as appropriate. */ + fdblks_delta = min(free, delta); spin_unlock(&mp->m_sb_lock); error = xfs_mod_fdblocks(mp, -fdblks_delta, 0); + if (!error) + xfs_mod_fdblocks(mp, fdblks_delta, 0); spin_lock(&mp->m_sb_lock); - } while (error == -ENOSPC); - - /* - * Update the reserve counters if blocks have been successfully - * allocated. - */ - if (!error && fdblks_delta) { - mp->m_resblks += fdblks_delta; - mp->m_resblks_avail += fdblks_delta; } - out: if (outval) { outval->resblks = mp->m_resblks; diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index deb99300d171..e69a08ed7de4 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -47,8 +47,9 @@ xfs_inode_alloc( return NULL; } - /* VFS doesn't initialise i_mode! */ + /* VFS doesn't initialise i_mode or i_state! */ VFS_I(ip)->i_mode = 0; + VFS_I(ip)->i_state = 0; XFS_STATS_INC(mp, vn_active); ASSERT(atomic_read(&ip->i_pincount) == 0); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 2bfbcf28b1bd..19008838df76 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -802,6 +802,7 @@ xfs_ialloc( xfs_buf_t **ialloc_context, xfs_inode_t **ipp) { + struct inode *dir = pip ? VFS_I(pip) : NULL; struct xfs_mount *mp = tp->t_mountp; xfs_ino_t ino; xfs_inode_t *ip; @@ -847,18 +848,17 @@ xfs_ialloc( return error; ASSERT(ip != NULL); inode = VFS_I(ip); - inode->i_mode = mode; set_nlink(inode, nlink); - inode->i_uid = current_fsuid(); inode->i_rdev = rdev; ip->i_d.di_projid = prid; - if (pip && XFS_INHERIT_GID(pip)) { - inode->i_gid = VFS_I(pip)->i_gid; - if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode)) - inode->i_mode |= S_ISGID; + if (dir && !(dir->i_mode & S_ISGID) && + (mp->m_flags & XFS_MOUNT_GRPID)) { + inode->i_uid = current_fsuid(); + inode->i_gid = dir->i_gid; + inode->i_mode = mode; } else { - inode->i_gid = current_fsgid(); + inode_init_owner(inode, dir, mode); } /* @@ -2669,14 +2669,13 @@ xfs_ifree_cluster( } /* - * This is called to return an inode to the inode free list. 
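The reworked xfs_reserve_blocks() above sets the target pool size first and then moves at most min(free, delta) blocks, tolerating the stale free-space sample the comment calls a TOCTOU race. A simplified single-threaded model, assuming plain counters instead of the kernel's per-cpu ones and folding the take/put-back dance through xfs_mod_fdblocks() into one step:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int64_t fdblocks = 1000;	/* free block counter */
static int64_t resblks;		/* reserve pool target size */

static int mod_fdblocks(int64_t delta)
{
	if (fdblocks + delta < 0)
		return -ENOSPC;
	fdblocks += delta;
	return 0;
}

static void reserve_blocks(int64_t request)
{
	int64_t free = fdblocks;	/* sample; may already be stale */
	int64_t delta = request - resblks;

	resblks = request;	/* set the target even if we can't fill it */
	if (delta > 0 && free > 0) {
		int64_t take = delta < free ? delta : free;

		/* Pull from free space; on success park it in the pool. */
		if (!mod_fdblocks(-take))
			printf("moved %lld blocks into the reserve\n",
			       (long long)take);
	}
}

int main(void)
{
	reserve_blocks(200);	/* fully satisfied */
	reserve_blocks(5000);	/* partially satisfied; target stays 5000 */
	printf("pool target %lld, free %lld\n",
	       (long long)resblks, (long long)fdblocks);
	return 0;
}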
- * The inode should already be truncated to 0 length and have - * no pages associated with it. This routine also assumes that - * the inode is already a part of the transaction. + * This is called to return an inode to the inode free list. The inode should + * already be truncated to 0 length and have no pages associated with it. This + * routine also assumes that the inode is already a part of the transaction. * - * The on-disk copy of the inode will have been added to the list - * of unlinked inodes in the AGI. We need to remove the inode from - * that list atomically with respect to freeing it here. + * The on-disk copy of the inode will have been added to the list of unlinked + * inodes in the AGI. We need to remove the inode from that list atomically with + * respect to freeing it here. */ int xfs_ifree( @@ -2694,13 +2693,16 @@ xfs_ifree( ASSERT(ip->i_d.di_nblocks == 0); /* - * Pull the on-disk inode from the AGI unlinked list. + * Free the inode first so that we guarantee that the AGI lock is going + * to be taken before we remove the inode from the unlinked list. This + * makes the AGI lock -> unlinked list modification order the same as + * used in O_TMPFILE creation. */ - error = xfs_iunlink_remove(tp, ip); + error = xfs_difree(tp, ip->i_ino, &xic); if (error) return error; - error = xfs_difree(tp, ip->i_ino, &xic); + error = xfs_iunlink_remove(tp, ip); if (error) return error; @@ -2754,7 +2756,7 @@ xfs_iunpin( trace_xfs_inode_unpin_nowait(ip, _RET_IP_); /* Give the log a push to start the unpinning I/O */ - xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL); + xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL); } @@ -3152,7 +3154,7 @@ xfs_rename( struct xfs_trans *tp; struct xfs_inode *wip = NULL; /* whiteout inode */ struct xfs_inode *inodes[__XFS_SORT_INODES]; - struct xfs_buf *agibp; + int i; int num_inodes = __XFS_SORT_INODES; bool new_parent = (src_dp != target_dp); bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode); @@ -3170,7 +3172,6 @@ xfs_rename( * appropriately. */ if (flags & RENAME_WHITEOUT) { - ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE))); error = xfs_rename_alloc_whiteout(target_dp, &wip); if (error) return error; @@ -3265,6 +3266,30 @@ xfs_rename( } } + /* + * Lock the AGI buffers we need to handle bumping the nlink of the + * whiteout inode off the unlinked list and to handle dropping the + * nlink of the target inode. Per locking order rules, do this in + * increasing AG order and before directory block allocation tries to + * grab AGFs because we grab AGIs before AGFs. + * + * The (vfs) caller must ensure that if src is a directory then + * target_ip is either null or an empty directory. + */ + for (i = 0; i < num_inodes && inodes[i] != NULL; i++) { + if (inodes[i] == wip || + (inodes[i] == target_ip && + (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) { + struct xfs_buf *bp; + xfs_agnumber_t agno; + + agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino); + error = xfs_read_agi(mp, tp, agno, &bp); + if (error) + goto out_trans_cancel; + } + } + /* * Directory entry creation below may acquire the AGF. Remove * the whiteout from the unlinked list first to preserve correct @@ -3317,22 +3342,6 @@ xfs_rename( * In case there is already an entry with the same * name at the destination directory, remove it first. */ - - /* - * Check whether the replace operation will need to allocate - * blocks. This happens when the shortform directory lacks - * space and we have to convert it to a block format directory. 
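The rename hunk above pre-acquires every AGI it might touch in increasing AG order, before directory block allocation can take any AGF. A small sketch of that ordering rule, assuming an invented inode-number-to-AG mapping; in the kernel the inodes[] array is already sorted, so the explicit sort here only makes the rule visible:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* ino -> AG number; 20 bits of inodes per AG is an example geometry */
static uint32_t ino_to_agno(uint64_t ino) { return (uint32_t)(ino >> 20); }

static int cmp_agno(const void *a, const void *b)
{
	uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;
	return (x > y) - (x < y);
}

int main(void)
{
	/* inodes involved in a rename: dirs, target, whiteout, etc. */
	uint64_t inodes[] = { (3ULL << 20) | 7, (1ULL << 20) | 9, (2ULL << 20) | 4 };
	uint32_t agnos[3];
	size_t i;

	for (i = 0; i < 3; i++)
		agnos[i] = ino_to_agno(inodes[i]);
	qsort(agnos, 3, sizeof(agnos[0]), cmp_agno);

	/* "lock" AGIs in ascending AG order so two renames can't ABBA */
	for (i = 0; i < 3; i++)
		printf("read+lock AGI %u\n", agnos[i]);
	return 0;
}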
- * When more blocks are necessary, we must lock the AGI first - * to preserve locking order (AGI -> AGF). - */ - if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) { - error = xfs_read_agi(mp, tp, - XFS_INO_TO_AGNO(mp, target_ip->i_ino), - &agibp); - if (error) - goto out_trans_cancel; - } - error = xfs_dir_replace(tp, target_dp, target_name, src_ip->i_ino, spaceres); if (error) @@ -3709,16 +3718,16 @@ int xfs_log_force_inode( struct xfs_inode *ip) { - xfs_lsn_t lsn = 0; + xfs_csn_t seq = 0; xfs_ilock(ip, XFS_ILOCK_SHARED); if (xfs_ipincount(ip)) - lsn = ip->i_itemp->ili_last_lsn; + seq = ip->i_itemp->ili_commit_seq; xfs_iunlock(ip, XFS_ILOCK_SHARED); - if (!lsn) + if (!seq) return 0; - return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL); + return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL); } /* diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 17e20a6d8b4e..3aba4559469f 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -28,6 +28,20 @@ static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip) { return container_of(lip, struct xfs_inode_log_item, ili_item); } +/* + * The logged size of an inode fork is always the current size of the inode + * fork. This means that when an inode fork is relogged, the size of the logged + * region is determined by the current state, not the combination of the + * previously logged state + the current state. This is different relogging + * behaviour to most other log items which will retain the size of the + * previously logged changes when smaller regions are relogged. + * + * Hence, for operations that remove data from the inode fork (e.g. shortform + * dir/attr removal, extent-format extent removal, etc.), the relogged inode + * gets -smaller- rather than staying the same size as the previously logged + * size, and this can result in the committing transaction reducing the amount + * of space being consumed by the CIL.
+ */ STATIC void xfs_inode_item_data_fork_size( struct xfs_inode_log_item *iip, @@ -603,9 +617,9 @@ xfs_inode_item_committed( STATIC void xfs_inode_item_committing( struct xfs_log_item *lip, - xfs_lsn_t commit_lsn) + xfs_csn_t seq) { - INODE_ITEM(lip)->ili_last_lsn = commit_lsn; + INODE_ITEM(lip)->ili_commit_seq = seq; return xfs_inode_item_release(lip); } diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index 4b926e32831c..403b45ab9aa2 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h @@ -33,7 +33,7 @@ struct xfs_inode_log_item { unsigned int ili_fields; /* fields to be logged */ unsigned int ili_fsync_fields; /* logged since last fsync */ xfs_lsn_t ili_flush_lsn; /* lsn at last flush */ - xfs_lsn_t ili_last_lsn; /* lsn at last transaction */ + xfs_csn_t ili_commit_seq; /* last transaction commit */ }; static inline int xfs_inode_clean(struct xfs_inode *ip) diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c index cb44f7653f03..538724f9f85c 100644 --- a/fs/xfs/xfs_inode_item_recover.c +++ b/fs/xfs/xfs_inode_item_recover.c @@ -145,7 +145,8 @@ xfs_log_dinode_to_disk_ts( STATIC void xfs_log_dinode_to_disk( struct xfs_log_dinode *from, - struct xfs_dinode *to) + struct xfs_dinode *to, + xfs_lsn_t lsn) { to->di_magic = cpu_to_be16(from->di_magic); to->di_mode = cpu_to_be16(from->di_mode); @@ -182,7 +183,7 @@ xfs_log_dinode_to_disk( to->di_flags2 = cpu_to_be64(from->di_flags2); to->di_cowextsize = cpu_to_be32(from->di_cowextsize); to->di_ino = cpu_to_be64(from->di_ino); - to->di_lsn = cpu_to_be64(from->di_lsn); + to->di_lsn = cpu_to_be64(lsn); memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2)); uuid_copy(&to->di_uuid, &from->di_uuid); to->di_flushiter = 0; @@ -261,16 +262,25 @@ xlog_recover_inode_commit_pass2( } /* - * If the inode has an LSN in it, recover the inode only if it's less - * than the lsn of the transaction we are replaying. Note: we still - * need to replay an owner change even though the inode is more recent - * than the transaction as there is no guarantee that all the btree - * blocks are more recent than this transaction, too. + * If the inode has an LSN in it, recover the inode only if the on-disk + * inode's LSN is older than the lsn of the transaction we are + * replaying. We can have multiple checkpoints with the same start LSN, + * so the current LSN being equal to the on-disk LSN doesn't necessarily + * mean that the on-disk inode is more recent than the change being + * replayed. + * + * We must check the current_lsn against the on-disk inode + * here because we can't trust the log dinode to contain a valid LSN + * (see comment below before replaying the log dinode for details). + * + * Note: we still need to replay an owner change even though the inode + * is more recent than the transaction as there is no guarantee that all + * the btree blocks are more recent than this transaction, too. */ if (dip->di_version >= 3) { xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn); - if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { + if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) > 0) { trace_xfs_log_recover_inode_skip(log, in_f); error = 0; goto out_owner_change; @@ -368,8 +378,17 @@ xlog_recover_inode_commit_pass2( goto out_release; } - /* recover the log dinode inode into the on disk inode */ - xfs_log_dinode_to_disk(ldip, dip); + /* + * Recover the log dinode into the on-disk inode.
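The recovery gate above now replays the logged inode unless the on-disk LSN is strictly newer than the transaction being replayed; equality no longer counts as newer, because several checkpoints can share a start LSN. A tiny model of that comparison change, with plain integers standing in for LSNs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t lsn_t;

static int lsn_cmp(lsn_t a, lsn_t b) { return (a > b) - (a < b); }

static bool skip_replay(lsn_t disk_lsn, lsn_t current_lsn)
{
	/* old rule used >= 0, which also skipped the equal case and
	 * could lose updates from a checkpoint with the same start LSN */
	return disk_lsn && disk_lsn != -1 && lsn_cmp(disk_lsn, current_lsn) > 0;
}

int main(void)
{
	printf("equal LSNs skipped? %d\n", skip_replay(100, 100));  /* 0: replay */
	printf("newer on disk skipped? %d\n", skip_replay(101, 100)); /* 1: skip */
	return 0;
}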
+ * + * The LSN in the log dinode is garbage - it can be zero or reflect + * stale in-memory runtime state that isn't coherent with the changes + * logged in this transaction or the changes written to the on-disk + * inode. Hence we write the current LSN into the inode because that + * matches what xfs_iflush() would write into the inode when flushing + * the changes in this transaction. + */ + xfs_log_dinode_to_disk(ldip, dip, current_lsn); fields = in_f->ilf_fields; if (fields & XFS_ILOG_DEV) diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 646735aad45d..103fa8381e7d 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -371,7 +371,7 @@ int xfs_ioc_attr_list( struct xfs_inode *dp, void __user *ubuf, - int bufsize, + size_t bufsize, int flags, struct xfs_attrlist_cursor __user *ucursor) { @@ -1689,7 +1689,7 @@ xfs_ioc_getbmap( if (bmx.bmv_count < 2) return -EINVAL; - if (bmx.bmv_count > ULONG_MAX / recsize) + if (bmx.bmv_count >= INT_MAX / recsize) return -ENOMEM; buf = kvzalloc(bmx.bmv_count * sizeof(*buf), GFP_KERNEL); diff --git a/fs/xfs/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h index bab6a5a92407..416e20de66e7 100644 --- a/fs/xfs/xfs_ioctl.h +++ b/fs/xfs/xfs_ioctl.h @@ -38,8 +38,9 @@ xfs_readlink_by_handle( int xfs_ioc_attrmulti_one(struct file *parfilp, struct inode *inode, uint32_t opcode, void __user *uname, void __user *value, uint32_t *len, uint32_t flags); -int xfs_ioc_attr_list(struct xfs_inode *dp, void __user *ubuf, int bufsize, - int flags, struct xfs_attrlist_cursor __user *ucursor); +int xfs_ioc_attr_list(struct xfs_inode *dp, void __user *ubuf, + size_t bufsize, int flags, + struct xfs_attrlist_cursor __user *ucursor); extern struct dentry * xfs_handle_to_dentry( diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 7b9ff824e82d..bd5a25f4952d 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -870,6 +870,9 @@ xfs_buffered_write_iomap_begin( int allocfork = XFS_DATA_FORK; int error = 0; + if (XFS_FORCED_SHUTDOWN(mp)) + return -EIO; + /* we can't use delayed allocations when using extent size hints */ if (xfs_get_extsz_hint(ip)) return xfs_direct_write_iomap_begin(inode, offset, count, @@ -1059,11 +1062,11 @@ found_cow: error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0); if (error) return error; - } else { - xfs_trim_extent(&cmap, offset_fsb, - imap.br_startoff - offset_fsb); + return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED); } - return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED); + + xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb); + return xfs_bmbt_to_iomap(ip, iomap, &cmap, 0); out_unlock: xfs_iunlock(ip, XFS_ILOCK_EXCL); diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index b7f7b31a77d5..6a3026e78a9b 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -1328,7 +1328,7 @@ xfs_setup_inode( gfp_t gfp_mask; inode->i_ino = ip->i_ino; - inode->i_state = I_NEW; + inode->i_state |= I_NEW; inode_sb_list_add(inode); /* make the inode look hashed for the writeback code */ diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c index 2a45138831e3..eae3aff9bc97 100644 --- a/fs/xfs/xfs_iwalk.c +++ b/fs/xfs/xfs_iwalk.c @@ -363,7 +363,7 @@ xfs_iwalk_run_callbacks( /* Delete cursor but remember the last record we cached...
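The getbmap change above swaps a ULONG_MAX bound for INT_MAX / recsize, so the count-times-size multiplication can never overflow the allocator's size argument. A standalone sketch of the same overflow guard; the record layout and the demo values are illustrative only:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct rec { long long start, len, offset; };

static void *alloc_recs(uint64_t count)
{
	const size_t recsize = sizeof(struct rec);

	if (count < 2)
		return NULL;		/* caller bug: -EINVAL in the kernel */
	if (count >= INT_MAX / recsize)
		return NULL;		/* would overflow: -ENOMEM */
	return calloc(count, recsize);
}

int main(void)
{
	void *p = alloc_recs(4);

	printf("%s\n", p ? "allocated" : "rejected");
	free(p);

	p = alloc_recs((uint64_t)INT_MAX);	/* over the bound */
	printf("%s\n", p ? "allocated" : "rejected");
	free(p);
	return 0;
}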
*/ xfs_iwalk_del_inobt(tp, curpp, agi_bpp, 0); irec = &iwag->recs[iwag->nr_recs - 1]; - ASSERT(next_agino == irec->ir_startino + XFS_INODES_PER_CHUNK); + ASSERT(next_agino >= irec->ir_startino + XFS_INODES_PER_CHUNK); error = xfs_iwalk_ag_recs(iwag); if (error) diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index fa2d05e65ff1..22d7d74231d4 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -347,6 +347,25 @@ xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type) tic->t_res_num++; } +bool +xfs_log_writable( + struct xfs_mount *mp) +{ + /* + * Never write to the log on norecovery mounts, if the block device is + * read-only, or if the filesystem is shutdown. Read-only mounts still + * allow internal writes for log recovery and unmount purposes, so don't + * restrict that case here. + */ + if (mp->m_flags & XFS_MOUNT_NORECOVERY) + return false; + if (xfs_readonly_buftarg(mp->m_log->l_targ)) + return false; + if (XFS_FORCED_SHUTDOWN(mp)) + return false; + return true; +} + /* * Replenish the byte reservation required by moving the grant write head. */ @@ -746,6 +765,9 @@ xfs_log_mount_finish( if (readonly) mp->m_flags |= XFS_MOUNT_RDONLY; + /* Make sure the log is dead if we're returning failure. */ + ASSERT(!error || (mp->m_log->l_flags & XLOG_IO_ERROR)); + return error; } @@ -886,15 +908,8 @@ xfs_log_unmount_write( { struct xlog *log = mp->m_log; - /* - * Don't write out unmount record on norecovery mounts or ro devices. - * Or, if we are doing a forced umount (typically because of IO errors). - */ - if (mp->m_flags & XFS_MOUNT_NORECOVERY || - xfs_readonly_buftarg(log->l_targ)) { - ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); + if (!xfs_log_writable(mp)) return; - } xfs_log_force(mp, XFS_LOG_SYNC); @@ -3198,14 +3213,13 @@ out_error: } static int -__xfs_log_force_lsn( - struct xfs_mount *mp, +xlog_force_lsn( + struct xlog *log, xfs_lsn_t lsn, uint flags, int *log_flushed, bool already_slept) { - struct xlog *log = mp->m_log; struct xlog_in_core *iclog; spin_lock(&log->l_icloglock); @@ -3238,8 +3252,6 @@ __xfs_log_force_lsn( if (!already_slept && (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC || iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) { - XFS_STATS_INC(mp, xs_log_force_sleep); - xlog_wait(&iclog->ic_prev->ic_write_wait, &log->l_icloglock); return -EAGAIN; @@ -3277,25 +3289,29 @@ out_error: * to disk, that thread will wake up all threads waiting on the queue. 
*/ int -xfs_log_force_lsn( +xfs_log_force_seq( struct xfs_mount *mp, - xfs_lsn_t lsn, + xfs_csn_t seq, uint flags, int *log_flushed) { + struct xlog *log = mp->m_log; + xfs_lsn_t lsn; int ret; - ASSERT(lsn != 0); + ASSERT(seq != 0); XFS_STATS_INC(mp, xs_log_force); - trace_xfs_log_force(mp, lsn, _RET_IP_); + trace_xfs_log_force(mp, seq, _RET_IP_); - lsn = xlog_cil_force_lsn(mp->m_log, lsn); + lsn = xlog_cil_force_seq(log, seq); if (lsn == NULLCOMMITLSN) return 0; - ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, false); - if (ret == -EAGAIN) - ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, true); + ret = xlog_force_lsn(log, lsn, flags, log_flushed, false); + if (ret == -EAGAIN) { + XFS_STATS_INC(mp, xs_log_force_sleep); + ret = xlog_force_lsn(log, lsn, flags, log_flushed, true); + } return ret; } diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index 58c3fcbec94a..a1089f8b7169 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h @@ -106,7 +106,7 @@ struct xfs_item_ops; struct xfs_trans; int xfs_log_force(struct xfs_mount *mp, uint flags); -int xfs_log_force_lsn(struct xfs_mount *mp, xfs_lsn_t lsn, uint flags, +int xfs_log_force_seq(struct xfs_mount *mp, xfs_csn_t seq, uint flags, int *log_forced); int xfs_log_mount(struct xfs_mount *mp, struct xfs_buftarg *log_target, @@ -127,12 +127,11 @@ int xfs_log_reserve(struct xfs_mount *mp, int xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic); void xfs_log_unmount(struct xfs_mount *mp); int xfs_log_force_umount(struct xfs_mount *mp, int logerror); +bool xfs_log_writable(struct xfs_mount *mp); struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket); void xfs_log_ticket_put(struct xlog_ticket *ticket); -void xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp, - xfs_lsn_t *commit_lsn, bool regrant); void xlog_cil_process_committed(struct list_head *list); bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip); diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index b0ef071b3cb5..fbe160d5e9b9 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -668,9 +668,14 @@ xlog_cil_push_work( ASSERT(push_seq <= ctx->sequence); /* - * Wake up any background push waiters now this context is being pushed. + * As we are about to switch to a new, empty CIL context, we no longer + * need to throttle tasks on CIL space overruns. Wake any waiters that + * the hard push throttle may have caught so they can start committing + * to the new context. The ctx->xc_push_lock provides the serialisation + * necessary for safely using the lockless waitqueue_active() check in + * this context. */ - if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) + if (waitqueue_active(&cil->xc_push_wait)) wake_up_all(&cil->xc_push_wait); /* @@ -772,7 +777,7 @@ xlog_cil_push_work( * that higher sequences will wait for us to write out a commit record * before they do. * - * xfs_log_force_lsn requires us to mirror the new sequence into the cil + * xfs_log_force_seq requires us to mirror the new sequence into the cil * structure atomically with the addition of this sequence to the * committing list. This also ensures that we can do unlocked checks * against the current sequence in log forces without risking @@ -907,7 +912,7 @@ xlog_cil_push_background( ASSERT(!list_empty(&cil->xc_cil)); /* - * don't do a background push if we haven't used up all the + * Don't do a background push if we haven't used up all the * space available yet. 
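xfs_log_force_seq() above retries the force exactly once: the first attempt may sleep on the previous iclog and come back with -EAGAIN, and the caller bumps the sleep statistic before retrying with already_slept set. A compact model of that retry contract, with the iclog state reduced to a stub:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int sleep_count;

/* Stub: pretend the previous iclog is mid-sync on the first call only. */
static int force_lsn(long lsn, bool already_slept)
{
	static bool prev_iclog_syncing = true;

	if (!already_slept && prev_iclog_syncing) {
		prev_iclog_syncing = false;	/* the "wait" completed */
		return -EAGAIN;
	}
	printf("forced lsn %ld\n", lsn);
	return 0;
}

static int force_seq(long lsn)
{
	int ret = force_lsn(lsn, false);

	if (ret == -EAGAIN) {
		sleep_count++;	/* models XFS_STATS_INC(xs_log_force_sleep) */
		ret = force_lsn(lsn, true);
	}
	return ret;
}

int main(void)
{
	force_seq(7);
	printf("slept %d time(s)\n", sleep_count);
	return 0;
}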
*/ if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) { @@ -931,9 +936,16 @@ xlog_cil_push_background( /* * If we are well over the space limit, throttle the work that is being - * done until the push work on this context has begun. + * done until the push work on this context has begun. Enforce the hard + * throttle on all transaction commits once it has been activated, even + * if the committing transactions have resulted in the space usage + * dipping back down under the hard limit. + * + * The ctx->xc_push_lock provides the serialisation necessary for safely + * using the lockless waitqueue_active() check in this context. */ - if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) { + if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) || + waitqueue_active(&cil->xc_push_wait)) { trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket); ASSERT(cil->xc_ctx->space_used < log->l_logsize); xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock); @@ -1008,16 +1020,14 @@ xlog_cil_empty( * allowed again. */ void -xfs_log_commit_cil( - struct xfs_mount *mp, +xlog_cil_commit( + struct xlog *log, struct xfs_trans *tp, - xfs_lsn_t *commit_lsn, + xfs_csn_t *commit_seq, bool regrant) { - struct xlog *log = mp->m_log; struct xfs_cil *cil = log->l_cilp; struct xfs_log_item *lip, *next; - xfs_lsn_t xc_commit_lsn; /* * Do all necessary memory allocation before we lock the CIL. @@ -1031,10 +1041,6 @@ xfs_log_commit_cil( xlog_cil_insert_items(log, tp); - xc_commit_lsn = cil->xc_ctx->sequence; - if (commit_lsn) - *commit_lsn = xc_commit_lsn; - if (regrant && !XLOG_FORCED_SHUTDOWN(log)) xfs_log_ticket_regrant(log, tp->t_ticket); else @@ -1057,8 +1063,10 @@ xfs_log_commit_cil( list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) { xfs_trans_del_item(lip); if (lip->li_ops->iop_committing) - lip->li_ops->iop_committing(lip, xc_commit_lsn); + lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence); } + if (commit_seq) + *commit_seq = cil->xc_ctx->sequence; /* xlog_cil_push_background() releases cil->xc_ctx_lock */ xlog_cil_push_background(log); @@ -1075,9 +1083,9 @@ xfs_log_commit_cil( * iclog flush is necessary following this call. */ xfs_lsn_t -xlog_cil_force_lsn( +xlog_cil_force_seq( struct xlog *log, - xfs_lsn_t sequence) + xfs_csn_t sequence) { struct xfs_cil *cil = log->l_cilp; struct xfs_cil_ctx *ctx; @@ -1171,23 +1179,19 @@ out_shutdown: */ bool xfs_log_item_in_current_chkpt( - struct xfs_log_item *lip) + struct xfs_log_item *lip) { - struct xfs_cil_ctx *ctx; + struct xfs_cil *cil = lip->li_mountp->m_log->l_cilp; if (list_empty(&lip->li_cil)) return false; - ctx = lip->li_mountp->m_log->l_cilp->xc_ctx; - /* * li_seq is written on the first commit of a log item to record the * first checkpoint it is written to. Hence if it is different to the * current sequence, we're in a new checkpoint. 
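The CIL hunks above latch the hard throttle: once any committer is parked on xc_push_wait, later committers keep blocking even if space usage has dipped back under the limit, until the push worker installs an empty context and wakes everyone. A userspace model using one mutex/condvar pair where the kernel uses xc_push_lock and a waitqueue; this is a sketch of the policy, not the kernel mechanism:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define BLOCKING_LIMIT 100

static pthread_mutex_t push_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t push_wait = PTHREAD_COND_INITIALIZER;
static bool throttled;		/* latched; models waitqueue_active() */
static long space_used;

static void commit_throttle(long bytes)
{
	pthread_mutex_lock(&push_lock);
	space_used += bytes;
	if (space_used >= BLOCKING_LIMIT)
		throttled = true;	/* hard throttle latches on */
	while (throttled)
		pthread_cond_wait(&push_wait, &push_lock);
	pthread_mutex_unlock(&push_lock);
}

static void push_work(void)
{
	pthread_mutex_lock(&push_lock);
	space_used = 0;		/* switched to a new, empty context */
	if (throttled) {	/* cheap check, as in the patch */
		throttled = false;
		pthread_cond_broadcast(&push_wait);
	}
	pthread_mutex_unlock(&push_lock);
}

static void *pusher(void *arg)
{
	(void)arg;
	push_work();
	return NULL;
}

int main(void)
{
	pthread_t t;

	commit_throttle(60);	/* under the limit: no wait */
	pthread_create(&t, NULL, pusher, NULL);
	commit_throttle(60);	/* may trip the latch and wait for the push */
	pthread_join(t, NULL);
	puts("done");
	return 0;
}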
*/ - if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0) - return false; - return true; + return lip->li_seq == READ_ONCE(cil->xc_current_sequence); } /* diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 1c6fdbf3d506..42cd1602ac25 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -230,7 +230,7 @@ struct xfs_cil; struct xfs_cil_ctx { struct xfs_cil *cil; - xfs_lsn_t sequence; /* chkpt sequence # */ + xfs_csn_t sequence; /* chkpt sequence # */ xfs_lsn_t start_lsn; /* first LSN of chkpt commit */ xfs_lsn_t commit_lsn; /* chkpt commit record lsn */ struct xlog_ticket *ticket; /* chkpt ticket */ @@ -268,10 +268,10 @@ struct xfs_cil { struct xfs_cil_ctx *xc_ctx; spinlock_t xc_push_lock ____cacheline_aligned_in_smp; - xfs_lsn_t xc_push_seq; + xfs_csn_t xc_push_seq; struct list_head xc_committing; wait_queue_head_t xc_commit_wait; - xfs_lsn_t xc_current_sequence; + xfs_csn_t xc_current_sequence; struct work_struct xc_push_work; wait_queue_head_t xc_push_wait; /* background push throttle */ } ____cacheline_aligned_in_smp; @@ -547,19 +547,18 @@ int xlog_cil_init(struct xlog *log); void xlog_cil_init_post_recovery(struct xlog *log); void xlog_cil_destroy(struct xlog *log); bool xlog_cil_empty(struct xlog *log); +void xlog_cil_commit(struct xlog *log, struct xfs_trans *tp, + xfs_csn_t *commit_seq, bool regrant); /* * CIL force routines */ -xfs_lsn_t -xlog_cil_force_lsn( - struct xlog *log, - xfs_lsn_t sequence); +xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence); static inline void xlog_cil_force(struct xlog *log) { - xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence); + xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence); } /* diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 87886b7f77da..e61f28ce3e44 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2061,7 +2061,9 @@ xlog_recover_add_to_cont_trans( old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; old_len = item->ri_buf[item->ri_cnt-1].i_len; - ptr = krealloc(old_ptr, len + old_len, GFP_KERNEL | __GFP_NOFAIL); + ptr = kvrealloc(old_ptr, old_len, len + old_len, GFP_KERNEL); + if (!ptr) + return -ENOMEM; memcpy(&ptr[old_len], dp, len); item->ri_buf[item->ri_cnt-1].i_len += len; item->ri_buf[item->ri_cnt-1].i_addr = ptr; @@ -2457,8 +2459,10 @@ xlog_finish_defer_ops( error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres, dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp); - if (error) + if (error) { + xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); return error; + } /* * Transfer to this new transaction all the dfops we captured @@ -3454,6 +3458,7 @@ xlog_recover_finish( * this) before we get around to xfs_log_mount_cancel. */ xlog_recover_cancel_intents(log); + xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); xfs_alert(log->l_mp, "Failed to recover intents"); return error; } diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 7110507a2b6b..a2a5a0fd9233 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -631,6 +631,47 @@ xfs_check_summary_counts( return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount); } +/* + * Flush and reclaim dirty inodes in preparation for unmount. Inodes and + * internal inode structures can be sitting in the CIL and AIL at this point, + * so we need to unpin them, write them back and/or reclaim them before unmount + * can proceed. + * + * An inode cluster that has been freed can have its buffer still pinned in + * memory because the transaction is still sitting in an iclog.
The stale inodes + * on that buffer will be pinned to the buffer until the transaction hits the + * disk and the callbacks run. Pushing the AIL will skip the stale inodes and + * may never see the pinned buffer, so nothing will push out the iclog and + * unpin the buffer. + * + * Hence we need to force the log to unpin everything first. However, log + * forces don't wait for the discards they issue to complete, so we have to + * explicitly wait for them to complete here as well. + * + * Then we can tell the world we are unmounting so that error handling knows + * that the filesystem is going away and we should error out anything that we + * have been retrying in the background. This will prevent never-ending + * retries in AIL pushing from hanging the unmount. + * + * Finally, we can push the AIL to clean all the remaining dirty objects, then + * reclaim the remaining inodes that are still in memory at this point in time. + */ +static void +xfs_unmount_flush_inodes( + struct xfs_mount *mp) +{ + xfs_log_force(mp, XFS_LOG_SYNC); + xfs_extent_busy_wait_all(mp); + flush_workqueue(xfs_discard_wq); + + mp->m_flags |= XFS_MOUNT_UNMOUNTING; + + xfs_ail_push_all_sync(mp->m_ail); + cancel_delayed_work_sync(&mp->m_reclaim_work); + xfs_reclaim_inodes(mp); + xfs_health_unmount(mp); +} + /* * This function does the following on an initial mount of a file system: * - reads the superblock from disk and init the mount struct @@ -927,9 +968,17 @@ xfs_mountfs( /* * Finish recovering the file system. This part needed to be delayed * until after the root and real-time bitmap inodes were consistently - * read in. + * read in. Temporarily create per-AG space reservations for metadata + * btree shape changes because space freeing transactions (for inode + * inactivation) require the per-AG reservation in lieu of reserving + * blocks. */ + error = xfs_fs_reserve_ag_blocks(mp); + if (error && error == -ENOSPC) + xfs_warn(mp, + "ENOSPC reserving per-AG metadata pool, log recovery may fail."); error = xfs_log_mount_finish(mp); + xfs_fs_unreserve_ag_blocks(mp); if (error) { xfs_warn(mp, "log mount finish failed"); goto out_rtunmount; @@ -1005,7 +1054,7 @@ xfs_mountfs( /* Clean out dquots that might be in memory after quotacheck. */ xfs_qm_unmount(mp); /* - * Cancel all delayed reclaim work and reclaim the inodes directly. + * Flush all inode reclamation work and flush the log. * We have to do this /after/ rtunmount and qm_unmount because those * two will have scheduled delayed reclaim for the rt/quota inodes. * @@ -1015,11 +1064,8 @@ xfs_mountfs( * qm_unmount_quotas and therefore rely on qm_unmount to release the * quota inodes. */ - cancel_delayed_work_sync(&mp->m_reclaim_work); - xfs_reclaim_inodes(mp); - xfs_health_unmount(mp); + xfs_unmount_flush_inodes(mp); out_log_dealloc: - mp->m_flags |= XFS_MOUNT_UNMOUNTING; xfs_log_mount_cancel(mp); out_fail_wait: if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) @@ -1060,47 +1106,7 @@ xfs_unmountfs( xfs_rtunmount_inodes(mp); xfs_irele(mp->m_rootip); - /* - * We can potentially deadlock here if we have an inode cluster - * that has been freed has its buffer still pinned in memory because - * the transaction is still sitting in a iclog. The stale inodes - * on that buffer will be pinned to the buffer until the - * transaction hits the disk and the callbacks run. Pushing the AIL will - * skip the stale inodes and may never see the pinned buffer, so - * nothing will push out the iclog and unpin the buffer. 
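xfs_unmount_flush_inodes() above encodes a strict ordering: force the log first (to unpin stale inode cluster buffers the AIL can never see), then declare the unmount so background error retries stop, then drain the AIL and reclaim. The sketch below keeps only that sequence, with every kernel step replaced by a stub print; the point is the order, not the bodies:

#include <stdio.h>

static void log_force_sync(void)    { puts("1. force+wait log (unpins buffers)"); }
static void busy_extent_wait(void)  { puts("2. wait for busy extents/discards"); }
static void set_unmounting(void)    { puts("3. set UNMOUNTING (stop retries)"); }
static void ail_push_all_sync(void) { puts("4. drain the AIL"); }
static void reclaim_inodes(void)    { puts("5. cancel reclaim work, reclaim inodes"); }

int main(void)
{
	/* Any other order risks hanging on pinned buffers or retry loops. */
	log_force_sync();
	busy_extent_wait();
	set_unmounting();
	ail_push_all_sync();
	reclaim_inodes();
	return 0;
}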
Hence we - * need to force the log here to ensure all items are flushed into the - * AIL before we go any further. - */ - xfs_log_force(mp, XFS_LOG_SYNC); - - /* - * Wait for all busy extents to be freed, including completion of - * any discard operation. - */ - xfs_extent_busy_wait_all(mp); - flush_workqueue(xfs_discard_wq); - - /* - * We now need to tell the world we are unmounting. This will allow - * us to detect that the filesystem is going away and we should error - * out anything that we have been retrying in the background. This will - * prevent neverending retries in AIL pushing from hanging the unmount. - */ - mp->m_flags |= XFS_MOUNT_UNMOUNTING; - - /* - * Flush all pending changes from the AIL. - */ - xfs_ail_push_all_sync(mp->m_ail); - - /* - * Reclaim all inodes. At this point there should be no dirty inodes and - * none should be pinned or locked. Stop background inode reclaim here - * if it is still running. - */ - cancel_delayed_work_sync(&mp->m_reclaim_work); - xfs_reclaim_inodes(mp); - xfs_health_unmount(mp); + xfs_unmount_flush_inodes(mp); xfs_qm_unmount(mp); @@ -1176,8 +1182,7 @@ xfs_fs_writable( int xfs_log_sbcount(xfs_mount_t *mp) { - /* allow this to proceed during the freeze sequence... */ - if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE)) + if (!xfs_log_writable(mp)) return 0; /* diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index dfa429b77ee2..3a6bc9dc11b5 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -406,6 +406,14 @@ extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount, xfs_agnumber_t *maxagi); extern void xfs_unmountfs(xfs_mount_t *); +/* Accessor added for 5.10.y backport */ +static inline uint64_t +xfs_fdblocks_unavailable( + struct xfs_mount *mp) +{ + return mp->m_alloc_set_aside; +} + extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, bool reserved); extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta); diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index b2a9abee8b2b..64e5da33733b 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -1785,6 +1785,29 @@ xfs_qm_vop_chown( xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks); xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1); + /* + * Back when we made quota reservations for the chown, we reserved the + * ondisk blocks + delalloc blocks with the new dquot. Now that we've + * switched the dquots, decrease the new dquot's block reservation + * (having already bumped up the real counter) so that we don't have + * any reservation to give back when we commit. + */ + xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS, + -ip->i_delayed_blks); + + /* + * Give the incore reservation for delalloc blocks back to the old + * dquot. We don't normally handle delalloc quota reservations + * transactionally, so just lock the dquot and subtract from the + * reservation. Dirty the transaction because it's too late to turn + * back now. + */ + tp->t_flags |= XFS_TRANS_DIRTY; + xfs_dqlock(prevdq); + ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks); + prevdq->q_blk.reserved -= ip->i_delayed_blks; + xfs_dqunlock(prevdq); + /* * Take an extra reference, because the inode is going to keep * this dquot pointer even after the trans_commit. 
@@ -1807,84 +1830,39 @@ xfs_qm_vop_chown_reserve( uint flags) { struct xfs_mount *mp = ip->i_mount; - uint64_t delblks; unsigned int blkflags; - struct xfs_dquot *udq_unres = NULL; - struct xfs_dquot *gdq_unres = NULL; - struct xfs_dquot *pdq_unres = NULL; struct xfs_dquot *udq_delblks = NULL; struct xfs_dquot *gdq_delblks = NULL; struct xfs_dquot *pdq_delblks = NULL; - int error; - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); ASSERT(XFS_IS_QUOTA_RUNNING(mp)); - delblks = ip->i_delayed_blks; blkflags = XFS_IS_REALTIME_INODE(ip) ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS; if (XFS_IS_UQUOTA_ON(mp) && udqp && - i_uid_read(VFS_I(ip)) != udqp->q_id) { + i_uid_read(VFS_I(ip)) != udqp->q_id) udq_delblks = udqp; - /* - * If there are delayed allocation blocks, then we have to - * unreserve those from the old dquot, and add them to the - * new dquot. - */ - if (delblks) { - ASSERT(ip->i_udquot); - udq_unres = ip->i_udquot; - } - } + if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp && - i_gid_read(VFS_I(ip)) != gdqp->q_id) { + i_gid_read(VFS_I(ip)) != gdqp->q_id) gdq_delblks = gdqp; - if (delblks) { - ASSERT(ip->i_gdquot); - gdq_unres = ip->i_gdquot; - } - } if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp && - ip->i_d.di_projid != pdqp->q_id) { + ip->i_d.di_projid != pdqp->q_id) pdq_delblks = pdqp; - if (delblks) { - ASSERT(ip->i_pdquot); - pdq_unres = ip->i_pdquot; - } - } - - error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, - udq_delblks, gdq_delblks, pdq_delblks, - ip->i_d.di_nblocks, 1, flags | blkflags); - if (error) - return error; /* - * Do the delayed blks reservations/unreservations now. Since, these - * are done without the help of a transaction, if a reservation fails - * its previous reservations won't be automatically undone by trans - * code. So, we have to do it manually here. + * Reserve enough quota to handle blocks on disk and reserved for a + * delayed allocation. We'll actually transfer the delalloc + * reservation between dquots at chown time, even though that part is + * only semi-transactional. */ - if (delblks) { - /* - * Do the reservations first. Unreservation can't fail. 
- */ - ASSERT(udq_delblks || gdq_delblks || pdq_delblks); - ASSERT(udq_unres || gdq_unres || pdq_unres); - error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, - udq_delblks, gdq_delblks, pdq_delblks, - (xfs_qcnt_t)delblks, 0, flags | blkflags); - if (error) - return error; - xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, - udq_unres, gdq_unres, pdq_unres, - -((xfs_qcnt_t)delblks), 0, blkflags); - } - - return 0; + return xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, udq_delblks, + gdq_delblks, pdq_delblks, + ip->i_d.di_nblocks + ip->i_delayed_blks, + 1, blkflags | flags); } int diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 6fa05fb78189..aa46b75d75af 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1503,7 +1503,8 @@ xfs_reflink_unshare( if (error) goto out; - error = filemap_write_and_wait_range(inode->i_mapping, offset, len); + error = filemap_write_and_wait_range(inode->i_mapping, offset, + offset + len - 1); if (error) goto out; diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 6a7724a3560a..a21abc472d5b 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -199,10 +199,12 @@ xfs_fs_show_options( seq_printf(m, ",swidth=%d", (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); - if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD)) - seq_puts(m, ",usrquota"); - else if (mp->m_qflags & XFS_UQUOTA_ACCT) - seq_puts(m, ",uqnoenforce"); + if (mp->m_qflags & XFS_UQUOTA_ACCT) { + if (mp->m_qflags & XFS_UQUOTA_ENFD) + seq_puts(m, ",usrquota"); + else + seq_puts(m, ",uqnoenforce"); + } if (mp->m_qflags & XFS_PQUOTA_ACCT) { if (mp->m_qflags & XFS_PQUOTA_ENFD) @@ -755,6 +757,7 @@ xfs_fs_sync_fs( int wait) { struct xfs_mount *mp = XFS_M(sb); + int error; /* * Doing anything during the async pass would be counterproductive. @@ -762,7 +765,10 @@ xfs_fs_sync_fs( if (!wait) return 0; - xfs_log_force(mp, XFS_LOG_SYNC); + error = xfs_log_force(mp, XFS_LOG_SYNC); + if (error) + return error; + if (laptop_mode) { /* * The disk must be active because we're syncing. @@ -1153,6 +1159,22 @@ suffix_kstrtoint( return ret; } +static inline void +xfs_fs_warn_deprecated( + struct fs_context *fc, + struct fs_parameter *param, + uint64_t flag, + bool value) +{ + /* Don't print the warning if reconfiguring and current mount point + * already had the flag set + */ + if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && + !!(XFS_M(fc->root->d_sb)->m_flags & flag) == value) + return; + xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); +} + /* * Set mount state from a mount option. 
* @@ -1163,7 +1185,7 @@ xfs_fc_parse_param( struct fs_context *fc, struct fs_parameter *param) { - struct xfs_mount *mp = fc->s_fs_info; + struct xfs_mount *parsing_mp = fc->s_fs_info; struct fs_parse_result result; int size = 0; int opt; @@ -1174,142 +1196,142 @@ xfs_fc_parse_param( switch (opt) { case Opt_logbufs: - mp->m_logbufs = result.uint_32; + parsing_mp->m_logbufs = result.uint_32; return 0; case Opt_logbsize: - if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize)) + if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize)) return -EINVAL; return 0; case Opt_logdev: - kfree(mp->m_logname); - mp->m_logname = kstrdup(param->string, GFP_KERNEL); - if (!mp->m_logname) + kfree(parsing_mp->m_logname); + parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL); + if (!parsing_mp->m_logname) return -ENOMEM; return 0; case Opt_rtdev: - kfree(mp->m_rtname); - mp->m_rtname = kstrdup(param->string, GFP_KERNEL); - if (!mp->m_rtname) + kfree(parsing_mp->m_rtname); + parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL); + if (!parsing_mp->m_rtname) return -ENOMEM; return 0; case Opt_allocsize: if (suffix_kstrtoint(param->string, 10, &size)) return -EINVAL; - mp->m_allocsize_log = ffs(size) - 1; - mp->m_flags |= XFS_MOUNT_ALLOCSIZE; + parsing_mp->m_allocsize_log = ffs(size) - 1; + parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE; return 0; case Opt_grpid: case Opt_bsdgroups: - mp->m_flags |= XFS_MOUNT_GRPID; + parsing_mp->m_flags |= XFS_MOUNT_GRPID; return 0; case Opt_nogrpid: case Opt_sysvgroups: - mp->m_flags &= ~XFS_MOUNT_GRPID; + parsing_mp->m_flags &= ~XFS_MOUNT_GRPID; return 0; case Opt_wsync: - mp->m_flags |= XFS_MOUNT_WSYNC; + parsing_mp->m_flags |= XFS_MOUNT_WSYNC; return 0; case Opt_norecovery: - mp->m_flags |= XFS_MOUNT_NORECOVERY; + parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY; return 0; case Opt_noalign: - mp->m_flags |= XFS_MOUNT_NOALIGN; + parsing_mp->m_flags |= XFS_MOUNT_NOALIGN; return 0; case Opt_swalloc: - mp->m_flags |= XFS_MOUNT_SWALLOC; + parsing_mp->m_flags |= XFS_MOUNT_SWALLOC; return 0; case Opt_sunit: - mp->m_dalign = result.uint_32; + parsing_mp->m_dalign = result.uint_32; return 0; case Opt_swidth: - mp->m_swidth = result.uint_32; + parsing_mp->m_swidth = result.uint_32; return 0; case Opt_inode32: - mp->m_flags |= XFS_MOUNT_SMALL_INUMS; + parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS; return 0; case Opt_inode64: - mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; + parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; return 0; case Opt_nouuid: - mp->m_flags |= XFS_MOUNT_NOUUID; + parsing_mp->m_flags |= XFS_MOUNT_NOUUID; return 0; case Opt_largeio: - mp->m_flags |= XFS_MOUNT_LARGEIO; + parsing_mp->m_flags |= XFS_MOUNT_LARGEIO; return 0; case Opt_nolargeio: - mp->m_flags &= ~XFS_MOUNT_LARGEIO; + parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO; return 0; case Opt_filestreams: - mp->m_flags |= XFS_MOUNT_FILESTREAMS; + parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS; return 0; case Opt_noquota: - mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; - mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; - mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE; + parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; + parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; + parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE; return 0; case Opt_quota: case Opt_uquota: case Opt_usrquota: - mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | + parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | XFS_UQUOTA_ENFD); return 0; case Opt_qnoenforce: case Opt_uqnoenforce: - mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); - mp->m_qflags &= 
~XFS_UQUOTA_ENFD; + parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); + parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD; return 0; case Opt_pquota: case Opt_prjquota: - mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | + parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | XFS_PQUOTA_ENFD); return 0; case Opt_pqnoenforce: - mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); - mp->m_qflags &= ~XFS_PQUOTA_ENFD; + parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); + parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD; return 0; case Opt_gquota: case Opt_grpquota: - mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | + parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | XFS_GQUOTA_ENFD); return 0; case Opt_gqnoenforce: - mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); - mp->m_qflags &= ~XFS_GQUOTA_ENFD; + parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); + parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD; return 0; case Opt_discard: - mp->m_flags |= XFS_MOUNT_DISCARD; + parsing_mp->m_flags |= XFS_MOUNT_DISCARD; return 0; case Opt_nodiscard: - mp->m_flags &= ~XFS_MOUNT_DISCARD; + parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD; return 0; #ifdef CONFIG_FS_DAX case Opt_dax: - xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS); + xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS); return 0; case Opt_dax_enum: - xfs_mount_set_dax_mode(mp, result.uint_32); + xfs_mount_set_dax_mode(parsing_mp, result.uint_32); return 0; #endif /* Following mount options will be removed in September 2025 */ case Opt_ikeep: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags |= XFS_MOUNT_IKEEP; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true); + parsing_mp->m_flags |= XFS_MOUNT_IKEEP; return 0; case Opt_noikeep: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags &= ~XFS_MOUNT_IKEEP; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false); + parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP; return 0; case Opt_attr2: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags |= XFS_MOUNT_ATTR2; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true); + parsing_mp->m_flags |= XFS_MOUNT_ATTR2; return 0; case Opt_noattr2: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags &= ~XFS_MOUNT_ATTR2; - mp->m_flags |= XFS_MOUNT_NOATTR2; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true); + parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2; + parsing_mp->m_flags |= XFS_MOUNT_NOATTR2; return 0; default: - xfs_warn(mp, "unknown mount option [%s].", param->key); + xfs_warn(parsing_mp, "unknown mount option [%s].", param->key); return -EINVAL; } @@ -1693,7 +1715,15 @@ static int xfs_remount_ro( struct xfs_mount *mp) { - int error; + struct xfs_eofblocks eofb = { + .eof_flags = XFS_EOF_FLAGS_SYNC, + }; + int error; + + /* Flush all the dirty data to disk. */ + error = sync_filesystem(mp->m_super); + if (error) + return error; /* * Cancel background eofb scanning so it cannot race with the final @@ -1701,8 +1731,13 @@ xfs_remount_ro( */ xfs_stop_block_reaping(mp); - /* Get rid of any leftover CoW reservations... */ - error = xfs_icache_free_cowblocks(mp, NULL); + /* + * Clear out all remaining COW staging extents and speculative post-EOF + * preallocations so that we don't leave inodes requiring inactivation + * cleanups during reclaim on a read-only mount. We must process every + * cached inode, so this requires a synchronous cache scan. 
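The xfs_fs_warn_deprecated() helper used in the deprecated-option cases above keeps remounts from re-logging warnings when the option would not change the flag the mount already carries. A userspace rendering of the same check; the flag, struct, and function names are invented for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_IKEEP (1u << 0)

struct mount { uint32_t flags; };

static void warn_deprecated(const struct mount *cur, bool reconfigure,
			    uint32_t flag, bool value, const char *key)
{
	/* reconfigure with no effective change: suppress the warning */
	if (reconfigure && !!(cur->flags & flag) == value)
		return;
	fprintf(stderr, "%s mount option is deprecated.\n", key);
}

int main(void)
{
	struct mount m = { .flags = FLAG_IKEEP };

	warn_deprecated(&m, false, FLAG_IKEEP, true, "ikeep");	 /* initial mount: warn */
	warn_deprecated(&m, true, FLAG_IKEEP, true, "ikeep");	 /* no-op remount: quiet */
	warn_deprecated(&m, true, FLAG_IKEEP, false, "noikeep"); /* changes flag: warn */
	return 0;
}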
+ */ + error = xfs_icache_free_cowblocks(mp, &eofb); if (error) { xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); return error; @@ -1760,8 +1795,6 @@ xfs_fc_reconfigure( if (error) return error; - sync_filesystem(mp->m_super); - /* inode32 -> inode64 */ if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) { @@ -1908,7 +1941,7 @@ xfs_init_zones(void) if (!xfs_ifork_zone) goto out_destroy_da_state_zone; - xfs_trans_zone = kmem_cache_create("xf_trans", + xfs_trans_zone = kmem_cache_create("xfs_trans", sizeof(struct xfs_trans), 0, 0, NULL); if (!xfs_trans_zone) diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c index 8e88a7ca387e..8d3abf06c54f 100644 --- a/fs/xfs/xfs_symlink.c +++ b/fs/xfs/xfs_symlink.c @@ -300,6 +300,7 @@ xfs_symlink( } ASSERT(pathlen == 0); } + i_size_write(VFS_I(ip), ip->i_d.di_size); /* * Create the directory entry for the symlink. diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index c94e71f741b6..73a1de7ceefc 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -68,6 +68,7 @@ xfs_trans_free( xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false); trace_xfs_trans_free(tp, _RET_IP_); + xfs_trans_clear_context(tp); if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT)) sb_end_intwrite(tp->t_mountp->m_super); xfs_trans_free_dqinfo(tp); @@ -119,7 +120,8 @@ xfs_trans_dup( ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used; tp->t_rtx_res = tp->t_rtx_res_used; - ntp->t_pflags = tp->t_pflags; + + xfs_trans_switch_context(tp, ntp); /* move deferred ops over to the new tp */ xfs_defer_move(ntp, tp); @@ -153,9 +155,6 @@ xfs_trans_reserve( int error = 0; bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; - /* Mark this thread as being in a transaction */ - current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); - /* * Attempt to reserve the needed disk blocks by decrementing * the number needed from the number available. 
This will @@ -163,10 +162,8 @@ xfs_trans_reserve( */ if (blocks > 0) { error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd); - if (error != 0) { - current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); + if (error != 0) return -ENOSPC; - } tp->t_blk_res += blocks; } @@ -240,9 +237,6 @@ undo_blocks: xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd); tp->t_blk_res = 0; } - - current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); - return error; } @@ -266,6 +260,7 @@ xfs_trans_alloc( tp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL); if (!(flags & XFS_TRANS_NO_WRITECOUNT)) sb_start_intwrite(mp->m_super); + xfs_trans_set_context(tp); /* * Zero-reservation ("empty") transactions can't modify anything, so @@ -620,6 +615,9 @@ xfs_trans_unreserve_and_mod_sb( /* apply remaining deltas */ spin_lock(&mp->m_sb_lock); + mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta; + mp->m_sb.sb_icount += idelta; + mp->m_sb.sb_ifree += ifreedelta; mp->m_sb.sb_frextents += rtxdelta; mp->m_sb.sb_dblocks += tp->t_dblocks_delta; mp->m_sb.sb_agcount += tp->t_agcount_delta; @@ -834,7 +832,7 @@ __xfs_trans_commit( bool regrant) { struct xfs_mount *mp = tp->t_mountp; - xfs_lsn_t commit_lsn = -1; + xfs_csn_t commit_seq = 0; int error = 0; int sync = tp->t_flags & XFS_TRANS_SYNC; @@ -876,9 +874,8 @@ __xfs_trans_commit( xfs_trans_apply_sb_deltas(tp); xfs_trans_apply_dquot_deltas(tp); - xfs_log_commit_cil(mp, tp, &commit_lsn, regrant); + xlog_cil_commit(mp->m_log, tp, &commit_seq, regrant); - current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); xfs_trans_free(tp); /* @@ -886,7 +883,7 @@ __xfs_trans_commit( * log out now and wait for it. */ if (sync) { - error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL); + error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL); XFS_STATS_INC(mp, xs_trans_sync); } else { XFS_STATS_INC(mp, xs_trans_async); @@ -910,7 +907,6 @@ out_unreserve: xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket); tp->t_ticket = NULL; } - current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); xfs_trans_free_items(tp, !!error); xfs_trans_free(tp); @@ -970,9 +966,6 @@ xfs_trans_cancel( tp->t_ticket = NULL; } - /* mark this thread as no longer being in a transaction */ - current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); - xfs_trans_free_items(tp, dirty); xfs_trans_free(tp); } diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 084658946cc8..97485559008b 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -43,7 +43,7 @@ struct xfs_log_item { struct list_head li_cil; /* CIL pointers */ struct xfs_log_vec *li_lv; /* active log vector */ struct xfs_log_vec *li_lv_shadow; /* standby vector */ - xfs_lsn_t li_seq; /* CIL commit seq */ + xfs_csn_t li_seq; /* CIL commit seq */ }; /* @@ -69,7 +69,7 @@ struct xfs_item_ops { void (*iop_pin)(struct xfs_log_item *); void (*iop_unpin)(struct xfs_log_item *, int remove); uint (*iop_push)(struct xfs_log_item *, struct list_head *); - void (*iop_committing)(struct xfs_log_item *, xfs_lsn_t commit_lsn); + void (*iop_committing)(struct xfs_log_item *lip, xfs_csn_t seq); void (*iop_release)(struct xfs_log_item *); xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t); int (*iop_recover)(struct xfs_log_item *lip, @@ -268,4 +268,34 @@ xfs_trans_item_relog( return lip->li_ops->iop_relog(lip, tp); } +static inline void +xfs_trans_set_context( + struct xfs_trans *tp) +{ + ASSERT(current->journal_info == NULL); + tp->t_pflags = memalloc_nofs_save(); + current->journal_info = tp; 
+} + +static inline void +xfs_trans_clear_context( + struct xfs_trans *tp) +{ + if (current->journal_info == tp) { + memalloc_nofs_restore(tp->t_pflags); + current->journal_info = NULL; + } +} + +static inline void +xfs_trans_switch_context( + struct xfs_trans *old_tp, + struct xfs_trans *new_tp) +{ + ASSERT(current->journal_info == old_tp); + new_tp->t_pflags = old_tp->t_pflags; + old_tp->t_pflags = 0; + current->journal_info = new_tp; +} + #endif /* __XFS_TRANS_H__ */ diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index fe45b0c3970c..288ea38c43ad 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c @@ -615,7 +615,6 @@ xfs_dqresv_check( return QUOTA_NL_ISOFTLONGWARN; } - res->warnings++; return QUOTA_NL_ISOFTWARN; } diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index 0ed752c6ef25..57ab34b1a9d4 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -32,6 +32,17 @@ static inline int zonefs_zone_mgmt(struct inode *inode, lockdep_assert_held(&zi->i_truncate_mutex); + /* + * With ZNS drives, closing an explicitly open zone that has not been + * written will change the zone state to "closed", that is, the zone + * will remain active. Since this can then cause failure of explicit + * open operation on other zones if the drive active zone resources + * are exceeded, make sure that the zone does not remain active by + * resetting it. + */ + if (op == REQ_OP_ZONE_CLOSE && !zi->i_wpoffset) + op = REQ_OP_ZONE_RESET; + ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector, zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS); if (ret) { @@ -57,15 +68,49 @@ static inline void zonefs_i_size_write(struct inode *inode, loff_t isize) zi->i_flags &= ~ZONEFS_ZONE_OPEN; } -static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, - unsigned int flags, struct iomap *iomap, - struct iomap *srcmap) +static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset, + loff_t length, unsigned int flags, + struct iomap *iomap, struct iomap *srcmap) { struct zonefs_inode_info *zi = ZONEFS_I(inode); struct super_block *sb = inode->i_sb; loff_t isize; - /* All I/Os should always be within the file maximum size */ + /* + * All blocks are always mapped below EOF. If reading past EOF, + * act as if there is a hole up to the file maximum size. + */ + mutex_lock(&zi->i_truncate_mutex); + iomap->bdev = inode->i_sb->s_bdev; + iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize); + isize = i_size_read(inode); + if (iomap->offset >= isize) { + iomap->type = IOMAP_HOLE; + iomap->addr = IOMAP_NULL_ADDR; + iomap->length = length; + } else { + iomap->type = IOMAP_MAPPED; + iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset; + iomap->length = isize - iomap->offset; + } + mutex_unlock(&zi->i_truncate_mutex); + + return 0; +} + +static const struct iomap_ops zonefs_read_iomap_ops = { + .iomap_begin = zonefs_read_iomap_begin, +}; + +static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset, + loff_t length, unsigned int flags, + struct iomap *iomap, struct iomap *srcmap) +{ + struct zonefs_inode_info *zi = ZONEFS_I(inode); + struct super_block *sb = inode->i_sb; + loff_t isize; + + /* All write I/Os should always be within the file maximum size */ if (WARN_ON_ONCE(offset + length > zi->i_max_size)) return -EIO; @@ -75,7 +120,7 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, * operation. 
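
The xfs_trans_set_context()/xfs_trans_clear_context()/xfs_trans_switch_context() helpers above centralize what xfs_trans_reserve(), __xfs_trans_commit() and xfs_trans_cancel() previously open-coded with current_set_flags_nested()/current_restore_flags_nested(): every transaction runs inside a NOFS allocation scope so memory reclaim cannot re-enter the filesystem, and current->journal_info records which transaction owns that scope. The underlying primitive is the generic scoped-allocation API from <linux/sched/mm.h>; a minimal sketch of the pattern outside XFS (the function below is hypothetical, not from this patch):

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    static int do_transactional_work(void)
    {
            unsigned int nofs_flags;
            void *buf;
            int ret = 0;

            /*
             * Enter a NOFS scope: allocations below implicitly behave as
             * GFP_NOFS, so direct reclaim cannot recurse into the
             * filesystem while the "transaction" runs.
             */
            nofs_flags = memalloc_nofs_save();

            buf = kmalloc(4096, GFP_KERNEL); /* effectively GFP_NOFS here */
            if (!buf)
                    ret = -ENOMEM;
            kfree(buf);

            /* Leave the scope, restoring the caller's context. */
            memalloc_nofs_restore(nofs_flags);
            return ret;
    }

Stashing the saved flags in tp->t_pflags and the owning transaction in current->journal_info is what lets the scope outlive a single function and be handed over to the duplicate transaction in xfs_trans_dup().
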
*/ if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ && - (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT))) + !(flags & IOMAP_DIRECT))) return -EIO; /* @@ -84,45 +129,42 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, * write pointer) and unwriten beyond. */ mutex_lock(&zi->i_truncate_mutex); - isize = i_size_read(inode); - if (offset >= isize) - iomap->type = IOMAP_UNWRITTEN; - else - iomap->type = IOMAP_MAPPED; - if (flags & IOMAP_WRITE) - length = zi->i_max_size - offset; - else - length = min(length, isize - offset); - mutex_unlock(&zi->i_truncate_mutex); - - iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize); - iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset; iomap->bdev = inode->i_sb->s_bdev; + iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize); iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset; + isize = i_size_read(inode); + if (iomap->offset >= isize) { + iomap->type = IOMAP_UNWRITTEN; + iomap->length = zi->i_max_size - iomap->offset; + } else { + iomap->type = IOMAP_MAPPED; + iomap->length = isize - iomap->offset; + } + mutex_unlock(&zi->i_truncate_mutex); return 0; } -static const struct iomap_ops zonefs_iomap_ops = { - .iomap_begin = zonefs_iomap_begin, +static const struct iomap_ops zonefs_write_iomap_ops = { + .iomap_begin = zonefs_write_iomap_begin, }; static int zonefs_readpage(struct file *unused, struct page *page) { - return iomap_readpage(page, &zonefs_iomap_ops); + return iomap_readpage(page, &zonefs_read_iomap_ops); } static void zonefs_readahead(struct readahead_control *rac) { - iomap_readahead(rac, &zonefs_iomap_ops); + iomap_readahead(rac, &zonefs_read_iomap_ops); } /* * Map blocks for page writeback. This is used only on conventional zone files, * which implies that the page range can only be within the fixed inode size. 
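
The split above separates what used to be a single zonefs_iomap_begin() into read and write variants with deliberately different answers for offsets at or beyond EOF: readers must see a hole (in zonefs nothing valid ever exists past i_size, which tracks the zone write pointer), while writers get an unwritten mapping out to the zone's maximum size so appends can extend the file. A condensed sketch of the two decisions, with locking and block alignment stripped out (illustrative only, not the patch's code):

    #include <linux/iomap.h>

    static void classify_extent(loff_t off, loff_t len, loff_t isize,
                                loff_t max_size, bool for_write,
                                struct iomap *iomap)
    {
            if (off >= isize) {
                    /* Readers see a hole; writers may append. */
                    iomap->type = for_write ? IOMAP_UNWRITTEN : IOMAP_HOLE;
                    iomap->length = for_write ? max_size - off : len;
            } else {
                    /* Below EOF everything is always mapped in zonefs. */
                    iomap->type = IOMAP_MAPPED;
                    iomap->length = isize - off;
            }
    }

The same asymmetry explains the caller changes further down: readpage/readahead and swapfile activation take zonefs_read_iomap_ops, while page_mkwrite, buffered writes and direct writes take zonefs_write_iomap_ops.
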
*/ -static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc, - struct inode *inode, loff_t offset) +static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc, + struct inode *inode, loff_t offset) { struct zonefs_inode_info *zi = ZONEFS_I(inode); @@ -136,12 +178,12 @@ static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc, offset < wpc->iomap.offset + wpc->iomap.length) return 0; - return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset, - IOMAP_WRITE, &wpc->iomap, NULL); + return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset, + IOMAP_WRITE, &wpc->iomap, NULL); } static const struct iomap_writeback_ops zonefs_writeback_ops = { - .map_blocks = zonefs_map_blocks, + .map_blocks = zonefs_write_map_blocks, }; static int zonefs_writepage(struct page *page, struct writeback_control *wbc) @@ -171,7 +213,8 @@ static int zonefs_swap_activate(struct swap_info_struct *sis, return -EINVAL; } - return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops); + return iomap_swapfile_activate(sis, swap_file, span, + &zonefs_read_iomap_ops); } static const struct address_space_operations zonefs_file_aops = { @@ -397,14 +440,22 @@ static void __zonefs_io_error(struct inode *inode, bool write) struct super_block *sb = inode->i_sb; struct zonefs_sb_info *sbi = ZONEFS_SB(sb); unsigned int noio_flag; - unsigned int nr_zones = - zi->i_zone_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT); + unsigned int nr_zones = 1; struct zonefs_ioerr_data err = { .inode = inode, .write = write, }; int ret; + /* + * The only files that have more than one zone are conventional zone + * files with aggregated conventional zones, for which the inode zone + * size is always larger than the device zone size. + */ + if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev)) + nr_zones = zi->i_zone_size >> + (sbi->s_zone_sectors_shift + SECTOR_SHIFT); + /* * Memory allocations in blkdev_report_zones() can trigger a memory * reclaim which may in turn cause a recursion into zonefs as well as @@ -601,7 +652,7 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf) /* Serialize against truncates */ down_read(&zi->i_mmap_sem); - ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops); + ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops); up_read(&zi->i_mmap_sem); sb_end_pagefault(inode->i_sb); @@ -858,7 +909,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from) if (append) ret = zonefs_file_dio_append(iocb, from); else - ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops, + ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops, &zonefs_write_dio_ops, sync); if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && (ret > 0 || ret == -EIOCBQUEUED)) { @@ -900,7 +951,7 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb, if (ret <= 0) goto inode_unlock; - ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops); + ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops); if (ret > 0) iocb->ki_pos += ret; else if (ret == -EIO) @@ -993,7 +1044,7 @@ static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) goto inode_unlock; } file_accessed(iocb->ki_filp); - ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops, + ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops, &zonefs_read_dio_ops, is_sync_kiocb(iocb)); } else { ret = generic_file_read_iter(iocb, to); @@ -1152,6 +1203,7 @@ static struct inode *zonefs_alloc_inode(struct super_block *sb) mutex_init(&zi->i_truncate_mutex); init_rwsem(&zi->i_mmap_sem); zi->i_wr_refcnt = 0; + 
zi->i_flags = 0; return &zi->i_vnode; } @@ -1306,12 +1358,13 @@ static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode, inc_nlink(parent); } -static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone, - enum zonefs_ztype type) +static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone, + enum zonefs_ztype type) { struct super_block *sb = inode->i_sb; struct zonefs_sb_info *sbi = ZONEFS_SB(sb); struct zonefs_inode_info *zi = ZONEFS_I(inode); + int ret = 0; inode->i_ino = zone->start >> sbi->s_zone_sectors_shift; inode->i_mode = S_IFREG | sbi->s_perm; @@ -1319,6 +1372,14 @@ static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone, zi->i_ztype = type; zi->i_zsector = zone->start; zi->i_zone_size = zone->len << SECTOR_SHIFT; + if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT && + !(sbi->s_features & ZONEFS_F_AGGRCNV)) { + zonefs_err(sb, + "zone size %llu doesn't match device's zone sectors %llu\n", + zi->i_zone_size, + bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT); + return -EINVAL; + } zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE, zone->capacity << SECTOR_SHIFT); @@ -1336,6 +1397,22 @@ static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone, sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes); sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits; sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits; + + /* + * For sequential zones, make sure that any open zone is closed first + * to ensure that the initial number of open zones is 0, in sync with + * the open zone accounting done when the mount option + * ZONEFS_MNTOPT_EXPLICIT_OPEN is used. + */ + if (type == ZONEFS_ZTYPE_SEQ && + (zone->cond == BLK_ZONE_COND_IMP_OPEN || + zone->cond == BLK_ZONE_COND_EXP_OPEN)) { + mutex_lock(&zi->i_truncate_mutex); + ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE); + mutex_unlock(&zi->i_truncate_mutex); + } + + return ret; } static struct dentry *zonefs_create_inode(struct dentry *parent, @@ -1345,20 +1422,27 @@ static struct dentry *zonefs_create_inode(struct dentry *parent, struct inode *dir = d_inode(parent); struct dentry *dentry; struct inode *inode; + int ret = -ENOMEM; dentry = d_alloc_name(parent, name); if (!dentry) - return NULL; + return ERR_PTR(ret); inode = new_inode(parent->d_sb); if (!inode) goto dput; inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime; - if (zone) - zonefs_init_file_inode(inode, zone, type); - else + if (zone) { + ret = zonefs_init_file_inode(inode, zone, type); + if (ret) { + iput(inode); + goto dput; + } + } else { zonefs_init_dir_inode(dir, inode, type); + } + d_add(dentry, inode); dir->i_size++; @@ -1367,7 +1451,7 @@ static struct dentry *zonefs_create_inode(struct dentry *parent, dput: dput(dentry); - return NULL; + return ERR_PTR(ret); } struct zonefs_zone_data { @@ -1387,7 +1471,7 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, struct blk_zone *zone, *next, *end; const char *zgroup_name; char *file_name; - struct dentry *dir; + struct dentry *dir, *dent; unsigned int n = 0; int ret; @@ -1405,8 +1489,8 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, zgroup_name = "seq"; dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type); - if (!dir) { - ret = -ENOMEM; + if (IS_ERR(dir)) { + ret = PTR_ERR(dir); goto free; } @@ -1452,8 +1536,9 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, * Use the file number within its group as file name. 
*/ snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n); - if (!zonefs_create_inode(dir, file_name, zone, type)) { - ret = -ENOMEM; + dent = zonefs_create_inode(dir, file_name, zone, type); + if (IS_ERR(dent)) { + ret = PTR_ERR(dent); goto free; } @@ -1670,11 +1755,6 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent) sbi->s_mount_opts = ZONEFS_MNTOPT_ERRORS_RO; sbi->s_max_open_zones = bdev_max_open_zones(sb->s_bdev); atomic_set(&sbi->s_open_zones, 0); - if (!sbi->s_max_open_zones && - sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) { - zonefs_info(sb, "No open zones limit. Ignoring explicit_open mount option\n"); - sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN; - } ret = zonefs_read_super(sb); if (ret) @@ -1693,6 +1773,12 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent) zonefs_info(sb, "Mounting %u zones", blkdev_nr_zones(sb->s_bdev->bd_disk)); + if (!sbi->s_max_open_zones && + sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) { + zonefs_info(sb, "No open zones limit. Ignoring explicit_open mount option\n"); + sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN; + } + /* Create root directory inode */ ret = -ENOMEM; inode = new_inode(sb); diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index a6a9373ab863..d9417abf4cd0 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -16,7 +16,7 @@ #include #include -/* Support CPPCv2 and CPPCv3 */ +/* CPPCv2 and CPPCv3 support */ #define CPPC_V2_REV 2 #define CPPC_V3_REV 3 #define CPPC_V2_NUM_ENT 21 diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index 34fb3431a8f3..292a5c40bd0c 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -71,7 +71,7 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb); void ghes_unregister_vendor_record_notifier(struct notifier_block *nb); #endif -int ghes_estatus_pool_init(int num_ghes); +int ghes_estatus_pool_init(unsigned int num_ghes); /* From drivers/edac/ghes_edac.c */ diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index 0e7316a86240..21aa26e7c988 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -35,9 +35,6 @@ static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p) unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); - if (READ_ONCE(*p) & mask) - return 1; - old = atomic_long_fetch_or(mask, (atomic_long_t *)p); return !!(old & mask); } @@ -48,9 +45,6 @@ static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p) unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); - if (!(READ_ONCE(*p) & mask)) - return 0; - old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p); return !!(old & mask); } diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index d16302d3eb59..72f1e2a8c167 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -114,7 +114,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt, /** * memory_intersects - checks if the region occupied by an object intersects * with another memory region - * @begin: virtual address of the beginning of the memory regien + * @begin: virtual address of the beginning of the memory region * @end: virtual address of the end of the memory region * @virt: virtual address of the memory object * @size: size of the memory object @@ -127,7 +127,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt, { void *vend = virt + size; - return 
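
The asm-generic/bitops/atomic.h hunks above delete the READ_ONCE() fast paths from test_and_set_bit() and test_and_clear_bit(). The early return skipped the atomic read-modify-write entirely, so in precisely the failed case these helpers stopped providing the full memory-barrier semantics they are documented to have; always executing the atomic op restores ordering on both outcomes. A sketch of the kind of hand-off that relies on this (all names invented for illustration):

    #include <linux/bitops.h>
    #include <linux/bits.h>

    static unsigned long state = BIT(0); /* bit 0: claimed, initially by the producer */
    static int payload;

    static void producer_release(void)
    {
            payload = 42;                /* publish the data... */
            clear_bit_unlock(0, &state); /* ...then release the bit */
    }

    static bool consumer_try_claim(void)
    {
            /*
             * Must be fully ordered whether it succeeds or fails; the
             * deleted fast path was unordered in the failure case.
             */
            if (test_and_set_bit(0, &state))
                    return false;
            return payload == 42; /* guaranteed visible on success */
    }
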
(virt >= begin && virt < end) || (vend >= begin && vend < end); + if (virt < end && vend > begin) + return true; + + return false; } /** diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 6661ee1cff47..f40c9534f20b 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -205,12 +205,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table); #define tlb_needs_table_invalidate() (true) #endif +void tlb_remove_table_sync_one(void); + #else #ifdef tlb_needs_table_invalidate #error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE #endif +static inline void tlb_remove_table_sync_one(void) { } + #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */ @@ -563,10 +567,14 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ do { \ unsigned long _sz = huge_page_size(h); \ - if (_sz == PMD_SIZE) \ - tlb_flush_pmd_range(tlb, address, _sz); \ - else if (_sz == PUD_SIZE) \ + if (_sz >= P4D_SIZE) \ + tlb_flush_p4d_range(tlb, address, _sz); \ + else if (_sz >= PUD_SIZE) \ tlb_flush_pud_range(tlb, address, _sz); \ + else if (_sz >= PMD_SIZE) \ + tlb_flush_pmd_range(tlb, address, _sz); \ + else \ + tlb_flush_pte_range(tlb, address, _sz); \ __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index b30b08b73c01..5317ac993486 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -341,6 +341,7 @@ #define DATA_DATA \ *(.xiptext) \ *(DATA_MAIN) \ + *(.data..decrypted) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ MEM_KEEP(init.data*) \ @@ -1014,7 +1015,6 @@ #ifdef CONFIG_AMD_MEM_ENCRYPT #define PERCPU_DECRYPTED_SECTION \ . = ALIGN(PAGE_SIZE); \ - *(.data..decrypted) \ *(.data..percpu..decrypted) \ . 
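
The memory_intersects() rewrite above is a correctness fix, not a style change: the old expression (virt >= begin && virt < end) || (vend >= begin && vend < end) only asks whether one of the object's endpoints falls inside the region, so an object that completely covers the region was reported as non-intersecting. virt < end && vend > begin is the standard overlap test for half-open intervals. A compilable check of the case the old code missed (userspace, for illustration):

    #include <assert.h>
    #include <stdbool.h>

    static bool intersects(unsigned long begin, unsigned long end,
                           unsigned long virt, unsigned long size)
    {
            unsigned long vend = virt + size;

            return virt < end && vend > begin;
    }

    int main(void)
    {
            /*
             * Object 0x1000..0x5000 fully covers region 0x2000..0x3000:
             * neither endpoint lies inside the region, so the old
             * endpoint-only test returned false.
             */
            assert(intersects(0x2000, 0x3000, 0x1000, 0x4000));
            return 0;
    }
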
= ALIGN(PAGE_SIZE); #else diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h index bc3fb59442ce..4e30e1799e61 100644 --- a/include/crypto/blake2s.h +++ b/include/crypto/blake2s.h @@ -101,7 +101,4 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key, blake2s_final(&state, out); } -void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, - const size_t keylen); - #endif /* _CRYPTO_BLAKE2S_H */ diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h index dabaee698718..b3ea73b81944 100644 --- a/include/crypto/chacha.h +++ b/include/crypto/chacha.h @@ -47,12 +47,19 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds) hchacha_block_generic(state, out, nrounds); } +enum chacha_constants { /* expand 32-byte k */ + CHACHA_CONSTANT_EXPA = 0x61707865U, + CHACHA_CONSTANT_ND_3 = 0x3320646eU, + CHACHA_CONSTANT_2_BY = 0x79622d32U, + CHACHA_CONSTANT_TE_K = 0x6b206574U +}; + static inline void chacha_init_consts(u32 *state) { - state[0] = 0x61707865; /* "expa" */ - state[1] = 0x3320646e; /* "nd 3" */ - state[2] = 0x79622d32; /* "2-by" */ - state[3] = 0x6b206574; /* "te k" */ + state[0] = CHACHA_CONSTANT_EXPA; + state[1] = CHACHA_CONSTANT_ND_3; + state[2] = CHACHA_CONSTANT_2_BY; + state[3] = CHACHA_CONSTANT_TE_K; } void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv); diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index c4165126937e..a6c3b8e7deb6 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -105,6 +105,12 @@ struct drbg_test_data { struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */ }; +enum drbg_seed_state { + DRBG_SEED_STATE_UNSEEDED, + DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */ + DRBG_SEED_STATE_FULL, +}; + struct drbg_state { struct mutex drbg_mutex; /* lock around DRBG */ unsigned char *V; /* internal state 10.1.1.1 1a) */ @@ -127,16 +133,14 @@ struct drbg_state { struct crypto_wait ctr_wait; /* CTR mode async wait obj */ struct scatterlist sg_in, sg_out; /* CTR mode SGLs */ - bool seeded; /* DRBG fully seeded? */ + enum drbg_seed_state seeded; /* DRBG fully seeded? */ bool pr; /* Prediction resistance enabled? */ bool fips_primed; /* Continuous test primed? 
*/ unsigned char *prev; /* FIPS 140-2 continuous test value */ - struct work_struct seed_work; /* asynchronous seeding support */ struct crypto_rng *jent; const struct drbg_state_ops *d_ops; const struct drbg_core *core; struct drbg_string test_data; - struct random_ready_callback random_ready; }; static inline __u8 drbg_statelen(struct drbg_state *drbg) diff --git a/include/crypto/hash.h b/include/crypto/hash.h index b2bc1e46e86a..14d75caa233d 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -149,7 +149,7 @@ struct ahash_alg { struct shash_desc { struct crypto_shash *tfm; - void *__ctx[] __aligned(ARCH_SLAB_MINALIGN); + void *__ctx[] __aligned(UL(16)); }; #define HASH_MAX_DIGESTSIZE 64 diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h index 8e50d487500f..52363eee2b20 100644 --- a/include/crypto/internal/blake2s.h +++ b/include/crypto/internal/blake2s.h @@ -11,11 +11,11 @@ #include #include -void blake2s_compress_generic(struct blake2s_state *state,const u8 *block, +void blake2s_compress_generic(struct blake2s_state *state, const u8 *block, size_t nblocks, const u32 inc); -void blake2s_compress_arch(struct blake2s_state *state,const u8 *block, - size_t nblocks, const u32 inc); +void blake2s_compress(struct blake2s_state *state, const u8 *block, + size_t nblocks, const u32 inc); bool blake2s_selftest(void); @@ -24,14 +24,11 @@ static inline void blake2s_set_lastblock(struct blake2s_state *state) state->f[0] = -1; } -typedef void (*blake2s_compress_t)(struct blake2s_state *state, - const u8 *block, size_t nblocks, u32 inc); - /* Helper functions for BLAKE2s shared by the library and shash APIs */ -static inline void __blake2s_update(struct blake2s_state *state, - const u8 *in, size_t inlen, - blake2s_compress_t compress) +static __always_inline void +__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen, + bool force_generic) { const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; @@ -39,7 +36,12 @@ static inline void __blake2s_update(struct blake2s_state *state, return; if (inlen > fill) { memcpy(state->buf + state->buflen, in, fill); - (*compress)(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); + if (force_generic) + blake2s_compress_generic(state, state->buf, 1, + BLAKE2S_BLOCK_SIZE); + else + blake2s_compress(state, state->buf, 1, + BLAKE2S_BLOCK_SIZE); state->buflen = 0; in += fill; inlen -= fill; @@ -47,7 +49,12 @@ static inline void __blake2s_update(struct blake2s_state *state, if (inlen > BLAKE2S_BLOCK_SIZE) { const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); /* Hash one less (full) block than strictly possible */ - (*compress)(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); + if (force_generic) + blake2s_compress_generic(state, in, nblocks - 1, + BLAKE2S_BLOCK_SIZE); + else + blake2s_compress(state, in, nblocks - 1, + BLAKE2S_BLOCK_SIZE); in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); } @@ -55,13 +62,16 @@ static inline void __blake2s_update(struct blake2s_state *state, state->buflen += inlen; } -static inline void __blake2s_final(struct blake2s_state *state, u8 *out, - blake2s_compress_t compress) +static __always_inline void +__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic) { blake2s_set_lastblock(state); memset(state->buf + state->buflen, 0, BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ - (*compress)(state, state->buf, 1, state->buflen); + if (force_generic) + blake2s_compress_generic(state, state->buf, 1, state->buflen); + else + 
blake2s_compress(state, state->buf, 1, state->buflen); cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); memcpy(out, state->h, state->outlen); } @@ -99,20 +109,20 @@ static inline int crypto_blake2s_init(struct shash_desc *desc) static inline int crypto_blake2s_update(struct shash_desc *desc, const u8 *in, unsigned int inlen, - blake2s_compress_t compress) + bool force_generic) { struct blake2s_state *state = shash_desc_ctx(desc); - __blake2s_update(state, in, inlen, compress); + __blake2s_update(state, in, inlen, force_generic); return 0; } static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out, - blake2s_compress_t compress) + bool force_generic) { struct blake2s_state *state = shash_desc_ctx(desc); - __blake2s_final(state, out, compress); + __blake2s_final(state, out, force_generic); return 0; } diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h index 52b619d44ba2..3be89a7c20a9 100644 --- a/include/dt-bindings/leds/common.h +++ b/include/dt-bindings/leds/common.h @@ -60,6 +60,13 @@ #define LED_FUNCTION_MICMUTE "micmute" #define LED_FUNCTION_MUTE "mute" +/* Used for player LEDs as found on game controllers from e.g. Nintendo, Sony. */ +#define LED_FUNCTION_PLAYER1 "player-1" +#define LED_FUNCTION_PLAYER2 "player-2" +#define LED_FUNCTION_PLAYER3 "player-3" +#define LED_FUNCTION_PLAYER4 "player-4" +#define LED_FUNCTION_PLAYER5 "player-5" + /* Miscelleaus functions. Use functions above if you can. */ #define LED_FUNCTION_ACTIVITY "activity" #define LED_FUNCTION_ALARM "alarm" diff --git a/include/linux/ata.h b/include/linux/ata.h index 6e67aded28f8..6d2d31b03b4d 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -565,6 +565,18 @@ struct ata_bmdma_prd { ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 2))) +#define ata_id_has_devslp(id) \ + ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ + ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))) +#define ata_id_has_ncq_autosense(id) \ + ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ + ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))) +#define ata_id_has_dipm(id) \ + ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ + ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ + ((id)[ATA_ID_FEATURE_SUPP] & (1 << 3))) #define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10)) #define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11)) #define ata_id_u32(id,n) \ @@ -577,9 +589,6 @@ struct ata_bmdma_prd { #define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) #define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4)) -#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)) -#define ata_id_has_ncq_autosense(id) \ - ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)) static inline bool ata_id_has_hipm(const u16 *id) { @@ -591,17 +600,6 @@ static inline bool ata_id_has_hipm(const u16 *id) return val & (1 << 9); } -static inline bool ata_id_has_dipm(const u16 *id) -{ - u16 val = id[ATA_ID_FEATURE_SUPP]; - - if (val == 0 || val == 0xffff) - return false; - - return val & (1 << 3); -} - - static inline bool ata_id_has_fua(const u16 *id) { if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000) @@ -770,16 +768,21 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id) static inline bool ata_id_has_sense_reporting(const u16 *id) { - if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + if (!(id[ATA_ID_CFS_ENABLE_2] & BIT(15))) 
return false; - return id[ATA_ID_COMMAND_SET_3] & (1 << 6); + if ((id[ATA_ID_COMMAND_SET_3] & (BIT(15) | BIT(14))) != BIT(14)) + return false; + return id[ATA_ID_COMMAND_SET_3] & BIT(6); } static inline bool ata_id_sense_reporting_enabled(const u16 *id) { - if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15))) + if (!ata_id_has_sense_reporting(id)) return false; - return id[ATA_ID_COMMAND_SET_4] & (1 << 6); + /* ata_id_has_sense_reporting() == true, word 86 must have bit 15 set */ + if ((id[ATA_ID_COMMAND_SET_4] & (BIT(15) | BIT(14))) != BIT(14)) + return false; + return id[ATA_ID_COMMAND_SET_4] & BIT(6); } /** diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 4e035aca6f7e..6093fa6db260 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -41,6 +41,22 @@ #define __bf_shf(x) (__builtin_ffsll(x) - 1) +#define __scalar_type_to_unsigned_cases(type) \ + unsigned type: (unsigned type)0, \ + signed type: (unsigned type)0 + +#define __unsigned_scalar_typeof(x) typeof( \ + _Generic((x), \ + char: (unsigned char)0, \ + __scalar_type_to_unsigned_cases(char), \ + __scalar_type_to_unsigned_cases(short), \ + __scalar_type_to_unsigned_cases(int), \ + __scalar_type_to_unsigned_cases(long), \ + __scalar_type_to_unsigned_cases(long long), \ + default: (x))) + +#define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x)) + #define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \ ({ \ BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \ @@ -49,7 +65,8 @@ BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \ ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \ _pfx "value too large for the field"); \ - BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \ + BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \ + __bf_cast_unsigned(_reg, ~0ull), \ _pfx "type of reg too small for mask"); \ __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \ (1ULL << __bf_shf(_mask))); \ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 99058eb81042..c4f6a9270c03 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -4,10 +4,12 @@ #ifndef __ASSEMBLY__ -#include #include -#include #include +#include +#include + +struct device; /* * bitmaps provide bit arrays that consume one or more unsigned @@ -122,6 +124,12 @@ extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); extern void bitmap_free(const unsigned long *bitmap); +/* Managed variants of the above. 
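
The _Generic machinery added to <linux/bitfield.h> above makes the "type of reg too small for mask" build-time check work for signed register types: with, say, a signed 16-bit reg, (typeof(_reg))~0ull evaluates to -1, which integer promotion turns into the maximum unsigned value in the comparison, so the check could never fire. __bf_cast_unsigned() maps the type to its unsigned counterpart first. The same trick reduced to a userspace demo (macro names mirror the patch, but the program itself is hypothetical):

    #include <stdio.h>

    #define to_unsigned_cases(type) \
            unsigned type: (unsigned type)0, \
            signed type: (unsigned type)0

    #define unsigned_typeof(x) __typeof__(_Generic((x), \
            char: (unsigned char)0, \
            to_unsigned_cases(char), \
            to_unsigned_cases(short), \
            to_unsigned_cases(int), \
            to_unsigned_cases(long), \
            to_unsigned_cases(long long), \
            default: (x)))

    int main(void)
    {
            short reg = 0;

            printf("%lld\n", (long long)(__typeof__(reg))~0ull);    /* -1 */
            printf("%llu\n",
                   (unsigned long long)(unsigned_typeof(reg))~0ull); /* 65535 */
            return 0;
    }
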
*/ +unsigned long *devm_bitmap_alloc(struct device *dev, + unsigned int nbits, gfp_t flags); +unsigned long *devm_bitmap_zalloc(struct device *dev, + unsigned int nbits, gfp_t flags); + /* * lib/bitmap.c provides these functions: */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f33c425cdb71..c24d077c7a1b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1472,6 +1472,9 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); +int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr); bool btf_ctx_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info); @@ -1686,6 +1689,13 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, return -ENOTSUPP; } +static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + return -ENOTSUPP; +} + static inline void bpf_map_put(struct bpf_map *map) { } @@ -1745,6 +1755,8 @@ void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, struct net_device *netdev); bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); +void unpriv_ebpf_notify(int new_state); + #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e20362ae573e..f09a5bbc9b8b 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -46,7 +46,7 @@ struct bpf_reg_state { enum bpf_reg_type type; union { /* valid when type == PTR_TO_PACKET */ - u16 range; + int range; /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | * PTR_TO_MAP_VALUE_OR_NULL @@ -291,6 +291,27 @@ struct bpf_verifier_state { iter < frame->allocated_stack / BPF_REG_SIZE; \ iter++, reg = bpf_get_spilled_reg(iter, frame)) +/* Invoke __expr over registers in __vst, setting __state and __reg */ +#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \ + ({ \ + struct bpf_verifier_state *___vstate = __vst; \ + int ___i, ___j; \ + for (___i = 0; ___i <= ___vstate->curframe; ___i++) { \ + struct bpf_reg_state *___regs; \ + __state = ___vstate->frame[___i]; \ + ___regs = __state->regs; \ + for (___j = 0; ___j < MAX_BPF_REG; ___j++) { \ + __reg = &___regs[___j]; \ + (void)(__expr); \ + } \ + bpf_for_each_spilled_reg(___j, __state, __reg) { \ + if (!__reg) \ + continue; \ + (void)(__expr); \ + } \ + } \ + }) + /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index b04d34bab124..edabbc429515 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -117,7 +117,6 @@ static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \ * of the form "mark_buffer_foo()". These are higher-level functions which * do something in addition to setting a b_state bit.
*/ -BUFFER_FNS(Uptodate, uptodate) BUFFER_FNS(Dirty, dirty) TAS_BUFFER_FNS(Dirty, dirty) BUFFER_FNS(Lock, locked) @@ -135,6 +134,41 @@ BUFFER_FNS(Meta, meta) BUFFER_FNS(Prio, prio) BUFFER_FNS(Defer_Completion, defer_completion) +static __always_inline void set_buffer_uptodate(struct buffer_head *bh) +{ + /* + * If somebody else already set this uptodate, they will + * have done the memory barrier, and a reader will thus + * see *some* valid buffer state. + * + * Any other serialization (with IO errors or whatever that + * might clear the bit) has to come from other state (eg BH_Lock). + */ + if (test_bit(BH_Uptodate, &bh->b_state)) + return; + + /* + * make it consistent with folio_mark_uptodate + * pairs with smp_load_acquire in buffer_uptodate + */ + smp_mb__before_atomic(); + set_bit(BH_Uptodate, &bh->b_state); +} + +static __always_inline void clear_buffer_uptodate(struct buffer_head *bh) +{ + clear_bit(BH_Uptodate, &bh->b_state); +} + +static __always_inline int buffer_uptodate(const struct buffer_head *bh) +{ + /* + * make it consistent with folio_test_uptodate + * pairs with smp_mb__before_atomic in set_buffer_uptodate + */ + return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0; +} + #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) /* If we *know* page->private refers to buffer_heads */ diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h index 5755ae5a4712..6a869682c120 100644 --- a/include/linux/can/platform/sja1000.h +++ b/include/linux/can/platform/sja1000.h @@ -14,7 +14,7 @@ #define OCR_MODE_TEST 0x01 #define OCR_MODE_NORMAL 0x02 #define OCR_MODE_CLOCK 0x03 -#define OCR_MODE_MASK 0x07 +#define OCR_MODE_MASK 0x03 #define OCR_TX0_INVERT 0x04 #define OCR_TX0_PULLDOWN 0x08 #define OCR_TX0_PULLUP 0x10 diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 455e9b9e2adf..8287382d3d1d 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -288,6 +288,7 @@ enum { CEPH_SESSION_FLUSHMSG_ACK, CEPH_SESSION_FORCE_RO, CEPH_SESSION_REJECT, + CEPH_SESSION_REQUEST_FLUSH_MDLOG, }; extern const char *ceph_session_op_name(int op); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 83fa08a06507..787fff5ec7f5 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -287,6 +287,9 @@ struct ceph_osd_linger_request { rados_watcherrcb_t errcb; void *data; + struct ceph_pagelist *request_pl; + struct page **notify_id_pages; + struct page ***preply_pages; size_t *preply_len; }; diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 8c193033604f..3ee42f15483b 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -282,6 +282,13 @@ struct css_set { struct rcu_head rcu_head; }; +struct ext_css_set { + struct css_set cset; + + struct list_head mg_src_preload_node; + struct list_head mg_dst_preload_node; +}; + struct cgroup_base_stat { struct task_cputime cputime; }; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 67b2190b1dae..00b390fa47d4 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -71,8 +71,10 @@ struct css_task_iter { ANDROID_KABI_RESERVE(1); }; +extern struct file_system_type cgroup_fs_type; extern struct cgroup_root cgrp_dfl_root; -extern struct css_set init_css_set; +extern struct ext_css_set init_ext_css_set; +#define init_css_set init_ext_css_set.cset #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys; #include diff --git 
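
Open-coding set_buffer_uptodate(), clear_buffer_uptodate() and buffer_uptodate() above, instead of generating them via BUFFER_FNS(), lets them carry explicit ordering: the writer makes the buffer contents visible before BH_Uptodate (smp_mb__before_atomic() then set_bit()), and the reader's smp_load_acquire() guarantees that seeing the bit implies seeing the data it advertises, mirroring folio_mark_uptodate()/folio_test_uptodate(). The publication pattern in miniature (a sketch, not from the patch):

    #include <linux/atomic.h>
    #include <linux/bitops.h>
    #include <linux/bits.h>

    static unsigned long flags; /* bit 0: data is ready */
    static int data;

    static void publish(void)
    {
            data = 1234;
            smp_mb__before_atomic(); /* order the data store before the flag */
            set_bit(0, &flags);
    }

    static bool consume(int *out)
    {
            /* Acquire: if the bit is seen, 'data' is seen too. */
            if (!(smp_load_acquire(&flags) & BIT(0)))
                    return false;
            *out = data;
            return true;
    }
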
a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 4cf524ccab43..ae2de4e1cd6f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -54,9 +54,6 @@ #define __compiletime_object_size(obj) __builtin_object_size(obj, 0) -#define __compiletime_warning(message) __attribute__((__warning__(message))) -#define __compiletime_error(message) __attribute__((__error__(message))) - #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) #define __latent_entropy __attribute__((latent_entropy)) #endif diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index b2a3f4f641a7..08eb06301791 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -30,6 +30,7 @@ # define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) # define __GCC4_has_attribute___copy__ 0 # define __GCC4_has_attribute___designated_init__ 0 +# define __GCC4_has_attribute___error__ 1 # define __GCC4_has_attribute___externally_visible__ 1 # define __GCC4_has_attribute___no_caller_saved_registers__ 0 # define __GCC4_has_attribute___noclone__ 1 @@ -37,6 +38,7 @@ # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) # define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9) # define __GCC4_has_attribute___fallthrough__ 0 +# define __GCC4_has_attribute___warning__ 1 #endif /* @@ -136,6 +138,17 @@ # define __designated_init #endif +/* + * Optional: only supported since clang >= 14.0 + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-error-function-attribute + */ +#if __has_attribute(__error__) +# define __compiletime_error(msg) __attribute__((__error__(msg))) +#else +# define __compiletime_error(msg) +#endif + /* * Optional: not supported by clang * @@ -272,6 +285,17 @@ */ #define __used __attribute__((__used__)) +/* + * Optional: only supported since clang >= 14.0 + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warning-function-attribute + */ +#if __has_attribute(__warning__) +# define __compiletime_warning(msg) __attribute__((__warning__(msg))) +#else +# define __compiletime_warning(msg) +#endif + /* * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 2a31195cb50f..75f7e8725232 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -289,12 +289,6 @@ struct ftrace_likely_data { #ifndef __compiletime_object_size # define __compiletime_object_size(obj) -1 #endif -#ifndef __compiletime_warning -# define __compiletime_warning(message) -#endif -#ifndef __compiletime_error -# define __compiletime_error(message) -#endif #ifdef __OPTIMIZE__ # define __compiletime_assert(condition, msg, prefix, suffix) \ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 33ac06c01458..ff057a39c634 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -65,6 +65,11 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev, extern ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, + char *buf); +extern ssize_t 
cpu_show_retbleed(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index edb5c186b0b7..ad06852cbd5d 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -5,6 +5,7 @@ #include #include #include +#include struct task_struct; diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index d6c4cc9ecc77..2357109a8901 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -91,6 +91,8 @@ struct dentry *debugfs_create_automount(const char *name, void debugfs_remove(struct dentry *dentry); #define debugfs_remove_recursive debugfs_remove +void debugfs_lookup_and_remove(const char *name, struct dentry *parent); + const struct file_operations *debugfs_real_fops(const struct file *filp); int debugfs_file_get(struct dentry *dentry); @@ -220,6 +222,10 @@ static inline void debugfs_remove(struct dentry *dentry) static inline void debugfs_remove_recursive(struct dentry *dentry) { } +static inline void debugfs_lookup_and_remove(const char *name, + struct dentry *parent) +{ } + const struct file_operations *debugfs_real_fops(const struct file *filp); static inline int debugfs_file_get(struct dentry *dentry) diff --git a/include/linux/dim.h b/include/linux/dim.h index b698266d0035..6c5733981563 100644 --- a/include/linux/dim.h +++ b/include/linux/dim.h @@ -21,7 +21,7 @@ * We consider 10% difference as significant. */ #define IS_SIGNIFICANT_DIFF(val, ref) \ - (((100UL * abs((val) - (ref))) / (ref)) > 10) + ((ref) && (((100UL * abs((val) - (ref))) / (ref)) > 10)) /* * Calculate the gap between two values. diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index b9a8549f9a7f..c25005764d44 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -191,10 +191,10 @@ static inline int dma_declare_coherent_memory(struct device *dev, return -ENOSYS; } -#define dma_release_coherent_memory(dev) (0) #define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0) #define dma_release_from_dev_coherent(dev, order, vaddr) (0) #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) +static inline void dma_release_coherent_memory(struct device *dev) { } static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle) diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 03befff63eda..393aa55f5452 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -61,14 +61,6 @@ */ #define DMA_ATTR_PRIVILEGED (1UL << 9) -/* - * This is a hint to the DMA-mapping subsystem that the device is expected - * to overwrite the entire mapped size, thus the caller does not require any - * of the previous buffer contents to be preserved. This allows - * bounce-buffering implementations to optimise DMA_FROM_DEVICE transfers. 
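
The small dim.h change above guards IS_SIGNIFICANT_DIFF() against a zero reference value: dynamic interrupt moderation compares sample rates (e.g. packets per period) between sampling rounds, and an idle round previously fed ref == 0 into (100UL * abs(val - ref)) / (ref), a division by zero. With the short-circuiting (ref) &&, a zero reference now simply means "not significant". Worked through: val = 5, ref = 0 yields 0 with no division after the fix, while val = 115, ref = 100 still yields 1500/100 = 15 > 10, i.e. a significant difference.
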
- */ -#define DMA_ATTR_OVERWRITE (1UL << 10) - /* * DMA_ATTR_SYS_CACHE_ONLY: used to indicate that the buffer should be mapped * with the correct memory attributes so that it can be cached in the system diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index a57ee75342cf..c0c6ea9ea7e3 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -50,9 +50,6 @@ struct _ddebug { #if defined(CONFIG_DYNAMIC_DEBUG_CORE) -/* exported for module authors to exercise >control */ -int dynamic_debug_exec_queries(const char *query, const char *modname); - int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *modname); extern int ddebug_remove_module(const char *mod_name); @@ -196,7 +193,7 @@ static inline int ddebug_remove_module(const char *mod) static inline int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *modname) { - if (strstr(param, "dyndbg")) { + if (!strcmp(param, "dyndbg")) { /* avoid pr_warn(), which wants pr_fmt() fully defined */ printk(KERN_WARNING "dyndbg param is supported only in " "CONFIG_DYNAMIC_DEBUG builds\n"); @@ -216,12 +213,6 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val, rowsize, groupsize, buf, len, ascii); \ } while (0) -static inline int dynamic_debug_exec_queries(const char *query, const char *modname) -{ - pr_warn("kernel not built with CONFIG_DYNAMIC_DEBUG_CORE\n"); - return 0; -} - #endif /* !CONFIG_DYNAMIC_DEBUG_CORE */ #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index e17cd4c44f93..7feb70d32d95 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -167,6 +167,8 @@ struct capsule_info { size_t page_bytes_remain; }; +int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff, + size_t hdr_bytes); int __efi_capsule_setup_info(struct capsule_info *cap_info); typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg); @@ -1159,7 +1161,7 @@ void efi_retrieve_tpm2_eventlog(void); arch_efi_call_virt_teardown(); \ }) -#define EFI_RANDOM_SEED_SIZE 64U +#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE struct linux_efi_random_seed { u32 size; diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 7dff07713a07..46c42479f950 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -69,7 +69,7 @@ #define EXIT_TO_USER_MODE_WORK \ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | \ + _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ ARCH_EXIT_TO_USER_MODE_WORK) /** @@ -259,12 +259,13 @@ static __always_inline void arch_exit_to_user_mode(void) { } #endif /** - * arch_do_signal - Architecture specific signal delivery function + * arch_do_signal_or_restart - Architecture specific signal delivery function * @regs: Pointer to currents pt_regs + * @has_signal: actual signal to handle * * Invoked from exit_to_user_mode_loop(). 
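
In ddebug_dyndbg_module_param_cb() above, strstr() matched any parameter whose name merely contains "dyndbg", so a parameter named, say, a hypothetical "mydyndbg_level" would also be swallowed with the bogus warning; strcmp() restricts the fallback handler to the exact "dyndbg" parameter. A two-line userspace contrast (illustrative only):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *param = "mydyndbg_level";

            printf("strstr: %d\n", strstr(param, "dyndbg") != NULL); /* 1: false match */
            printf("strcmp: %d\n", strcmp(param, "dyndbg") == 0);    /* 0: correct */
            return 0;
    }
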
*/ -void arch_do_signal(struct pt_regs *regs); +void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal); /** * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit() diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h index 0cef17afb41a..9b93f8584ff7 100644 --- a/include/linux/entry-kvm.h +++ b/include/linux/entry-kvm.h @@ -11,8 +11,8 @@ # define ARCH_XFER_TO_GUEST_MODE_WORK (0) #endif -#define XFER_TO_GUEST_MODE_WORK \ - (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ +#define XFER_TO_GUEST_MODE_WORK \ + (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \ _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK) struct kvm_vcpu; diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 2e5debc0373c..99209f50915f 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -127,7 +127,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr) #endif } -static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2]) +static inline bool is_multicast_ether_addr_64bits(const u8 *addr) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 #ifdef __BIG_ENDIAN @@ -352,8 +352,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2) * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits. */ -static inline bool ether_addr_equal_64bits(const u8 addr1[6+2], - const u8 addr2[6+2]) +static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2); diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index dc4fd8a6644d..ce1cf42740bf 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -39,6 +39,7 @@ struct file *eventfd_fget(int fd); struct eventfd_ctx *eventfd_ctx_fdget(int fd); struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); +__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask); int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, __u64 *cnt); @@ -66,6 +67,12 @@ static inline int eventfd_signal(struct eventfd_ctx *ctx, int n) return -ENOSYS; } +static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, + unsigned mask) +{ + return -ENOSYS; +} + static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) { diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index d445150c5350..5dd1e52b8997 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -73,6 +73,20 @@ struct f2fs_device { __le32 total_segments; } __packed; +/* reason of stop_checkpoint */ +enum stop_cp_reason { + STOP_CP_REASON_SHUTDOWN, + STOP_CP_REASON_FAULT_INJECT, + STOP_CP_REASON_META_PAGE, + STOP_CP_REASON_WRITE_FAIL, + STOP_CP_REASON_CORRUPTED_SUMMARY, + STOP_CP_REASON_UPDATE_INODE, + STOP_CP_REASON_FLUSH_FAIL, + STOP_CP_REASON_MAX, +}; + +#define MAX_STOP_REASON 32 + struct f2fs_super_block { __le32 magic; /* Magic Number */ __le16 major_ver; /* Major Version */ @@ -116,7 +130,8 @@ struct f2fs_super_block { __u8 hot_ext_count; /* # of hot file extension */ __le16 s_encoding; /* Filename charset encoding */ __le16 s_encoding_flags; /* Filename charset encoding flags */ - __u8 reserved[306]; /* valid reserved region */ + __u8 s_stop_reason[MAX_STOP_REASON]; /* stop checkpoint reason */ + __u8 reserved[274]; /* valid reserved region */ __le32 crc; /* 
checksum of superblock */ } __packed; diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h index ff5596dd30f8..2382dec6d6ab 100644 --- a/include/linux/fbcon.h +++ b/include/linux/fbcon.h @@ -15,6 +15,8 @@ void fbcon_new_modelist(struct fb_info *info); void fbcon_get_requirement(struct fb_info *info, struct fb_blit_caps *caps); void fbcon_fb_blanked(struct fb_info *info, int blank); +int fbcon_modechange_possible(struct fb_info *info, + struct fb_var_screeninfo *var); void fbcon_update_vcs(struct fb_info *info, bool all); void fbcon_remap_all(struct fb_info *info); int fbcon_set_con2fb_map_ioctl(void __user *argp); @@ -33,6 +35,8 @@ static inline void fbcon_new_modelist(struct fb_info *info) {} static inline void fbcon_get_requirement(struct fb_info *info, struct fb_blit_caps *caps) {} static inline void fbcon_fb_blanked(struct fb_info *info, int blank) {} +static inline int fbcon_modechange_possible(struct fb_info *info, + struct fb_var_screeninfo *var) { return 0; } static inline void fbcon_update_vcs(struct fb_info *info, bool all) {} static inline void fbcon_remap_all(struct fb_info *info) {} static inline int fbcon_set_con2fb_map_ioctl(void __user *argp) { return 0; } diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h index 921e750843e6..766fcd973beb 100644 --- a/include/linux/fcntl.h +++ b/include/linux/fcntl.h @@ -19,7 +19,7 @@ /* List of all valid flags for the how->resolve argument: */ #define VALID_RESOLVE_FLAGS \ (RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \ - RESOLVE_BENEATH | RESOLVE_IN_ROOT) + RESOLVE_BENEATH | RESOLVE_IN_ROOT | RESOLVE_CACHED) /* List of all open_how "versions". */ #define OPEN_HOW_SIZE_VER0 24 /* sizeof first published struct */ diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index a32bf47c593e..f1a99d3e5570 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -123,7 +123,7 @@ extern void __fd_install(struct files_struct *files, extern int __close_fd(struct files_struct *files, unsigned int fd); extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags); -extern int __close_fd_get_file(unsigned int fd, struct file **res); +extern int close_fd_get_file(unsigned int fd, struct file **res); extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, struct files_struct **new_fdp); diff --git a/include/linux/fs.h b/include/linux/fs.h index 4019e6fa3b95..72977650dd4c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -515,6 +515,11 @@ static inline void i_mmap_unlock_write(struct address_space *mapping) up_write(&mapping->i_mmap_rwsem); } +static inline int i_mmap_trylock_read(struct address_space *mapping) +{ + return down_read_trylock(&mapping->i_mmap_rwsem); +} + static inline void i_mmap_lock_read(struct address_space *mapping) { down_read(&mapping->i_mmap_rwsem); @@ -1466,7 +1471,16 @@ struct super_block { const struct xattr_handler **s_xattr; #ifdef CONFIG_FS_ENCRYPTION const struct fscrypt_operations *s_cop; +#ifdef __GENKSYMS__ + /* + * Android ABI CRC preservation due to commit 391cceee6d43 ("fscrypt: + * stop using keyrings subsystem for fscrypt_master_key") changing this + * type. Size is the same, this is a private field. 
+ */ struct key *s_master_keys; /* master crypto keys in use */ +#else + struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */ +#endif #endif #ifdef CONFIG_FS_VERITY const struct fsverity_operations *s_vop; @@ -1851,6 +1865,14 @@ struct dir_context { */ #define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN) +/* + * These flags control the behavior of vfs_copy_file_range(). + * They are not available to the user via syscall. + * + * COPY_FILE_SPLICE: call splice direct instead of fs clone/copy ops + */ +#define COPY_FILE_SPLICE (1 << 0) + struct iov_iter; struct file_operations { diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 219f28bf97ca..b2c6493f4208 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -200,7 +200,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy) } /* keyring.c */ -void fscrypt_sb_free(struct super_block *sb); +void fscrypt_destroy_keyring(struct super_block *sb); int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg); @@ -388,7 +388,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy) } /* keyring.c */ -static inline void fscrypt_sb_free(struct super_block *sb) +static inline void fscrypt_destroy_keyring(struct super_block *sb) { } diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index ff3e6e3eb9c1..c5585bcf53a5 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -268,7 +268,14 @@ struct gpio_irq_chip { */ void (*irq_mask)(struct irq_data *data); - ANDROID_KABI_RESERVE(1); + /** + * @initialized: + * + * Flag to track GPIO chip irq member's initialization. + * This flag will make sure GPIO chip irq members are not used + * before they are initialized. + */ + ANDROID_KABI_USE(1, bool initialized); ANDROID_KABI_RESERVE(2); }; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d5407d38ec03..25a24db254ea 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -179,8 +179,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, struct page *follow_huge_pd(struct vm_area_struct *vma, unsigned long address, hugepd_t hpd, int flags, int pdshift); -struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, - pmd_t *pmd, int flags); +struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, + int flags); struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, pud_t *pud, int flags); struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address, @@ -267,8 +267,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma, return NULL; } -static inline struct page *follow_huge_pmd(struct mm_struct *mm, - unsigned long address, pmd_t *pmd, int flags) +static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, + unsigned long address, int flags) { return NULL; } diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 8e6dd908da21..aa1d4da03538 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -60,7 +60,5 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng); /** Unregister a Hardware Random Number Generator driver. */ extern void hwrng_unregister(struct hwrng *rng); extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); -/** Feed random bits into the pool. 
*/
-extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
 #endif /* LINUX_HWRANDOM_H_ */
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 33e939977444..c16a9dda3ad5 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -228,6 +228,7 @@ struct st_sensor_settings {
  * @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
  * @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
  * @buffer_data: Data used by buffer part.
+ * @odr_lock: Local lock for preventing concurrent ODR accesses/changes
  */
 struct st_sensor_data {
 	struct device *dev;
@@ -253,6 +254,8 @@ struct st_sensor_data {
 	s64 hw_timestamp;
 	char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned;
+
+	struct mutex odr_lock;
 };
 #ifdef CONFIG_IIO_BUFFER
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 3515ca64e638..57a4e4d64e98 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -178,6 +178,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
 int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
+#ifdef CONFIG_INET
+int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size);
+#else
+static inline int inet_gifconf(struct net_device *dev, char __user *buf,
+			       int len, int size)
+{
+	return 0;
+}
+#endif
 void devinet_init(void);
 struct in_device *inetdev_by_index(struct net *, int);
 __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 35b2d845704d..5ae871af45de 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -5,6 +5,37 @@
 #include
 #include
+#ifdef __GENKSYMS__
+/*
+ * ANDROID ABI HACK
+ *
+ * In the 5.10.162 release, the io_uring code was synced with the version
+ * that is in the 5.15.y kernel tree in order to resolve a huge number of
+ * potential, and known, problems with the codebase. This makes for a more
+ * secure and easier-to-update-and-maintain 5.10.y kernel tree, so this is
+ * a great thing; however, it caused some issues when it comes to the
+ * Android KABI preservation and checking tools.
+ *
+ * A number of the io_uring structures get used in other core kernel
+ * structures, only as "opaque" pointers, so there is not any real ABI
+ * breakage. But, due to the visibility of the structures going away, the
+ * CRC values of many scheduler variables and functions were changed.
+ *
+ * In order to preserve the CRC values, and to prevent all device kernels
+ * from being forced to rebuild for no reason whatsoever from a functional
+ * point of view, we need to keep around the "old" io_uring structures for
+ * the CRC calculation only. This is done by the following definitions of
+ * struct io_identity and struct io_uring_task, which will only be visible
+ * when the CRC calculation build happens, not in any functional kernel
+ * build.
+ *
+ * Yes, this all is a horrible hack, and these really are not the true
+ * structures that any code uses, but such is life in the world of stable
+ * APIs...
+ * The real structures are in io_uring/io_uring.c; see the ones there if
+ * you need to touch or do anything with them.
+ *
+ * NEVER touch these structure definitions, they are fake and not valid code.
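+ *
+ * The only entry points the rest of the kernel should rely on are the
+ * wrappers declared further down; for example, an exit path that used
+ * to call __io_uring_files_cancel(files) now reduces to (sketch):
+ *
+ *	if (current->io_uring)
+ *		__io_uring_cancel(false);	// i.e. io_uring_files_cancel()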
+ */ struct io_identity { struct files_struct *files; struct mm_struct *mm; @@ -33,22 +64,23 @@ struct io_uring_task { atomic_t in_idle; bool sqpoll; }; +#endif /* ANDROID ABI HACK */ + #if defined(CONFIG_IO_URING) struct sock *io_uring_get_socket(struct file *file); -void __io_uring_task_cancel(void); -void __io_uring_files_cancel(struct files_struct *files); +void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); +static inline void io_uring_files_cancel(void) +{ + if (current->io_uring) + __io_uring_cancel(false); +} static inline void io_uring_task_cancel(void) { - if (current->io_uring && !xa_empty(¤t->io_uring->xa)) - __io_uring_task_cancel(); -} -static inline void io_uring_files_cancel(struct files_struct *files) -{ - if (current->io_uring && !xa_empty(¤t->io_uring->xa)) - __io_uring_files_cancel(files); + if (current->io_uring) + __io_uring_cancel(true); } static inline void io_uring_free(struct task_struct *tsk) { @@ -63,7 +95,7 @@ static inline struct sock *io_uring_get_socket(struct file *file) static inline void io_uring_task_cancel(void) { } -static inline void io_uring_files_cancel(struct files_struct *files) +static inline void io_uring_files_cancel(void) { } static inline void io_uring_free(struct task_struct *tsk) diff --git a/include/linux/iova.h b/include/linux/iova.h index 97dc7d74b651..95f5cbf3ab3c 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -133,7 +133,7 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) return iova >> iova_shift(iovad); } -#if IS_ENABLED(CONFIG_IOMMU_IOVA) +#if IS_REACHABLE(CONFIG_IOMMU_IOVA) int iova_cache_get(void); void iova_cache_put(void); diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index a7c5c7f4d97f..9a6832eb8b0c 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -52,7 +52,7 @@ struct ipv6_devconf { __s32 use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE - __s32 mc_forwarding; + atomic_t mc_forwarding; #endif __s32 disable_ipv6; __s32 drop_unicast_in_l2_multicast; diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 6175b3e0d220..a1b067f0c6ad 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -190,6 +190,8 @@ struct module; #ifdef BUILD_FIPS140_KO +#include + static inline int static_key_count(struct static_key *key) { return atomic_read(&key->enabled); @@ -270,9 +272,9 @@ extern void static_key_disable_cpuslocked(struct static_key *key); #include #include -static inline int static_key_count(struct static_key *key) +static __always_inline int static_key_count(struct static_key *key) { - return atomic_read(&key->enabled); + return arch_atomic_read(&key->enabled); } static __always_inline void jump_label_init(void) diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h new file mode 100644 index 000000000000..6f612d69ea0c --- /dev/null +++ b/include/linux/kasan-enabled.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KASAN_ENABLED_H +#define _LINUX_KASAN_ENABLED_H + +#include + +#ifdef CONFIG_KASAN_HW_TAGS + +DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); + +static __always_inline bool kasan_enabled(void) +{ + return static_branch_likely(&kasan_flag_enabled); +} + +static inline bool kasan_hw_tags_enabled(void) +{ + return kasan_enabled(); +} + +#else /* CONFIG_KASAN_HW_TAGS */ + +static inline bool kasan_enabled(void) +{ + return IS_ENABLED(CONFIG_KASAN); +} + +static inline bool kasan_hw_tags_enabled(void) +{ + return false; +} + +#endif /* 
CONFIG_KASAN_HW_TAGS */ + +#endif /* LINUX_KASAN_ENABLED_H */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index b59675cc19b7..99dc14b5ad44 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -3,6 +3,8 @@ #define _LINUX_KASAN_H #include +#include +#include #include #include @@ -82,33 +84,11 @@ static inline void kasan_disable_current(void) {} #ifdef CONFIG_KASAN_HW_TAGS -DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); - -static __always_inline bool kasan_enabled(void) -{ - return static_branch_likely(&kasan_flag_enabled); -} - -static inline bool kasan_hw_tags_enabled(void) -{ - return kasan_enabled(); -} - void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags); void kasan_free_pages(struct page *page, unsigned int order); #else /* CONFIG_KASAN_HW_TAGS */ -static inline bool kasan_enabled(void) -{ - return IS_ENABLED(CONFIG_KASAN); -} - -static inline bool kasan_hw_tags_enabled(void) -{ - return false; -} - static __always_inline void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags) { diff --git a/include/linux/kd.h b/include/linux/kd.h deleted file mode 100644 index b130a18f860f..000000000000 --- a/include/linux/kd.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_KD_H -#define _LINUX_KD_H - -#include - -#define KD_FONT_FLAG_OLD 0x80000000 /* Invoked via old interface [compat] */ -#endif /* _LINUX_KD_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2f05e9128201..c333dc6fb1ac 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -635,7 +635,11 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte) return buf; } +#ifdef __GENKSYMS__ extern int hex_to_bin(char ch); +#else +extern int hex_to_bin(unsigned char ch); +#endif extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); extern char *bin2hex(char *dst, const void *src, size_t count); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 5f61389f5f36..a1f12e959bba 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -187,14 +187,6 @@ void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len); void *arch_kexec_kernel_image_load(struct kimage *image); -int arch_kexec_apply_relocations_add(struct purgatory_info *pi, - Elf_Shdr *section, - const Elf_Shdr *relsec, - const Elf_Shdr *symtab); -int arch_kexec_apply_relocations(struct purgatory_info *pi, - Elf_Shdr *section, - const Elf_Shdr *relsec, - const Elf_Shdr *symtab); int arch_kimage_file_post_load_cleanup(struct kimage *image); #ifdef CONFIG_KEXEC_SIG int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, @@ -223,6 +215,44 @@ extern int crash_exclude_mem_range(struct crash_mem *mem, unsigned long long mend); extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map, void **addr, unsigned long *sz); + +#ifndef arch_kexec_apply_relocations_add +/* + * arch_kexec_apply_relocations_add - apply relocations of type RELA + * @pi: Purgatory to be relocated. + * @section: Section relocations applying to. + * @relsec: Section containing RELAs. + * @symtab: Corresponding symtab. + * + * Return: 0 on success, negative errno on error. 
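+ *
+ * Architectures that do support RELA relocations opt out of this stub
+ * by defining the symbol in their own <asm/kexec.h> alongside the real
+ * declaration; a sketch for a hypothetical architecture:
+ *
+ *	int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ *					     Elf_Shdr *section,
+ *					     const Elf_Shdr *relsec,
+ *					     const Elf_Shdr *symtab);
+ *	#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add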
+ */ +static inline int +arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section, + const Elf_Shdr *relsec, const Elf_Shdr *symtab) +{ + pr_err("RELA relocation unsupported.\n"); + return -ENOEXEC; +} +#endif + +#ifndef arch_kexec_apply_relocations +/* + * arch_kexec_apply_relocations - apply relocations of type REL + * @pi: Purgatory to be relocated. + * @section: Section relocations applying to. + * @relsec: Section containing RELs. + * @symtab: Corresponding symtab. + * + * Return: 0 on success, negative errno on error. + */ +static inline int +arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section, + const Elf_Shdr *relsec, const Elf_Shdr *symtab) +{ + pr_err("REL relocation unsupported.\n"); + return -ENOEXEC; +} +#endif #endif /* CONFIG_KEXEC_FILE */ #ifdef CONFIG_KEXEC_ELF @@ -412,6 +442,12 @@ static inline int kexec_crash_loaded(void) { return 0; } #define kexec_in_progress false #endif /* CONFIG_KEXEC_CORE */ +#ifdef CONFIG_KEXEC_SIG +void set_kexec_sig_enforced(void); +#else +static inline void set_kexec_sig_enforced(void) {} +#endif + #endif /* !defined(__ASSEBMLY__) */ #endif /* LINUX_KEXEC_H */ diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 86249476b57f..0b35a41440ff 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -688,7 +688,7 @@ __kfifo_uint_must_check_helper( \ * writer, you don't need extra locking to use these macro. */ #define kfifo_to_user(fifo, to, len, copied) \ -__kfifo_uint_must_check_helper( \ +__kfifo_int_must_check_helper( \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ void __user *__to = (to); \ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c66c702a4f07..9cb0a3d7874f 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -146,6 +146,7 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_PENDING_TIMER 2 #define KVM_REQ_UNHALT 3 +#define KVM_REQ_VM_BUGGED (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQUEST_ARCH_BASE 8 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ @@ -505,6 +506,7 @@ struct kvm { struct srcu_struct irq_srcu; pid_t userspace_pid; unsigned int max_halt_poll_ns; + bool vm_bugged; }; #define kvm_err(fmt, ...) \ @@ -533,6 +535,31 @@ struct kvm { #define vcpu_err(vcpu, fmt, ...) \ kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) +bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); +static inline void kvm_vm_bugged(struct kvm *kvm) +{ + kvm->vm_bugged = true; + kvm_make_all_cpus_request(kvm, KVM_REQ_VM_BUGGED); +} + +#define KVM_BUG(cond, kvm, fmt...) 
\ +({ \ + int __ret = (cond); \ + \ + if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \ + kvm_vm_bugged(kvm); \ + unlikely(__ret); \ +}) + +#define KVM_BUG_ON(cond, kvm) \ +({ \ + int __ret = (cond); \ + \ + if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \ + kvm_vm_bugged(kvm); \ + unlikely(__ret); \ +}) + static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm) { return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); @@ -850,7 +877,6 @@ void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, struct kvm_vcpu *except, unsigned long *vcpu_bitmap, cpumask_var_t tmp); -bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, struct kvm_vcpu *except); bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req, @@ -885,6 +911,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); +long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg); int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); @@ -988,7 +1016,7 @@ static inline void kvm_arch_end_assignment(struct kvm *kvm) { } -static inline bool kvm_arch_has_assigned_device(struct kvm *kvm) +static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm) { return false; } @@ -1463,6 +1491,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, unsigned long start, unsigned long end); +void kvm_arch_guest_memory_reclaimed(struct kvm *kvm); + #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); #else diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 20b6797babe2..2c2586312b44 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -192,7 +192,7 @@ static inline void lockdep_init_map_waits(struct lockdep_map *lock, const char *name, struct lock_class_key *key, int subclass, u8 inner, u8 outer) { - lockdep_init_map_type(lock, name, key, subclass, inner, LD_WAIT_INV, LD_LOCK_NORMAL); + lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL); } static inline void @@ -215,24 +215,28 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name, * or they are too narrow (they suffer from a false class-split): */ #define lockdep_set_class(lock, key) \ - lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0, \ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) + lockdep_init_map_type(&(lock)->dep_map, #key, key, 0, \ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer, \ + (lock)->dep_map.lock_type) #define lockdep_set_class_and_name(lock, key, name) \ - lockdep_init_map_waits(&(lock)->dep_map, name, key, 0, \ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) + lockdep_init_map_type(&(lock)->dep_map, name, key, 0, \ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer, \ + (lock)->dep_map.lock_type) #define lockdep_set_class_and_subclass(lock, key, sub) \ - lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) + lockdep_init_map_type(&(lock)->dep_map, #key, key, 
sub, \ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer, \ + (lock)->dep_map.lock_type) #define lockdep_set_subclass(lock, sub) \ - lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer) + lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer, \ + (lock)->dep_map.lock_type) #define lockdep_set_novalidate_class(lock) \ lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock) diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 37dc5594081d..4bd58de49222 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -313,7 +313,7 @@ LSM_HOOK(int, 0, secmark_relabel_packet, u32 secid) LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void) LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void) LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req, - struct flowi *fl) + struct flowi_common *flic) LSM_HOOK(int, 0, tun_dev_alloc_security, void **security) LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security) LSM_HOOK(int, 0, tun_dev_create, void) @@ -353,7 +353,7 @@ LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x) LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x, - struct xfrm_policy *xp, const struct flowi *fl) + struct xfrm_policy *xp, const struct flowi_common *flic) LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid, int ckall) #endif /* CONFIG_SECURITY_NETWORK_XFRM */ diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 0ad8aa657914..613928e8a9ea 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1114,7 +1114,7 @@ * @xfrm_state_pol_flow_match: * @x contains the state to match. * @xp contains the policy to check for a match. - * @fl contains the flow to check for a match. + * @flic contains the flowi_common struct to check for a match. * Return 1 if there is a match. * @xfrm_decode_session: * @skb points to skb to decode. diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h index 0661af17a758..b0da04fe087b 100644 --- a/include/linux/mc146818rtc.h +++ b/include/linux/mc146818rtc.h @@ -86,6 +86,8 @@ struct cmos_rtc_board_info { /* 2 values for divider stage reset, others for "testing purposes only" */ # define RTC_DIV_RESET1 0x60 # define RTC_DIV_RESET2 0x70 + /* In AMD BKDG bit 5 and 6 are reserved, bit 4 is for select dv0 bank */ +# define RTC_AMD_BANK_SELECT 0x10 /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 
15=2Hz */ # define RTC_RATE_SELECT 0x0F @@ -123,7 +125,11 @@ struct cmos_rtc_board_info { #define RTC_IO_EXTENT_USED RTC_IO_EXTENT #endif /* ARCH_RTC_LOCATION */ -unsigned int mc146818_get_time(struct rtc_time *time); +bool mc146818_does_rtc_work(void); +int mc146818_get_time(struct rtc_time *time); int mc146818_set_time(struct rtc_time *time); +bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param), + void *param); + #endif /* _MC146818RTC_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index b3a571992d82..bdf9dcd2c9df 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -491,6 +491,7 @@ bool memblock_is_map_memory(phys_addr_t addr); bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size); bool memblock_is_reserved(phys_addr_t addr); bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); +bool memblock_is_nomap_remove(void); void memblock_dump_all(void); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6ea0873708b1..2e9c8a504fab 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -345,6 +345,9 @@ struct mem_cgroup { extern struct mem_cgroup *root_mem_cgroup; +struct lruvec *page_to_lruvec(struct page *page, pg_data_t *pgdat); +void do_traversal_all_lruvec(void); + static __always_inline bool memcg_stat_item_in_bytes(int idx) { if (idx == MEMCG_PERCPU_B) @@ -969,6 +972,15 @@ void split_page_memcg(struct page *head, unsigned int nr); struct mem_cgroup; +static inline struct lruvec *page_to_lruvec(struct page *page, pg_data_t *pgdat) +{ + return NULL; +} + +static inline void do_traversal_all_lruvec(void) +{ +} + static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) { return true; diff --git a/include/linux/memregion.h b/include/linux/memregion.h index e11595256cac..c04c4fd2e209 100644 --- a/include/linux/memregion.h +++ b/include/linux/memregion.h @@ -16,7 +16,7 @@ static inline int memregion_alloc(gfp_t gfp) { return -ENOMEM; } -void memregion_free(int id) +static inline void memregion_free(int id) { } #endif diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h index 69632c1b07bd..ae3e7a5c5219 100644 --- a/include/linux/mfd/t7l66xb.h +++ b/include/linux/mfd/t7l66xb.h @@ -12,7 +12,6 @@ struct t7l66xb_platform_data { int (*enable)(struct platform_device *dev); - int (*disable)(struct platform_device *dev); int (*suspend)(struct platform_device *dev); int (*resume)(struct platform_device *dev); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 41fbb4793394..ae88362216a4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -899,7 +899,7 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); struct mlx5_async_ctx { struct mlx5_core_dev *dev; atomic_t num_inflight; - struct wait_queue_head wait; + struct completion inflight_done; }; struct mlx5_async_work; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index eba1f1cbc9fb..6ca97729b54a 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4877,12 +4877,11 @@ struct mlx5_ifc_query_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x20]; - u8 ece[0x20]; + u8 reserved_at_40[0x40]; u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; diff --git a/include/linux/mm.h b/include/linux/mm.h index ff79e257a861..dfefcfa1d6a4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -845,6 +845,8 @@ static inline void 
*kvcalloc(size_t n, size_t size, gfp_t flags) return kvmalloc_array(n, size, flags | __GFP_ZERO); } +extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, + gfp_t flags); extern void kvfree(const void *addr); extern void kvfree_sensitive(const void *addr, size_t len); @@ -1758,6 +1760,12 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, #ifdef CONFIG_SPECULATIVE_PAGE_FAULT static inline void vm_write_begin(struct vm_area_struct *vma) { + /* + * Isolated vma might be freed without exclusive mmap_lock but + * speculative page fault handler still needs to know it was changed. + */ + if (!RB_EMPTY_NODE(&vma->vm_rb)) + mmap_assert_write_locked(vma->vm_mm); /* * The reads never spins and preemption * disablement is not required. @@ -2735,6 +2743,7 @@ extern int install_special_mapping(struct mm_struct *mm, unsigned long flags, struct page **pages); unsigned long randomize_stack_top(unsigned long stack_top); +unsigned long randomize_page(unsigned long start, unsigned long range); extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); @@ -3362,7 +3371,6 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping, extern int sysctl_nr_trim_pages; extern bool pte_map_lock_addr(struct vm_fault *vmf, unsigned long addr); extern int reclaim_shmem_address_space(struct address_space *mapping); -extern int reclaim_pages_from_list(struct list_head *page_list); /** * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 8fc71e9d7bb0..835e4f83e7a4 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -4,6 +4,10 @@ #include #include +#ifndef __GENKSYMS__ +#define PROTECT_TRACE_INCLUDE_PATH +#include +#endif /** * page_is_file_lru - should the page be on a file LRU or anon LRU? 
@@ -48,6 +52,7 @@ static __always_inline void update_lru_size(struct lruvec *lruvec, static __always_inline void add_page_to_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { + trace_android_vh_add_page_to_lrulist(page, false, lru); update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); list_add(&page->lru, &lruvec->lists[lru]); } @@ -55,6 +60,7 @@ static __always_inline void add_page_to_lru_list(struct page *page, static __always_inline void add_page_to_lru_list_tail(struct page *page, struct lruvec *lruvec, enum lru_list lru) { + trace_android_vh_add_page_to_lrulist(page, false, lru); update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); list_add_tail(&page->lru, &lruvec->lists[lru]); } @@ -62,6 +68,7 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page, static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { + trace_android_vh_del_page_from_lrulist(page, false, lru); list_del(&page->lru); update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page)); } diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index bb62eae4d88a..71e585990fe9 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -273,6 +273,7 @@ struct mmc_card { #define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */ #define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */ #define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */ +#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */ bool reenable_cmdq; /* Re-enable Command Queue */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 2884634fdf0a..9051aca9ce4d 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -445,7 +445,7 @@ static inline bool mmc_ready_for_data(u32 status) #define MMC_SECURE_TRIM1_ARG 0x80000001 #define MMC_SECURE_TRIM2_ARG 0x80008000 #define MMC_SECURE_ARGS 0x80000000 -#define MMC_TRIM_ARGS 0x00008001 +#define MMC_TRIM_OR_DISCARD_ARGS 0x00008003 #define mmc_driver_type_mask(n) (1 << (n)) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f72b488b790b..f611a06670cb 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1279,13 +1279,16 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms) static inline struct mem_section *__nr_to_section(unsigned long nr) { + unsigned long root = SECTION_NR_TO_ROOT(nr); + + if (unlikely(root >= NR_SECTION_ROOTS)) + return NULL; + #ifdef CONFIG_SPARSEMEM_EXTREME - if (!mem_section) + if (!mem_section || !mem_section[root]) return NULL; #endif - if (!mem_section[SECTION_NR_TO_ROOT(nr)]) - return NULL; - return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; + return &mem_section[root][nr & SECTION_ROOT_MASK]; } extern unsigned long __section_nr(struct mem_section *ms); extern size_t mem_section_usage_size(void); @@ -1467,37 +1470,6 @@ struct mminit_pfnnid_cache { #define pfn_valid_within(pfn) (1) #endif -#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL -/* - * pfn_valid() is meant to be able to tell if a given PFN has valid memmap - * associated with it or not. This means that a struct page exists for this - * pfn. The caller cannot assume the page is fully initialized in general. - * Hotplugable pages might not have been onlined yet. pfn_to_online_page() - * will ensure the struct page is fully online and initialized. Special pages - * (e.g. 
ZONE_DEVICE) are never onlined and should be treated accordingly.
- *
- * In FLATMEM, it is expected that holes always have valid memmap as long as
- * there is valid PFNs either side of the hole. In SPARSEMEM, it is assumed
- * that a valid section has a memmap for the entire section.
- *
- * However, an ARM, and maybe other embedded architectures in the future
- * free memmap backing holes to save memory on the assumption the memmap is
- * never used. The page_zone linkages are then broken even though pfn_valid()
- * returns true. A walker of the full memmap must then do this additional
- * check to ensure the memmap they are looking at is sane by making sure
- * the zone and PFN linkages are still valid. This is expensive, but walkers
- * of the full memmap are extremely rare.
- */
-bool memmap_valid_within(unsigned long pfn,
-			struct page *page, struct zone *zone);
-#else
-static inline bool memmap_valid_within(unsigned long pfn,
-			struct page *page, struct zone *zone)
-{
-	return true;
-}
-#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
-
 #endif /* !__GENERATING_BOUNDS.H */
 #endif /* !__ASSEMBLY__ */
 #endif /* _LINUX_MMZONE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index c5c3ce08f646..4cd6d889d5ba 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -293,8 +293,7 @@ extern typeof(name) __mod_##type##__##name##_device_table \
  * files require multiple MODULE_FIRMWARE() specifiers */
 #define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
-#define _MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, #ns)
-#define MODULE_IMPORT_NS(ns) _MODULE_IMPORT_NS(ns)
+#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, __stringify(ns))
 struct notifier_block;
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index fd1ecb821106..d88bb56c18e2 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -286,6 +286,7 @@ struct cfi_private {
 	map_word sector_erase_cmd;
 	unsigned long chipshift; /* Because they're of the same type */
 	const char *im_name; /* inter_module name for cmdset_setup */
+	unsigned long quirks;
 	struct flchip chips[]; /* per-chip data structure for each chip */
 };
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 157357ec1441..fc41fecfe44f 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -388,10 +388,8 @@ struct mtd_info {
 	/* List of partitions attached to this MTD device */
 	struct list_head partitions;
-	union {
-		struct mtd_part part;
-		struct mtd_master master;
-	};
+	struct mtd_part part;
+	struct mtd_master master;
 };
 static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index db2eaff77f41..2044fbd55d73 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -474,12 +474,100 @@ struct nand_sdr_timings {
 	u32 tWW_min;
 };
+/**
+ * struct nand_nvddr_timings - NV-DDR NAND chip timings
+ *
+ * This struct defines the timing requirements of an NV-DDR NAND data interface.
+ * This information can be found in every NAND datasheet, and the meaning of
+ * each timing is described in the ONFI specification:
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf
+ * (chapter 4.18.2 NV-DDR)
+ *
+ * All these timings are expressed in picoseconds.
+ * + * @tBERS_max: Block erase time + * @tCCS_min: Change column setup time + * @tPROG_max: Page program time + * @tR_max: Page read time + * @tAC_min: Access window of DQ[7:0] from CLK + * @tAC_max: Access window of DQ[7:0] from CLK + * @tADL_min: ALE to data loading time + * @tCAD_min: Command, Address, Data delay + * @tCAH_min: Command/Address DQ hold time + * @tCALH_min: W/R_n, CLE and ALE hold time + * @tCALS_min: W/R_n, CLE and ALE setup time + * @tCAS_min: Command/address DQ setup time + * @tCEH_min: CE# high hold time + * @tCH_min: CE# hold time + * @tCK_min: Average clock cycle time + * @tCS_min: CE# setup time + * @tDH_min: Data hold time + * @tDQSCK_min: Start of the access window of DQS from CLK + * @tDQSCK_max: End of the access window of DQS from CLK + * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device + * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device + * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device + * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access + * @tDS_min: Data setup time + * @tDSC_min: DQS cycle time + * @tFEAT_max: Busy time for Set Features and Get Features + * @tITC_max: Interface and Timing Mode Change time + * @tQHS_max: Data hold skew factor + * @tRHW_min: Data output cycle to command, address, or data input cycle + * @tRR_min: Ready to RE# low (data only) + * @tRST_max: Device reset time, measured from the falling edge of R/B# to the + * rising edge of R/B#. + * @tWB_max: WE# high to SR[6] low + * @tWHR_min: WE# high to RE# low + * @tWRCK_min: W/R_n low to data output cycle + * @tWW_min: WP# transition to WE# low + */ +struct nand_nvddr_timings { + u64 tBERS_max; + u32 tCCS_min; + u64 tPROG_max; + u64 tR_max; + u32 tAC_min; + u32 tAC_max; + u32 tADL_min; + u32 tCAD_min; + u32 tCAH_min; + u32 tCALH_min; + u32 tCALS_min; + u32 tCAS_min; + u32 tCEH_min; + u32 tCH_min; + u32 tCK_min; + u32 tCS_min; + u32 tDH_min; + u32 tDQSCK_min; + u32 tDQSCK_max; + u32 tDQSD_min; + u32 tDQSD_max; + u32 tDQSHZ_max; + u32 tDQSQ_max; + u32 tDS_min; + u32 tDSC_min; + u32 tFEAT_max; + u32 tITC_max; + u32 tQHS_max; + u32 tRHW_min; + u32 tRR_min; + u32 tRST_max; + u32 tWB_max; + u32 tWHR_min; + u32 tWRCK_min; + u32 tWW_min; +}; + /** * enum nand_interface_type - NAND interface type * @NAND_SDR_IFACE: Single Data Rate interface + * @NAND_NVDDR_IFACE: Double Data Rate interface */ enum nand_interface_type { NAND_SDR_IFACE, + NAND_NVDDR_IFACE, }; /** @@ -488,6 +576,7 @@ enum nand_interface_type { * @timings: The timing information * @timings.mode: Timing mode as defined in the specification * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. + * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE. 
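 *
 * Drivers should not poke at the union directly; the accessors defined
 * below check the type first. A sketch of NV-DDR retrieval, where the
 * controller hook is hypothetical:
 *
 *	const struct nand_nvddr_timings *t = nand_get_nvddr_timings(conf);
 *
 *	if (!IS_ERR(t))
 *		my_controller_set_tCK(t->tCK_min);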
*/ struct nand_interface_config { enum nand_interface_type type; @@ -495,10 +584,29 @@ struct nand_interface_config { unsigned int mode; union { struct nand_sdr_timings sdr; + struct nand_nvddr_timings nvddr; }; } timings; }; +/** + * nand_interface_is_sdr - get the interface type + * @conf: The data interface + */ +static bool nand_interface_is_sdr(const struct nand_interface_config *conf) +{ + return conf->type == NAND_SDR_IFACE; +} + +/** + * nand_interface_is_nvddr - get the interface type + * @conf: The data interface + */ +static bool nand_interface_is_nvddr(const struct nand_interface_config *conf) +{ + return conf->type == NAND_NVDDR_IFACE; +} + /** * nand_get_sdr_timings - get SDR timing from data interface * @conf: The data interface @@ -506,12 +614,25 @@ struct nand_interface_config { static inline const struct nand_sdr_timings * nand_get_sdr_timings(const struct nand_interface_config *conf) { - if (conf->type != NAND_SDR_IFACE) + if (!nand_interface_is_sdr(conf)) return ERR_PTR(-EINVAL); return &conf->timings.sdr; } +/** + * nand_get_nvddr_timings - get NV-DDR timing from data interface + * @conf: The data interface + */ +static inline const struct nand_nvddr_timings * +nand_get_nvddr_timings(const struct nand_interface_config *conf) +{ + if (!nand_interface_is_nvddr(conf)) + return ERR_PTR(-EINVAL); + + return &conf->timings.nvddr; +} + /** * struct nand_op_cmd_instr - Definition of a command instruction * @opcode: the command to issue in one cycle diff --git a/include/linux/namei.h b/include/linux/namei.h index a4bb992623c4..b9605b2b46e7 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -46,6 +46,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT}; #define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */ #define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */ #define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */ +#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */ /* LOOKUP_* flags which do scope-related checks based on the dirfd. */ #define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT) diff --git a/include/linux/net.h b/include/linux/net.h index f5569faf930b..e201a7fbf3bc 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -43,8 +43,6 @@ struct net; #define SOCK_PASSCRED 3 #define SOCK_PASSSEC 4 -#define PROTO_CMSG_DATA_ONLY 0x0001 - #ifndef ARCH_HAS_SOCKET_TYPES /** * enum sock_type - Socket types @@ -139,7 +137,8 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, struct proto_ops { int family; - unsigned int flags; + unsigned int flags; // ANDROID - removed in 5.10.162, but remains to + // preserve ABI. It is not used anywhere. struct module *owner; int (*release) (struct socket *sock); int (*bind) (struct socket *sock, diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index f96b7f8d82e5..e2a92697a663 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -158,7 +158,7 @@ enum { #define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST) #define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC) -/* Finds the next feature with the highest number of the range of start till 0. +/* Finds the next feature with the highest number of the range of start-1 till 0. 
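 *
 * Since the helper itself now steps back from @start, the iterator
 * below passes the current bit unchanged on each pass; typical use,
 * with dev being any struct net_device pointer:
 *
 *	netdev_features_t features = dev->features;
 *	int bit;
 *
 *	for_each_netdev_feature(features, bit)
 *		pr_debug("feature bit %d is set\n", bit);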
*/ static inline int find_next_netdev_feature(u64 feature, unsigned long start) { @@ -177,7 +177,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) for ((bit) = find_next_netdev_feature((mask_addr), \ NETDEV_FEATURE_COUNT); \ (bit) >= 0; \ - (bit) = find_next_netdev_feature((mask_addr), (bit) - 1)) + (bit) = find_next_netdev_feature((mask_addr), (bit))) /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3639d7ae68e8..7e7a003dd25a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -647,9 +647,23 @@ extern int sysctl_devconf_inherit_init_net; */ static inline bool net_has_fallback_tunnels(const struct net *net) { - return !IS_ENABLED(CONFIG_SYSCTL) || - !sysctl_fb_tunnels_only_for_init_net || - (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1); +#if IS_ENABLED(CONFIG_SYSCTL) + int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net); + + return !fb_tunnels_only_for_init_net || + (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1); +#else + return true; +#endif +} + +static inline int net_inherit_devconf(void) +{ +#if IS_ENABLED(CONFIG_SYSCTL) + return READ_ONCE(sysctl_devconf_inherit_init_net); +#else + return 0; +#endif } static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) @@ -3234,14 +3248,6 @@ static inline bool dev_has_header(const struct net_device *dev) return dev->header_ops && dev->header_ops->create; } -typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, - int len, int size); -int register_gifconf(unsigned int family, gifconf_func_t *gifconf); -static inline int unregister_gifconf(unsigned int family) -{ - return register_gifconf(family, NULL); -} - #ifdef CONFIG_NET_FLOW_LIMIT #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ struct sd_flow_limit { diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 5206d0991022..773943bc9228 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -201,6 +201,9 @@ struct ip_set_region { u32 elements; /* Number of elements vs timeout */ }; +/* Max range where every element is added/deleted in one step */ +#define IPSET_MAX_RANGE (1<<20) + /* The core set type structure */ struct ip_set_type { struct list_head list; diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 15775bc5b174..cdd98d7bd4be 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -62,6 +62,33 @@ static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type) return subsys << 8 | msg_type; } +static inline void nfnl_fill_hdr(struct nlmsghdr *nlh, u8 family, u8 version, + __be16 res_id) +{ + struct nfgenmsg *nfmsg; + + nfmsg = nlmsg_data(nlh); + nfmsg->nfgen_family = family; + nfmsg->version = version; + nfmsg->res_id = res_id; +} + +static inline struct nlmsghdr *nfnl_msg_put(struct sk_buff *skb, u32 portid, + u32 seq, int type, int flags, + u8 family, u8 version, + __be16 res_id) +{ + struct nlmsghdr *nlh; + + nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), flags); + if (!nlh) + return NULL; + + nfnl_fill_hdr(nlh, family, version, res_id); + + return nlh; +} + void nfnl_lock(__u8 subsys_id); void nfnl_unlock(__u8 subsys_id); #ifdef CONFIG_PROVE_LOCKING diff --git a/include/linux/netfilter_bridge/ebtables.h 
b/include/linux/netfilter_bridge/ebtables.h index 3a956145a25c..a18fb73a2b77 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -94,10 +94,6 @@ struct ebt_table { struct ebt_replace_kernel *table; unsigned int valid_hooks; rwlock_t lock; - /* e.g. could be the table explicitly only allows certain - * matches, targets, ... 0 == let it in */ - int (*check)(const struct ebt_table_info *info, - unsigned int valid_hooks); /* the data used by the kernel */ struct ebt_table_info *private; struct module *me; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 1e0a3497bdb4..e39342945a80 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -478,10 +478,10 @@ static inline const struct cred *nfs_file_cred(struct file *file) * linux/fs/nfs/direct.c */ extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *); -extern ssize_t nfs_file_direct_read(struct kiocb *iocb, - struct iov_iter *iter); -extern ssize_t nfs_file_direct_write(struct kiocb *iocb, - struct iov_iter *iter); +ssize_t nfs_file_direct_read(struct kiocb *iocb, + struct iov_iter *iter, bool swap); +ssize_t nfs_file_direct_write(struct kiocb *iocb, + struct iov_iter *iter, bool swap); /* * linux/fs/nfs/dir.c @@ -551,7 +551,7 @@ extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_page(struct inode *inode, struct page *page); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); -extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); +extern struct nfs_commit_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_commit_data *data); bool nfs_commit_end(struct nfs_mds_commit_info *cinfo); diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 750c7f395ca9..f700ff2df074 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -122,6 +122,8 @@ int watchdog_nmi_probe(void); int watchdog_nmi_enable(unsigned int cpu); void watchdog_nmi_disable(unsigned int cpu); +void lockup_detector_reconfigure(void); + /** * touch_nmi_watchdog - restart NMI watchdog timeout. 
 *
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index ac398e143c9a..2a63ef05a6cc 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -42,11 +42,11 @@
  * void nodes_shift_right(dst, src, n)	Shift right
  * void nodes_shift_left(dst, src, n)	Shift left
  *
- * int first_node(mask)			Number lowest set bit, or MAX_NUMNODES
- * int next_node(node, mask)		Next node past 'node', or MAX_NUMNODES
- * int next_node_in(node, mask)		Next node past 'node', or wrap to first,
+ * unsigned int first_node(mask)	Number lowest set bit, or MAX_NUMNODES
+ * unsigned int next_node(node, mask)	Next node past 'node', or MAX_NUMNODES
+ * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
  *					or MAX_NUMNODES
- * int first_unset_node(mask)		First node not set in mask, or
+ * unsigned int first_unset_node(mask)	First node not set in mask, or
  *					MAX_NUMNODES
  *
  * nodemask_t nodemask_of_node(node)	Return nodemask with bit 'node' set
@@ -153,7 +153,7 @@ static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
 #define node_test_and_set(node, nodemask) \
 			__node_test_and_set((node), &(nodemask))
-static inline int __node_test_and_set(int node, nodemask_t *addr)
+static inline bool __node_test_and_set(int node, nodemask_t *addr)
 {
 	return test_and_set_bit(node, addr->bits);
 }
@@ -200,7 +200,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
 #define nodes_equal(src1, src2) \
 			__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_equal(const nodemask_t *src1p,
+static inline bool __nodes_equal(const nodemask_t *src1p,
 			const nodemask_t *src2p, unsigned int nbits)
 {
 	return bitmap_equal(src1p->bits, src2p->bits, nbits);
@@ -208,7 +208,7 @@ static inline int __nodes_equal(const nodemask_t *src1p,
 #define nodes_intersects(src1, src2) \
 			__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_intersects(const nodemask_t *src1p,
+static inline bool __nodes_intersects(const nodemask_t *src1p,
 			const nodemask_t *src2p, unsigned int nbits)
 {
 	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
@@ -216,20 +216,20 @@ static inline int __nodes_intersects(const nodemask_t *src1p,
 #define nodes_subset(src1, src2) \
 			__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_subset(const nodemask_t *src1p,
+static inline bool __nodes_subset(const nodemask_t *src1p,
 			const nodemask_t *src2p, unsigned int nbits)
 {
 	return bitmap_subset(src1p->bits, src2p->bits, nbits);
 }
 #define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
+static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
 {
 	return bitmap_empty(srcp->bits, nbits);
 }
 #define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
+static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
 {
 	return bitmap_full(srcp->bits, nbits);
 }
@@ -260,15 +260,15 @@ static inline void __nodes_shift_left(nodemask_t *dstp,
 > MAX_NUMNODES, then the silly min_ts could be dropped.
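 *
 * Callers normally walk a mask through for_each_node_mask(), defined
 * further down; typical use:
 *
 *	int nid;
 *
 *	for_each_node_mask(nid, node_states[N_ONLINE])
 *		pr_info("node %d is online\n", nid);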
*/ #define first_node(src) __first_node(&(src)) -static inline int __first_node(const nodemask_t *srcp) +static inline unsigned int __first_node(const nodemask_t *srcp) { - return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES)); + return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES)); } #define next_node(n, src) __next_node((n), &(src)) -static inline int __next_node(int n, const nodemask_t *srcp) +static inline unsigned int __next_node(int n, const nodemask_t *srcp) { - return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); + return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); } /* @@ -276,7 +276,7 @@ static inline int __next_node(int n, const nodemask_t *srcp) * the first node in src if needed. Returns MAX_NUMNODES if src is empty. */ #define next_node_in(n, src) __next_node_in((n), &(src)) -int __next_node_in(int node, const nodemask_t *srcp); +unsigned int __next_node_in(int node, const nodemask_t *srcp); static inline void init_nodemask_of_node(nodemask_t *mask, int node) { @@ -296,9 +296,9 @@ static inline void init_nodemask_of_node(nodemask_t *mask, int node) }) #define first_unset_node(mask) __first_unset_node(&(mask)) -static inline int __first_unset_node(const nodemask_t *maskp) +static inline unsigned int __first_unset_node(const nodemask_t *maskp) { - return min_t(int,MAX_NUMNODES, + return min_t(unsigned int, MAX_NUMNODES, find_first_zero_bit(maskp->bits, MAX_NUMNODES)); } @@ -375,14 +375,13 @@ static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp, } #if MAX_NUMNODES > 1 -#define for_each_node_mask(node, mask) \ - for ((node) = first_node(mask); \ - (node) < MAX_NUMNODES; \ - (node) = next_node((node), (mask))) +#define for_each_node_mask(node, mask) \ + for ((node) = first_node(mask); \ + (node >= 0) && (node) < MAX_NUMNODES; \ + (node) = next_node((node), (mask))) #else /* MAX_NUMNODES == 1 */ -#define for_each_node_mask(node, mask) \ - if (!nodes_empty(mask)) \ - for ((node) = 0; (node) < 1; (node)++) +#define for_each_node_mask(node, mask) \ + for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++) #endif /* MAX_NUMNODES */ /* @@ -436,11 +435,11 @@ static inline int num_node_state(enum node_states state) #define first_online_node first_node(node_states[N_ONLINE]) #define first_memory_node first_node(node_states[N_MEMORY]) -static inline int next_online_node(int nid) +static inline unsigned int next_online_node(int nid) { return next_node(nid, node_states[N_ONLINE]); } -static inline int next_memory_node(int nid) +static inline unsigned int next_memory_node(int nid) { return next_node(nid, node_states[N_MEMORY]); } diff --git a/include/linux/objtool.h b/include/linux/objtool.h index 577f51436cf9..662f19374bd9 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -29,11 +29,19 @@ struct unwind_hint { * * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that * sp_reg+sp_offset points to the iret return frame. + * + * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function. + * Useful for code which doesn't have an ELF function annotation. + * + * UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc. 
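 *
 * With the reworked .macro below, the type is the only required
 * argument and the stack fields default to zero, so an entry stub can
 * hint itself with just (gas sketch):
 *
 *	UNWIND_HINT type=UNWIND_HINT_TYPE_ENTRY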
*/ #define UNWIND_HINT_TYPE_CALL 0 #define UNWIND_HINT_TYPE_REGS 1 #define UNWIND_HINT_TYPE_REGS_PARTIAL 2 -#define UNWIND_HINT_TYPE_RET_OFFSET 3 +#define UNWIND_HINT_TYPE_FUNC 3 +#define UNWIND_HINT_TYPE_ENTRY 4 +#define UNWIND_HINT_TYPE_SAVE 5 +#define UNWIND_HINT_TYPE_RESTORE 6 #ifdef CONFIG_STACK_VALIDATION @@ -96,7 +104,7 @@ struct unwind_hint { * the debuginfo as necessary. It will also warn if it sees any * inconsistencies. */ -.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .Lunwind_hint_ip_\@: .pushsection .discard.unwind_hints /* struct unwind_hint */ @@ -120,7 +128,7 @@ struct unwind_hint { #define STACK_FRAME_NON_STANDARD(func) #else #define ANNOTATE_INTRA_FUNCTION_CALL -.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .endm #endif diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 07ca187fc5e4..fe339106e02c 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -113,8 +113,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) } static inline int of_dma_configure_id(struct device *dev, - struct device_node *np, - bool force_dma) + struct device_node *np, + bool force_dma, + const u32 *id) { return 0; } diff --git a/include/linux/once.h b/include/linux/once.h index 9225ee6d96c7..8a1e32f75c81 100644 --- a/include/linux/once.h +++ b/include/linux/once.h @@ -5,10 +5,18 @@ #include #include +/* Helpers used from arbitrary contexts. + * Hard irqs are blocked, be cautious. + */ bool __do_once_start(bool *done, unsigned long *flags); void __do_once_done(bool *done, struct static_key_true *once_key, unsigned long *flags); +/* Variant for process contexts only. */ +bool __do_once_slow_start(bool *done); +void __do_once_slow_done(bool *done, struct static_key_true *once_key, + struct module *mod); + /* Call a function exactly once. The idea of DO_ONCE() is to perform * a function call such as initialization of random seeds, etc, only * once, where DO_ONCE() can live in the fast-path. After @func has @@ -52,9 +60,29 @@ void __do_once_done(bool *done, struct static_key_true *once_key, ___ret; \ }) +/* Variant of DO_ONCE() for process/sleepable contexts. */ +#define DO_ONCE_SLOW(func, ...) 
\ + ({ \ + bool ___ret = false; \ + static bool __section(".data.once") ___done = false; \ + static DEFINE_STATIC_KEY_TRUE(___once_key); \ + if (static_branch_unlikely(&___once_key)) { \ + ___ret = __do_once_slow_start(&___done); \ + if (unlikely(___ret)) { \ + func(__VA_ARGS__); \ + __do_once_slow_done(&___done, &___once_key, \ + THIS_MODULE); \ + } \ + } \ + ___ret; \ + }) + #define get_random_once(buf, nbytes) \ DO_ONCE(get_random_bytes, (buf), (nbytes)) #define get_random_once_wait(buf, nbytes) \ DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \ +#define get_random_slow_once(buf, nbytes) \ + DO_ONCE_SLOW(get_random_bytes, (buf), (nbytes)) + #endif /* _LINUX_ONCE_H */ diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index bb9a4a2b2aa2..2daace47717c 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -60,8 +60,9 @@ static inline void page_ext_init(void) { } #endif - struct page_ext *lookup_page_ext(const struct page *page); +extern struct page_ext *page_ext_get(struct page *page); +extern void page_ext_put(struct page_ext *page_ext); static inline struct page_ext *page_ext_next(struct page_ext *curr) { @@ -77,11 +78,6 @@ static inline void pgdat_page_ext_init(struct pglist_data *pgdat) { } -static inline struct page_ext *lookup_page_ext(const struct page *page) -{ - return NULL; -} - static inline void page_ext_init(void) { } @@ -93,5 +89,14 @@ static inline void page_ext_init_flatmem_late(void) static inline void page_ext_init_flatmem(void) { } + +static inline struct page_ext *page_ext_get(struct page *page) +{ + return NULL; +} + +static inline void page_ext_put(struct page_ext *page_ext) +{ +} #endif /* CONFIG_PAGE_EXTENSION */ #endif /* __LINUX_PAGE_EXT_H */ diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index d8a6aecf99cb..2a21c793d815 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -47,62 +47,77 @@ extern struct page_ext_operations page_idle_ops; static inline bool page_is_young(struct page *page) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get(page); + bool page_young; if (unlikely(!page_ext)) return false; - return test_bit(PAGE_EXT_YOUNG, &page_ext->flags); + page_young = test_bit(PAGE_EXT_YOUNG, &page_ext->flags); + page_ext_put(page_ext); + + return page_young; } static inline void set_page_young(struct page *page) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; set_bit(PAGE_EXT_YOUNG, &page_ext->flags); + page_ext_put(page_ext); } static inline bool test_and_clear_page_young(struct page *page) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get(page); + bool page_young; if (unlikely(!page_ext)) return false; - return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags); + page_young = test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags); + page_ext_put(page_ext); + + return page_young; } static inline bool page_is_idle(struct page *page) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get(page); + bool page_idle; if (unlikely(!page_ext)) return false; - return test_bit(PAGE_EXT_IDLE, &page_ext->flags); + page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags); + page_ext_put(page_ext); + + return page_idle; } static inline void set_page_idle(struct page *page) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get(page); if 
(unlikely(!page_ext)) return; set_bit(PAGE_EXT_IDLE, &page_ext->flags); + page_ext_put(page_ext); } static inline void clear_page_idle(struct page *page) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; clear_bit(PAGE_EXT_IDLE, &page_ext->flags); + page_ext_put(page_ext); } #endif /* CONFIG_64BIT */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 95fd50c28e18..0bd9c16a39ef 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1625,6 +1625,13 @@ static inline bool pci_aer_available(void) { return false; } bool pci_ats_disabled(void); +#ifdef CONFIG_PCIE_PTM +int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); +#else +static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) +{ return -EINVAL; } +#endif + void pci_cfg_access_lock(struct pci_dev *dev); bool pci_cfg_access_trylock(struct pci_dev *dev); void pci_cfg_access_unlock(struct pci_dev *dev); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index cf4e50e87cb5..a05f2d488b3f 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -59,6 +59,8 @@ #define PCI_CLASS_BRIDGE_EISA 0x0602 #define PCI_CLASS_BRIDGE_MC 0x0603 #define PCI_CLASS_BRIDGE_PCI 0x0604 +#define PCI_CLASS_BRIDGE_PCI_NORMAL 0x060400 +#define PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE 0x060401 #define PCI_CLASS_BRIDGE_PCMCIA 0x0605 #define PCI_CLASS_BRIDGE_NUBUS 0x0606 #define PCI_CLASS_BRIDGE_CARDBUS 0x0607 @@ -81,6 +83,7 @@ #define PCI_CLASS_SYSTEM_RTC 0x0803 #define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804 #define PCI_CLASS_SYSTEM_SDHCI 0x0805 +#define PCI_CLASS_SYSTEM_RCEC 0x0807 #define PCI_CLASS_SYSTEM_OTHER 0x0880 #define PCI_BASE_CLASS_INPUT 0x09 diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 536976636c58..92d172cfce06 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -9,6 +9,9 @@ #include #include +void _trace_android_vh_record_pcpu_rwsem_starttime( + struct task_struct *tsk, unsigned long settime); + struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int __percpu *read_count; @@ -73,6 +76,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem) * bleeding the critical section out. */ preempt_enable(); + _trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies); } static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem) @@ -93,14 +97,17 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem) * bleeding the critical section out. 
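 *
 * The Android vendor hook bookkeeping mirrors percpu_down_read(): a
 * successful trylock records the acquire time in jiffies, and
 * percpu_up_read() clears it again by passing 0:
 *
 *	_trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies);
 *	...
 *	_trace_android_vh_record_pcpu_rwsem_starttime(current, 0);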
*/ - if (ret) + if (ret) { + _trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies); rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_); + } return ret; } static inline void percpu_up_read(struct percpu_rw_semaphore *sem) { + _trace_android_vh_record_pcpu_rwsem_starttime(current, 0); rwsem_release(&sem->dep_map, _RET_IP_); preempt_disable(); diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index 02599687770c..7f03e02c48cd 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -216,6 +216,9 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, int cros_ec_check_result(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); +int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/intel-spi.h index 7f53a5c6f35e..7dda3f690465 100644 --- a/include/linux/platform_data/intel-spi.h +++ b/include/linux/platform_data/intel-spi.h @@ -19,11 +19,13 @@ enum intel_spi_type { /** * struct intel_spi_boardinfo - Board specific data for Intel SPI driver * @type: Type which this controller is compatible with - * @writeable: The chip is writeable + * @set_writeable: Try to make the chip writeable (optional) + * @data: Data to be passed to @set_writeable can be %NULL */ struct intel_spi_boardinfo { enum intel_spi_type type; - bool writeable; + bool (*set_writeable)(void __iomem *base, void *data); + void *data; }; #endif /* INTEL_SPI_PDATA_H */ diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h index 022bcea9edec..99a9b09dc839 100644 --- a/include/linux/platform_data/x86/pmc_atom.h +++ b/include/linux/platform_data/x86/pmc_atom.h @@ -7,6 +7,8 @@ #ifndef PMC_ATOM_H #define PMC_ATOM_H +#include + /* ValleyView Power Control Unit PCI Device ID */ #define PCI_DEVICE_ID_VLV_PMC 0x0F1C /* CherryTrail Power Control Unit PCI Device ID */ @@ -139,9 +141,9 @@ #define ACPI_MMIO_REG_LEN 0x100 #define PM1_CNT 0x4 -#define SLEEP_TYPE_MASK 0xFFFFECFF +#define SLEEP_TYPE_MASK GENMASK(12, 10) #define SLEEP_TYPE_S5 0x1C00 -#define SLEEP_ENABLE 0x2000 +#define SLEEP_ENABLE BIT(13) extern int pmc_atom_read(int offset, u32 *value); extern int pmc_atom_write(int offset, u32 value); diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 30091ab5de28..718600e83020 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -58,7 +58,7 @@ extern void pm_runtime_get_suppliers(struct device *dev); extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device_link *link); -extern void pm_runtime_release_supplier(struct device_link *link, bool check_idle); +extern void pm_runtime_release_supplier(struct device_link *link); /** * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. 
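In the pmc_atom hunk above, GENMASK(12, 10) expands to 0x1c00 and BIT(13) to 0x2000, so the sleep-type field and the enable bit become self-describing. A hedged sketch of a caller; the surrounding logic is invented for illustration:

        static bool foo_entering_s5(void)
        {
                u32 pm1_cnt;

                if (pmc_atom_read(PM1_CNT, &pm1_cnt))
                        return false;

                /* SLEEP_TYPE_MASK == GENMASK(12, 10) == 0x1c00 */
                return (pm1_cnt & SLEEP_TYPE_MASK) == SLEEP_TYPE_S5 &&
                       (pm1_cnt & SLEEP_ENABLE);
        }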
@@ -280,8 +280,7 @@ static inline void pm_runtime_get_suppliers(struct device *dev) {} static inline void pm_runtime_put_suppliers(struct device *dev) {} static inline void pm_runtime_new_link(struct device *dev) {} static inline void pm_runtime_drop_link(struct device_link *link) {} -static inline void pm_runtime_release_supplier(struct device_link *link, - bool check_idle) {} +static inline void pm_runtime_release_supplier(struct device_link *link) {} #endif /* !CONFIG_PM */ diff --git a/include/linux/prandom.h b/include/linux/prandom.h index 056d31317e49..a4aadd2dc153 100644 --- a/include/linux/prandom.h +++ b/include/linux/prandom.h @@ -10,6 +10,7 @@ #include #include +#include u32 prandom_u32(void); void prandom_bytes(void *buf, size_t nbytes); @@ -27,15 +28,10 @@ DECLARE_PER_CPU(unsigned long, net_rand_noise); * The core SipHash round function. Each line can be executed in * parallel given enough CPU resources. */ -#define PRND_SIPROUND(v0, v1, v2, v3) ( \ - v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \ - v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \ - v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \ - v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \ -) +#define PRND_SIPROUND(v0, v1, v2, v3) SIPHASH_PERMUTATION(v0, v1, v2, v3) -#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261) -#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573) +#define PRND_K0 (SIPHASH_CONST_0 ^ SIPHASH_CONST_2) +#define PRND_K1 (SIPHASH_CONST_1 ^ SIPHASH_CONST_3) #elif BITS_PER_LONG == 32 /* @@ -43,14 +39,9 @@ DECLARE_PER_CPU(unsigned long, net_rand_noise); * This is weaker, but 32-bit machines are not used for high-traffic * applications, so there is less output for an attacker to analyze. */ -#define PRND_SIPROUND(v0, v1, v2, v3) ( \ - v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \ - v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \ - v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \ - v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \ -) -#define PRND_K0 0x6c796765 -#define PRND_K1 0x74656462 +#define PRND_SIPROUND(v0, v1, v2, v3) HSIPHASH_PERMUTATION(v0, v1, v2, v3) +#define PRND_K0 (HSIPHASH_CONST_0 ^ HSIPHASH_CONST_2) +#define PRND_K1 (HSIPHASH_CONST_1 ^ HSIPHASH_CONST_3) #else #error Unsupported BITS_PER_LONG diff --git a/include/linux/printk.h b/include/linux/printk.h index f589b8b60806..14d13ecaa8f8 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -6,7 +6,6 @@ #include #include #include -#include #include extern const char linux_banner[]; diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 2a9df80ea887..ae7dbdfa3d83 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -30,7 +30,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, #define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */ #define PT_PTRACED 0x00000001 -#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */ #define PT_OPT_FLAG_SHIFT 3 /* PT_TRACE_* event enable flags */ @@ -47,12 +46,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, #define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT) #define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT) -/* single stepping state bits (used on ARM and PA-RISC) */ -#define PT_SINGLESTEP_BIT 31 -#define PT_SINGLESTEP (1< -struct random_ready_callback { - struct list_head list; - void (*func)(struct random_ready_callback *rdy); - struct module *owner; -}; +struct notifier_block; -extern void add_device_randomness(const 
void *, unsigned int); -extern void add_bootloader_randomness(const void *, unsigned int); +void add_device_randomness(const void *buf, unsigned int len); +void add_bootloader_randomness(const void *buf, size_t len); +void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value) __latent_entropy; +void add_interrupt_randomness(int irq) __latent_entropy; +void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy); #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) static inline void add_latent_entropy(void) { - add_device_randomness((const void *)&latent_entropy, - sizeof(latent_entropy)); + add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy)); } #else -static inline void add_latent_entropy(void) {} -#endif - -extern void add_input_randomness(unsigned int type, unsigned int code, - unsigned int value) __latent_entropy; -extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy; - -extern void get_random_bytes(void *buf, int nbytes); -extern int wait_for_random_bytes(void); -extern int __init rand_initialize(void); -extern bool rng_is_initialized(void); -extern int add_random_ready_callback(struct random_ready_callback *rdy); -extern void del_random_ready_callback(struct random_ready_callback *rdy); -extern int __must_check get_random_bytes_arch(void *buf, int nbytes); - -#ifndef MODULE -extern const struct file_operations random_fops, urandom_fops; +static inline void add_latent_entropy(void) { } #endif +void get_random_bytes(void *buf, int len); +int __must_check get_random_bytes_arch(void *buf, int len); u32 get_random_u32(void); u64 get_random_u64(void); static inline unsigned int get_random_int(void) @@ -80,36 +61,38 @@ static inline unsigned long get_random_long(void) static inline unsigned long get_random_canary(void) { - unsigned long val = get_random_long(); - - return val & CANARY_MASK; + return get_random_long() & CANARY_MASK; } +int __init random_init(const char *command_line); +bool rng_is_initialized(void); +int wait_for_random_bytes(void); +int register_random_ready_notifier(struct notifier_block *nb); +int unregister_random_ready_notifier(struct notifier_block *nb); + /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes). * Returns the result of the call to wait_for_random_bytes. 
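The old random_ready_callback list gives way to a plain notifier chain here; a minimal client sketch, with invented names:

        static int foo_rng_ready(struct notifier_block *nb, unsigned long action,
                                 void *data)
        {
                /* the CRNG is now initialized; safe to draw long-lived keys */
                return NOTIFY_DONE;
        }

        static struct notifier_block foo_rng_nb = {
                .notifier_call = foo_rng_ready,
        };

        static int __init foo_init(void)
        {
                return register_random_ready_notifier(&foo_rng_nb);
        }

        static void __exit foo_exit(void)
        {
                unregister_random_ready_notifier(&foo_rng_nb);
        }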
*/ -static inline int get_random_bytes_wait(void *buf, int nbytes) +static inline int get_random_bytes_wait(void *buf, size_t nbytes) { int ret = wait_for_random_bytes(); get_random_bytes(buf, nbytes); return ret; } -#define declare_get_random_var_wait(var) \ - static inline int get_random_ ## var ## _wait(var *out) { \ +#define declare_get_random_var_wait(name, ret_type) \ + static inline int get_random_ ## name ## _wait(ret_type *out) { \ int ret = wait_for_random_bytes(); \ if (unlikely(ret)) \ return ret; \ - *out = get_random_ ## var(); \ + *out = get_random_ ## name(); \ return 0; \ } -declare_get_random_var_wait(u32) -declare_get_random_var_wait(u64) -declare_get_random_var_wait(int) -declare_get_random_var_wait(long) +declare_get_random_var_wait(u32, u32) +declare_get_random_var_wait(u64, u32) +declare_get_random_var_wait(int, unsigned int) +declare_get_random_var_wait(long, unsigned long) #undef declare_get_random_var -unsigned long randomize_page(unsigned long start, unsigned long range); - /* * This is designed to be standalone for just prandom * users, but for now we include it from @@ -120,22 +103,10 @@ unsigned long randomize_page(unsigned long start, unsigned long range); #ifdef CONFIG_ARCH_RANDOM # include #else -static inline bool __must_check arch_get_random_long(unsigned long *v) -{ - return false; -} -static inline bool __must_check arch_get_random_int(unsigned int *v) -{ - return false; -} -static inline bool __must_check arch_get_random_seed_long(unsigned long *v) -{ - return false; -} -static inline bool __must_check arch_get_random_seed_int(unsigned int *v) -{ - return false; -} +static inline bool __must_check arch_get_random_long(unsigned long *v) { return false; } +static inline bool __must_check arch_get_random_int(unsigned int *v) { return false; } +static inline bool __must_check arch_get_random_seed_long(unsigned long *v) { return false; } +static inline bool __must_check arch_get_random_seed_int(unsigned int *v) { return false; } #endif /* @@ -158,4 +129,26 @@ static inline bool __init arch_get_random_long_early(unsigned long *v) } #endif +#ifdef CONFIG_SMP +int random_prepare_cpu(unsigned int cpu); +int random_online_cpu(unsigned int cpu); +#endif + +#ifndef MODULE +extern const struct file_operations random_fops, urandom_fops; +#endif + +/* + * Android KABI fixups + * Added back the following structure and calls to preserve the ABI for + * out-of-tree drivers that were using them. 
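Worth noting on the declare_get_random_var_wait() change just above: the second parameter decouples the getter's name from its out-pointer type, so declare_get_random_var_wait(int, unsigned int) expands to:

        static inline int get_random_int_wait(unsigned int *out)
        {
                int ret = wait_for_random_bytes();

                if (unlikely(ret))
                        return ret;
                *out = get_random_int();
                return 0;
        }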
+ */ +struct random_ready_callback { + struct list_head list; + void (*func)(struct random_ready_callback *rdy); + struct module *owner; +}; +extern int add_random_ready_callback(struct random_ready_callback *rdy); +extern void del_random_ready_callback(struct random_ready_callback *rdy); + #endif /* _LINUX_RANDOM_H */ diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h index b676aa419eef..f0e535f199be 100644 --- a/include/linux/ratelimit_types.h +++ b/include/linux/ratelimit_types.h @@ -23,12 +23,16 @@ struct ratelimit_state { unsigned long flags; }; -#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ - .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ - .interval = interval_init, \ - .burst = burst_init, \ +#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .interval = interval_init, \ + .burst = burst_init, \ + .flags = flags_init, \ } +#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ + RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, 0) + #define RATELIMIT_STATE_INIT_DISABLED \ RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 2024944fd2f7..20e84a84fb77 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -331,6 +331,12 @@ regulator_get_exclusive(struct device *dev, const char *id) return ERR_PTR(-ENODEV); } +static inline struct regulator *__must_check +devm_regulator_get_exclusive(struct device *dev, const char *id) +{ + return ERR_PTR(-ENODEV); +} + static inline struct regulator *__must_check regulator_get_optional(struct device *dev, const char *id) { @@ -486,6 +492,11 @@ static inline int regulator_get_voltage(struct regulator *regulator) return -EINVAL; } +static inline int regulator_sync_voltage(struct regulator *regulator) +{ + return -EINVAL; +} + static inline int regulator_is_supported_voltage(struct regulator *regulator, int min_uV, int max_uV) { @@ -578,6 +589,25 @@ static inline int devm_regulator_unregister_notifier(struct regulator *regulator return 0; } +static inline int regulator_suspend_enable(struct regulator_dev *rdev, + suspend_state_t state) +{ + return -EINVAL; +} + +static inline int regulator_suspend_disable(struct regulator_dev *rdev, + suspend_state_t state) +{ + return -EINVAL; +} + +static inline int regulator_set_suspend_voltage(struct regulator *regulator, + int min_uV, int max_uV, + suspend_state_t state) +{ + return -EINVAL; +} + static inline void *regulator_get_drvdata(struct regulator *regulator) { return NULL; diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 136ea0997e6d..7d5a78f49d43 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -99,8 +99,8 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full); __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, - struct file *filp, poll_table *poll_table); - + struct file *filp, poll_table *poll_table, int full); +void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu); #define RING_BUFFER_ALL_CPUS -1 diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 7482da1ea67b..0a4d49ca8ccf 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -11,6 +11,10 @@ #include #include #include +#ifndef __GENKSYMS__ +#define 
PROTECT_TRACE_INCLUDE_PATH +#include +#endif /* * The anon_vma heads a list of private "related" vmas, to scan if @@ -130,6 +134,11 @@ static inline void anon_vma_lock_read(struct anon_vma *anon_vma) down_read(&anon_vma->root->rwsem); } +static inline int anon_vma_trylock_read(struct anon_vma *anon_vma) +{ + return down_read_trylock(&anon_vma->root->rwsem); +} + static inline void anon_vma_unlock_read(struct anon_vma *anon_vma) { up_read(&anon_vma->root->rwsem); @@ -194,7 +203,12 @@ void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *, static inline void page_dup_rmap(struct page *page, bool compound) { - atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount); + bool success = false; + + if (!compound) + trace_android_vh_update_page_mapcount(page, true, compound, NULL, &success); + if (!success) + atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount); } /* @@ -252,17 +266,14 @@ void try_to_munlock(struct page *); void remove_migration_ptes(struct page *old, struct page *new, bool locked); -/* - * Called by memory-failure.c to kill processes. - */ -struct anon_vma *page_lock_anon_vma_read(struct page *page); -void page_unlock_anon_vma_read(struct anon_vma *anon_vma); int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); /* * rmap_walk_control: To control rmap traversing for specific needs * * arg: passed to rmap_one() and invalid_vma() + * try_lock: bail out if the rmap lock is contended + * contended: indicate the rmap traversal bailed out due to lock contention * rmap_one: executed on each vma where page is mapped * done: for checking traversing termination condition * anon_lock: for getting anon_lock by optimized way rather than default @@ -270,6 +281,8 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); */ struct rmap_walk_control { void *arg; + bool try_lock; + bool contended; /* * Return false if page table scanning in rmap_walk should be stopped. * Otherwise, return true. @@ -277,13 +290,21 @@ struct rmap_walk_control { bool (*rmap_one)(struct page *page, struct vm_area_struct *vma, unsigned long addr, void *arg); int (*done)(struct page *page); - struct anon_vma *(*anon_lock)(struct page *page); + struct anon_vma *(*anon_lock)(struct page *page, + struct rmap_walk_control *rwc); bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); }; void rmap_walk(struct page *page, struct rmap_walk_control *rwc); void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc); +/* + * Called by memory-failure.c to kill processes. + */ +struct anon_vma *page_lock_anon_vma_read(struct page *page, + struct rmap_walk_control *rwc); +void page_unlock_anon_vma_read(struct anon_vma *anon_vma); + #else /* !CONFIG_MMU */ #define anon_vma_init() do {} while (0) diff --git a/include/linux/rtsx_usb.h b/include/linux/rtsx_usb.h index 159729cffd8e..3247ed8e9ff0 100644 --- a/include/linux/rtsx_usb.h +++ b/include/linux/rtsx_usb.h @@ -54,8 +54,6 @@ struct rtsx_ucr { struct usb_device *pusb_dev; struct usb_interface *pusb_intf; struct usb_sg_request current_sg; - unsigned char *iobuf; - dma_addr_t iobuf_dma; struct timer_list sg_timer; struct mutex dev_mutex; diff --git a/include/linux/sched.h b/include/linux/sched.h index 637d25c31374..d3cc279a4639 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -554,10 +554,6 @@ struct sched_dl_entity { * task has to wait for a replenishment to be performed at the * next firing of dl_timer. * - * @dl_boosted tells if we are boosted due to DI. 
If so we are - * outside bandwidth enforcement mechanism (but only until we - * exit the critical section); - * * @dl_yielded tells if task gave up the CPU before consuming * all its available runtime during the last job. * @@ -1380,7 +1376,9 @@ struct task_struct { ANDROID_VENDOR_DATA_ARRAY(1, 64); ANDROID_OEM_DATA_ARRAY(1, 32); - ANDROID_KABI_RESERVE(1); + /* PF_IO_WORKER */ + ANDROID_KABI_USE(1, void *pf_io_worker); + ANDROID_KABI_RESERVE(2); ANDROID_KABI_RESERVE(3); ANDROID_KABI_RESERVE(4); @@ -1689,7 +1687,7 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags) } extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); -extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); +extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus); #ifdef CONFIG_RT_SOFTINT_OPTIMIZATION extern bool cpupri_check_rt(void); diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h index d2b4204ba4d3..fa067de9f1a9 100644 --- a/include/linux/sched/jobctl.h +++ b/include/linux/sched/jobctl.h @@ -19,7 +19,6 @@ struct task_struct; #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ #define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */ -#define JOBCTL_TASK_WORK_BIT 24 /* set by TWA_SIGNAL */ #define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) #define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) @@ -29,10 +28,9 @@ struct task_struct; #define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) #define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) #define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT) -#define JOBCTL_TASK_WORK (1UL << JOBCTL_TASK_WORK_BIT) #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) -#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK | JOBCTL_TASK_WORK) +#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask); extern void task_clear_jobctl_trapping(struct task_struct *task); diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index dc1f4dcd9a82..e3e5e149b00e 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -106,6 +106,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm) #endif /* CONFIG_MEMCG */ #ifdef CONFIG_MMU +#ifndef arch_get_mmap_end +#define arch_get_mmap_end(addr) (TASK_SIZE) +#endif + +#ifndef arch_get_mmap_base +#define arch_get_mmap_base(addr, base) (base) +#endif + extern void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack); extern unsigned long diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 02d53c6b574c..fb0b3899a86f 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -360,11 +360,23 @@ static inline int restart_syscall(void) return -ERESTARTNOINTR; } -static inline int signal_pending(struct task_struct *p) +static inline int task_sigpending(struct task_struct *p) { return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); } +static inline int signal_pending(struct task_struct *p) +{ + /* + * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same + * behavior in terms of ensuring that we break out of wait loops + * so that notify signal callbacks can be processed. 
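Because signal_pending() now also reports TIF_NOTIFY_SIGNAL, an ordinary interruptible wait breaks out for task_work notifications with no change to the caller; the usual idiom, sketched with an invented context and completion flag:

        static int foo_wait(struct foo_ctx *ctx)
        {
                int ret = 0;

                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (ctx->done)
                                break;
                        /* true for a queued signal *or* TIF_NOTIFY_SIGNAL */
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                        schedule();
                }
                __set_current_state(TASK_RUNNING);
                return ret;
        }

Note that fatal_signal_pending() now builds on task_sigpending(), so a bare notification no longer looks like a fatal signal.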
+ */ + if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL))) + return 1; + return task_sigpending(p); +} + static inline int __fatal_signal_pending(struct task_struct *p) { return unlikely(sigismember(&p->pending.signal, SIGKILL)); @@ -372,7 +384,7 @@ static inline int __fatal_signal_pending(struct task_struct *p) static inline int fatal_signal_pending(struct task_struct *p) { - return signal_pending(p) && __fatal_signal_pending(p); + return task_sigpending(p) && __fatal_signal_pending(p); } static inline int signal_pending_state(long state, struct task_struct *p) @@ -509,7 +521,7 @@ extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize); static inline void restore_saved_sigmask_unless(bool interrupted) { if (interrupted) - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); + WARN_ON(!signal_pending(current)); else restore_saved_sigmask(); } diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index cefaaef8e0b6..ff1345c7b2ab 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -95,6 +95,13 @@ int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer, int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); +#ifdef CONFIG_SMP +extern unsigned int sysctl_sched_pelt_multiplier; + +int sched_pelt_multiplier(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +#endif + #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) extern unsigned int sysctl_sched_energy_aware; int sched_energy_aware_handler(struct ctl_table *table, int write, diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index cea4bdfd0f05..5629761d9790 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -31,6 +31,7 @@ struct kernel_clone_args { /* Number of elements in *set_tid */ size_t set_tid_size; int cgroup; + int io_thread; struct cgroup *cgrp; struct css_set *cset; }; @@ -82,9 +83,10 @@ static inline void exit_thread(struct task_struct *tsk) extern void do_group_exit(int); extern void exit_files(struct task_struct *); -extern void exit_itimers(struct signal_struct *); +extern void exit_itimers(struct task_struct *); extern pid_t kernel_clone(struct kernel_clone_args *kargs); +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node); struct task_struct *fork_idle(int); struct mm_struct *copy_init_mm(void); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); diff --git a/include/linux/security.h b/include/linux/security.h index a87cbacab078..694db31a46e1 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -121,10 +121,12 @@ enum lockdown_reason { LOCKDOWN_DEBUGFS, LOCKDOWN_XMON_WR, LOCKDOWN_BPF_WRITE_USER, + LOCKDOWN_DBG_WRITE_KERNEL, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_KCORE, LOCKDOWN_KPROBES, LOCKDOWN_BPF_READ, + LOCKDOWN_DBG_READ_KERNEL, LOCKDOWN_PERF, LOCKDOWN_TRACEFS, LOCKDOWN_XMON_RW, @@ -166,7 +168,7 @@ struct sk_buff; struct sock; struct sockaddr; struct socket; -struct flowi; +struct flowi_common; struct dst_entry; struct xfrm_selector; struct xfrm_policy; @@ -1369,8 +1371,9 @@ int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u int security_sk_alloc(struct sock *sk, int family, gfp_t priority); void security_sk_free(struct sock *sk); void security_sk_clone(const struct sock *sk, struct sock *newsk); -void security_sk_classify_flow(struct sock *sk, struct flowi *fl); -void security_req_classify_flow(const 
struct request_sock *req, struct flowi *fl); +void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic); +void security_req_classify_flow(const struct request_sock *req, + struct flowi_common *flic); void security_sock_graft(struct sock*sk, struct socket *parent); int security_inet_conn_request(struct sock *sk, struct sk_buff *skb, struct request_sock *req); @@ -1521,11 +1524,13 @@ static inline void security_sk_clone(const struct sock *sk, struct sock *newsk) { } -static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl) +static inline void security_sk_classify_flow(struct sock *sk, + struct flowi_common *flic) { } -static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) +static inline void security_req_classify_flow(const struct request_sock *req, + struct flowi_common *flic) { } @@ -1652,9 +1657,9 @@ void security_xfrm_state_free(struct xfrm_state *x); int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); int security_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, - const struct flowi *fl); + const struct flowi_common *flic); int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid); -void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); +void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic); #else /* CONFIG_SECURITY_NETWORK_XFRM */ @@ -1706,7 +1711,8 @@ static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_s } static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x, - struct xfrm_policy *xp, const struct flowi *fl) + struct xfrm_policy *xp, + const struct flowi_common *flic) { return 1; } @@ -1716,7 +1722,8 @@ static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) return 0; } -static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) +static inline void security_skb_classify_flow(struct sk_buff *skb, + struct flowi_common *flic) { } diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 52004d800c93..9f60f549d956 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -307,6 +307,23 @@ struct uart_state { /* number of characters left in xmit buffer before we ask for more */ #define WAKEUP_CHARS 256 +/** + * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars + * @up: uart_port structure describing the port + * @chars: number of characters sent + * + * This function advances the tail of circular xmit buffer by the number of + * @chars transmitted and handles accounting of transmitted bytes (into + * @up's icount.tx). 
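A hedged sketch of how a UART driver's TX path might use the helper documented above; the FIFO depth and data register are invented:

        static void foo_tx_chars(struct uart_port *port)
        {
                struct circ_buf *xmit = &port->state->xmit;
                unsigned int i, count;

                count = min_t(unsigned int, uart_circ_chars_pending(xmit),
                              FOO_TX_FIFO_DEPTH);
                for (i = 0; i < count; i++)
                        writeb(xmit->buf[(xmit->tail + i) & (UART_XMIT_SIZE - 1)],
                               port->membase + FOO_THR);

                /* one call both advances the tail and accounts icount.tx */
                uart_xmit_advance(port, count);
        }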
+ */ +static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars) +{ + struct circ_buf *xmit = &up->state->xmit; + + xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1); + up->icount.tx += chars; +} + struct module; struct tty_driver; @@ -403,6 +420,11 @@ static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED; static inline int setup_earlycon(char *buf) { return 0; } #endif +static inline bool uart_console_enabled(struct uart_port *port) +{ + return uart_console(port) && (port->cons->flags & CON_ENABLED); +} + struct uart_port *uart_get_console(struct uart_port *ports, int nr, struct console *c); int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr, diff --git a/include/linux/siphash.h b/include/linux/siphash.h index 0cda61855d90..0bb5ecd507be 100644 --- a/include/linux/siphash.h +++ b/include/linux/siphash.h @@ -136,4 +136,32 @@ static inline u32 hsiphash(const void *data, size_t len, return ___hsiphash_aligned(data, len, key); } +/* + * These macros expose the raw SipHash and HalfSipHash permutations. + * Do not use them directly! If you think you have a use for them, + * be sure to CC the maintainer of this file explaining why. + */ + +#define SIPHASH_PERMUTATION(a, b, c, d) ( \ + (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \ + (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \ + (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \ + (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32)) + +#define SIPHASH_CONST_0 0x736f6d6570736575ULL +#define SIPHASH_CONST_1 0x646f72616e646f6dULL +#define SIPHASH_CONST_2 0x6c7967656e657261ULL +#define SIPHASH_CONST_3 0x7465646279746573ULL + +#define HSIPHASH_PERMUTATION(a, b, c, d) ( \ + (a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \ + (c) += (d), (d) = rol32((d), 8), (d) ^= (c), \ + (a) += (d), (d) = rol32((d), 7), (d) ^= (a), \ + (c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16)) + +#define HSIPHASH_CONST_0 0U +#define HSIPHASH_CONST_1 0U +#define HSIPHASH_CONST_2 0x6c796765U +#define HSIPHASH_CONST_3 0x74656462U + #endif /* _LINUX_SIPHASH_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3a02503b3637..f73efb3057e6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -685,6 +685,7 @@ typedef unsigned char *sk_buff_data_t; * @csum_level: indicates the number of consecutive checksums found in * the packet minus one that have been verified as * CHECKSUM_UNNECESSARY (max 3) + * @scm_io_uring: SKB holds io_uring registered files * @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB * @napi_id: id of the NAPI struct this skb came from @@ -912,7 +913,21 @@ struct sk_buff { __u32 headers_end[0]; /* public: */ - ANDROID_KABI_RESERVE(1); + /* Android KABI preservation. + * + * "open coded" version of ANDROID_KABI_USE() to pack more + * fields/variables into the space that we have. + * + * scm_io_uring is from 04df9719df18 ("io_uring/af_unix: defer + * registered files gc to io_uring release") + */ + _ANDROID_KABI_REPLACE(_ANDROID_KABI_RESERVE(1), + struct { + __u8 scm_io_uring:1; + __u8 android_kabi_reserved1_padding1; + __u16 android_kabi_reserved1_padding2; + __u32 android_kabi_reserved1_padding3; + }); ANDROID_KABI_RESERVE(2); /* These elements must be at the end, see alloc_skb() for details. 
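On the siphash.h hunk above: the raw permutations are exported only so that other headers (prandom.h earlier in this patch) can share one definition, and PRND_K0/PRND_K1 are simply XORs of these constants. Purely to illustrate what the macro does, one round over the initial state:

        static void siphash_round_demo(void)
        {
                u64 v0 = SIPHASH_CONST_0, v1 = SIPHASH_CONST_1;
                u64 v2 = SIPHASH_CONST_2, v3 = SIPHASH_CONST_3;

                /* one add-rotate-xor SipHash round, updating v0..v3 in place */
                SIPHASH_PERMUTATION(v0, v1, v2, v3);
        }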
*/ @@ -1417,6 +1432,11 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb) { return skb->end; } + +static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) +{ + skb->end = offset; +} #else static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { @@ -1427,6 +1447,11 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb) { return skb->end - skb->head; } + +static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) +{ + skb->end = skb->head + offset; +} #endif /* Internal */ @@ -1646,19 +1671,19 @@ static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) return 0; } -/* This variant of skb_unclone() makes sure skb->truesize is not changed */ +/* This variant of skb_unclone() makes sure skb->truesize + * and skb_end_offset() are not changed, whenever a new skb->head is needed. + * + * Indeed there is no guarantee that ksize(kmalloc(X)) == ksize(kmalloc(X)) + * when various debugging features are in place. + */ +int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri); static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) { might_sleep_if(gfpflags_allow_blocking(pri)); - if (skb_cloned(skb)) { - unsigned int save = skb->truesize; - int res; - - res = pskb_expand_head(skb, 0, 0, pri); - skb->truesize = save; - return res; - } + if (skb_cloned(skb)) + return __skb_unclone_keeptruesize(skb, pri); return 0; } @@ -2245,6 +2270,14 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) #endif /* NET_SKBUFF_DATA_USES_OFFSET */ +static inline void skb_assert_len(struct sk_buff *skb) +{ +#ifdef CONFIG_DEBUG_NET + if (WARN_ONCE(!skb->len, "%s\n", __func__)) + DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); +#endif /* CONFIG_DEBUG_NET */ +} + /* * Add data to an sk_buff */ diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 822c048934e3..1138dd3071db 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -281,7 +281,8 @@ static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start) static inline struct sk_psock *sk_psock(const struct sock *sk) { - return rcu_dereference_sk_user_data(sk); + return __rcu_dereference_sk_user_data_with_flags(sk, + SK_USER_DATA_PSOCK); } static inline void sk_psock_queue_msg(struct sk_psock *psock, diff --git a/include/linux/slab.h b/include/linux/slab.h index eb08b86b64c6..1716cbf91394 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -217,6 +217,18 @@ static inline void __check_heap_object(const void *ptr, unsigned long n, #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) #endif +/* + * Arches can define this function if they want to decide the minimum slab + * alignment at runtime. The value returned by the function must be a power + * of two and >= ARCH_SLAB_MINALIGN. + */ +#ifndef arch_slab_minalign +static inline unsigned int arch_slab_minalign(void) +{ + return ARCH_SLAB_MINALIGN; +} +#endif + /* * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned * pointers. 
kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN diff --git a/include/linux/socket.h b/include/linux/socket.h index 9aa530d497da..c3b35d18bcd3 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -421,6 +421,9 @@ extern int __sys_accept4_file(struct file *file, unsigned file_flags, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags, unsigned long nofile); +extern struct file *do_accept(struct file *file, unsigned file_flags, + struct sockaddr __user *upeer_sockaddr, + int __user *upeer_addrlen, int flags); extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags); extern int __sys_socket(int family, int type, int protocol); @@ -436,5 +439,6 @@ extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len); extern int __sys_socketpair(int family, int type, int protocol, int __user *usockvec); +extern int __sys_shutdown_sock(struct socket *sock, int how); extern int __sys_shutdown(int fd, int how); #endif /* _LINUX_SOCKET_H */ diff --git a/include/linux/stddef.h b/include/linux/stddef.h index 998a4ba28eba..938216f8ab7e 100644 --- a/include/linux/stddef.h +++ b/include/linux/stddef.h @@ -36,4 +36,52 @@ enum { #define offsetofend(TYPE, MEMBER) \ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) +/** + * struct_group() - Wrap a set of declarations in a mirrored struct + * + * @NAME: The identifier name of the mirrored sub-struct + * @MEMBERS: The member declarations for the mirrored structs + * + * Used to create an anonymous union of two structs with identical + * layout and size: one anonymous and one named. The former can be + * used normally without sub-struct naming, and the latter can be + * used to reason about the start, end, and size of the group of + * struct members. + */ +#define struct_group(NAME, MEMBERS...) \ + __struct_group(/* no tag */, NAME, /* no attrs */, MEMBERS) + +/** + * struct_group_attr() - Create a struct_group() with trailing attributes + * + * @NAME: The identifier name of the mirrored sub-struct + * @ATTRS: Any struct attributes to apply + * @MEMBERS: The member declarations for the mirrored structs + * + * Used to create an anonymous union of two structs with identical + * layout and size: one anonymous and one named. The former can be + * used normally without sub-struct naming, and the latter can be + * used to reason about the start, end, and size of the group of + * struct members. Includes structure attributes argument. + */ +#define struct_group_attr(NAME, ATTRS, MEMBERS...) \ + __struct_group(/* no tag */, NAME, ATTRS, MEMBERS) + +/** + * struct_group_tagged() - Create a struct_group with a reusable tag + * + * @TAG: The tag name for the named sub-struct + * @NAME: The identifier name of the mirrored sub-struct + * @MEMBERS: The member declarations for the mirrored structs + * + * Used to create an anonymous union of two structs with identical + * layout and size: one anonymous and one named. The former can be + * used normally without sub-struct naming, and the latter can be + * used to reason about the start, end, and size of the group of + * struct members. Includes struct tag argument for the named copy, + * so the specified layout can be reused later. + */ +#define struct_group_tagged(TAG, NAME, MEMBERS...) 
\ + __struct_group(TAG, NAME, /* no attrs */, MEMBERS) + #endif diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 0e7e2f761c72..9e8df66738c3 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -157,7 +157,6 @@ struct plat_stmmacenet_data { struct stmmac_est *est; int clk_csr; int has_gmac; - int sph_disable; int enh_desc; int tx_coe; int rx_coe; @@ -208,5 +207,6 @@ struct plat_stmmacenet_data { bool vlan_fail_q_en; u8 vlan_fail_q; unsigned int eee_usecs_rate; + bool sph_disable; }; #endif diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 8c2a712cb242..689062afdd61 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -89,5 +89,6 @@ struct sock_xprt { #define XPRT_SOCK_WAKE_WRITE (5) #define XPRT_SOCK_WAKE_PENDING (6) #define XPRT_SOCK_WAKE_DISCONNECT (7) +#define XPRT_SOCK_CONNECT_SENT (8) #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 112ff24ea927..1c170be3f746 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -341,7 +341,7 @@ asmlinkage long sys_io_uring_setup(u32 entries, struct io_uring_params __user *p); asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit, u32 min_complete, u32 flags, - const sigset_t __user *sig, size_t sigsz); + const void __user *argp, size_t argsz); asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op, void __user *arg, unsigned int nr_args); diff --git a/include/linux/task_work.h b/include/linux/task_work.h index 0d848a1e9e62..5b8a93f288bb 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -22,6 +22,8 @@ enum task_work_notify_mode { int task_work_add(struct task_struct *task, struct callback_head *twork, enum task_work_notify_mode mode); +struct callback_head *task_work_cancel_match(struct task_struct *task, + bool (*match)(struct callback_head *, void *data), void *data); struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); void task_work_run(void); diff --git a/include/linux/tcp.h b/include/linux/tcp.h index b163dbd6875c..e079555af7b8 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -266,7 +266,14 @@ struct tcp_sock { u32 packets_out; /* Packets which are "in flight" */ u32 retrans_out; /* Retransmitted packets out */ u32 max_packets_out; /* max packets_out in last window */ +/* GENKSYMS hack to preserve the ABI because of f4ce91ce12a7 ("tcp: fix + * tcp_cwnd_validate() to not forget is_cwnd_limited") + */ +#ifndef __GENKSYMS__ + u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */ +#else u32 max_packets_seq; /* right edge of max_packets_out flight */ +#endif u16 urg_data; /* Saved octet of OOB data and control flags */ u8 ecn_flags; /* ECN status bits. */ diff --git a/include/linux/timex.h b/include/linux/timex.h index ce0859763670..2efab9a806a9 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -62,6 +62,8 @@ #include #include +unsigned long random_get_entropy_fallback(void); + #include #ifndef random_get_entropy @@ -74,8 +76,14 @@ * * By default we use get_cycles() for this purpose, but individual * architectures may override this in their asm/timex.h header file. + * If a given arch does not have get_cycles(), then we fallback to + * using random_get_entropy_fallback(). 
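Returning to the struct_group() helpers defined above, a minimal sketch with invented members; the group copies as one unit while its members stay directly addressable through the anonymous twin:

        struct foo_hdr {
                int type;
                struct_group(payload,
                        u16 a;
                        u16 b;
                );
                int crc;
        };

        static void foo_copy_payload(struct foo_hdr *dst, const struct foo_hdr *src)
        {
                /* one memcpy() sized to exactly the grouped members */
                memcpy(&dst->payload, &src->payload, sizeof(dst->payload));
                /* dst->a and dst->b remain valid accesses afterwards */
        }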
*/ -#define random_get_entropy() get_cycles() +#ifdef get_cycles +#define random_get_entropy() ((unsigned long)get_cycles()) +#else +#define random_get_entropy() random_get_entropy_fallback() +#endif #endif /* diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 739ba9a03ec1..20c0ff54b7a0 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -157,7 +157,7 @@ struct tcg_algorithm_info { * Return: size of the event on success, 0 on failure */ -static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, +static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, struct tcg_pcr_event *event_header, bool do_mapping) { diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index b480e1a07ed8..ee9ab7dbc8c3 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -198,4 +198,27 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) blkcg_maybe_throttle_current(); } +/* + * called by exit_to_user_mode_loop() if ti_work & _TIF_NOTIFY_SIGNAL. This + * is currently used by TWA_SIGNAL based task_work, which requires breaking + * wait loops to ensure that task_work is noticed and run. + */ +static inline void tracehook_notify_signal(void) +{ + clear_thread_flag(TIF_NOTIFY_SIGNAL); + smp_mb__after_atomic(); + if (current->task_works) + task_work_run(); +} + +/* + * Called when we have work to process from exit_to_user_mode_loop() + */ +static inline void set_notify_signal(struct task_struct *task) +{ + if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) && + !wake_up_state(task, TASK_INTERRUPTIBLE)) + kick_process(task); +} + #endif /* */ diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h index 767f62086bd9..c326bfdb5ec2 100644 --- a/include/linux/tty_flip.h +++ b/include/linux/tty_flip.h @@ -12,7 +12,6 @@ extern int tty_insert_flip_string_fixed_flag(struct tty_port *port, extern int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars, size_t size); extern void tty_flip_buffer_push(struct tty_port *port); -void tty_schedule_flip(struct tty_port *port); int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag); static inline int tty_insert_flip_char(struct tty_port *port, @@ -40,4 +39,7 @@ static inline int tty_insert_flip_string(struct tty_port *port, extern void tty_buffer_lock_exclusive(struct tty_port *port); extern void tty_buffer_unlock_exclusive(struct tty_port *port); +int tty_insert_flip_string_and_push_buffer(struct tty_port *port, + const unsigned char *chars, size_t cnt); + #endif /* _LINUX_TTY_FLIP_H */ diff --git a/include/linux/uacce.h b/include/linux/uacce.h index 48e319f40275..9ce88c28b0a8 100644 --- a/include/linux/uacce.h +++ b/include/linux/uacce.h @@ -70,6 +70,7 @@ enum uacce_q_state { * @wait: wait queue head * @list: index into uacce queues list * @qfrs: pointer of qfr regions + * @mutex: protects queue state * @state: queue state machine * @pasid: pasid associated to the mm * @handle: iommu_sva handle returned by iommu_sva_bind_device() @@ -80,6 +81,7 @@ struct uacce_queue { wait_queue_head_t wait; struct list_head list; struct uacce_qfile_region *qfrs[UACCE_MAX_REGION]; + struct mutex mutex; enum uacce_q_state state; u32 pasid; struct iommu_sva *handle; @@ -97,9 +99,9 @@ struct uacce_queue { * @dev_id: id of the uacce device * @cdev: cdev of the uacce * @dev: dev of the uacce + * @mutex: protects uacce operation * @priv: private pointer of the uacce * @queues: list of queues - * 
@queues_lock: lock for queues list * @inode: core vfs */ struct uacce_device { @@ -113,9 +115,9 @@ struct uacce_device { u32 dev_id; struct cdev *cdev; struct device dev; + struct mutex mutex; void *priv; struct list_head queues; - struct mutex queues_lock; struct inode *inode; }; diff --git a/include/linux/uio.h b/include/linux/uio.h index 27ff8eb786dc..cedb68e49e4f 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -26,6 +26,12 @@ enum iter_type { ITER_DISCARD = 64, }; +struct iov_iter_state { + size_t iov_offset; + size_t count; + unsigned long nr_segs; +}; + struct iov_iter { /* * Bit 0 is the read/write bit, set if we're writing. @@ -55,6 +61,14 @@ static inline enum iter_type iov_iter_type(const struct iov_iter *i) return i->type & ~(READ | WRITE); } +static inline void iov_iter_save_state(struct iov_iter *iter, + struct iov_iter_state *state) +{ + state->iov_offset = iter->iov_offset; + state->count = iter->count; + state->nr_segs = iter->nr_segs; +} + static inline bool iter_is_iovec(const struct iov_iter *i) { return iov_iter_type(i) == ITER_IOVEC; @@ -226,6 +240,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start); int iov_iter_npages(const struct iov_iter *i, int maxpages); +void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state); const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 2ffafbdcda07..d14b39d1418e 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -125,6 +125,7 @@ struct usb_hcd { #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ #define HCD_FLAG_DEAD 6 /* controller has died? */ #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */ +#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */ /* The flags can be tested using these macros; they are likely to * be slightly faster than test_bit(). @@ -135,6 +136,7 @@ struct usb_hcd { #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) +#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER)) /* * Specifies if interfaces are authorized by default diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h index fc4c7edb2e8a..296909ea04f2 100644 --- a/include/linux/usb/typec_dp.h +++ b/include/linux/usb/typec_dp.h @@ -73,6 +73,11 @@ enum { #define DP_CAP_USB BIT(7) #define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8) #define DP_CAP_UFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(23, 16)) >> 16) +/* Get pin assignment taking plug & receptacle into consideration */ +#define DP_CAP_PIN_ASSIGN_UFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \ + DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_)) +#define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? 
\ + DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_)) /* DisplayPort Status Update VDO bits */ #define DP_STATUS_CONNECTION(_status_) ((_status_) & 3) diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 38d3c6a8dc7e..f479c5d7f2c3 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -15,6 +15,18 @@ #include #include +struct vfio_device { + struct device *dev; + const struct vfio_device_ops *ops; + struct vfio_group *group; + + /* Members below here are private, not for driver use */ + refcount_t refcount; + struct completion comp; + struct list_head group_next; + void *device_data; +}; + /** * struct vfio_device_ops - VFIO bus driver device callbacks * @@ -48,11 +60,15 @@ struct vfio_device_ops { extern struct iommu_group *vfio_iommu_group_get(struct device *dev); extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev); +void vfio_init_group_dev(struct vfio_device *device, struct device *dev, + const struct vfio_device_ops *ops, void *device_data); +int vfio_register_group_dev(struct vfio_device *device); extern int vfio_add_group_dev(struct device *dev, const struct vfio_device_ops *ops, void *device_data); extern void *vfio_del_group_dev(struct device *dev); +void vfio_unregister_group_dev(struct vfio_device *device); extern struct vfio_device *vfio_device_get_from_dev(struct device *dev); extern void vfio_device_put(struct vfio_device *device); extern void *vfio_device_data(struct vfio_device *device); diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 0549ca17ba6f..167a9533d951 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -245,4 +245,7 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) int register_vmap_purge_notifier(struct notifier_block *nb); int unregister_vmap_purge_notifier(struct notifier_block *nb); +/* Allow disabling lazy TLB flushing */ +extern bool lazy_vunmap_enable; + #endif /* _LINUX_VMALLOC_H */ diff --git a/include/linux/wait.h b/include/linux/wait.h index 9b8b0833100a..e9966f3929f6 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -219,6 +219,7 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head); #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE) +#define wake_up_sync(x) __wake_up_sync((x), TASK_NORMAL) /* * Wakeup macros to be used to report events to the targets. 
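The iov_iter_state pair added a few hunks back snapshots only the iterator's position (iov_offset, count, nr_segs), which is enough to rewind after a failed or partial consumption; a hedged sketch where do_foo_copy() is a placeholder:

        static ssize_t foo_read(struct iov_iter *iter)
        {
                struct iov_iter_state state;
                ssize_t ret;

                iov_iter_save_state(iter, &state);

                ret = do_foo_copy(iter);        /* may consume part of iter */
                if (ret == -EAGAIN)
                        /* rewind so the caller can retry from the same spot */
                        iov_iter_restore(iter, &state);

                return ret;
        }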
@@ -534,10 +535,11 @@ do { \ \ hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ HRTIMER_MODE_REL); \ - if ((timeout) != KTIME_MAX) \ - hrtimer_start_range_ns(&__t.timer, timeout, \ - current->timer_slack_ns, \ - HRTIMER_MODE_REL); \ + if ((timeout) != KTIME_MAX) { \ + hrtimer_set_expires_range_ns(&__t.timer, timeout, \ + current->timer_slack_ns); \ + hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \ + } \ \ __ret = ___wait_event(wq_head, condition, state, 0, 0, \ if (!__t.task) { \ diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index a3083529b698..2e53ee1c8db4 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -175,7 +175,8 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, * * @sd: pointer to &struct v4l2_subdev * @client: pointer to struct i2c_client - * @devname: the name of the device; if NULL, the I²C device's name will be used + * @devname: the name of the device; if NULL, the I²C device driver's name + * will be used * @postfix: sub-device specific string to put right after the I²C device name; * may be NULL */ diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h index aceb2c360d3f..0e3dac030219 100644 --- a/include/memory/renesas-rpc-if.h +++ b/include/memory/renesas-rpc-if.h @@ -65,6 +65,7 @@ struct rpcif { size_t size; enum rpcif_data_dir dir; u8 bus_size; + u8 xfer_size; void *buffer; u32 xferlen; u32 smcr; diff --git a/include/net/TEST_MAPPING b/include/net/TEST_MAPPING new file mode 100644 index 000000000000..c8614a6ad3d2 --- /dev/null +++ b/include/net/TEST_MAPPING @@ -0,0 +1,7 @@ +{ + "presubmit": [ + { + "name": "CtsNetTestCases" + } + ] +} diff --git a/include/net/addrconf.h b/include/net/addrconf.h index dd2e03d39f38..3e5375054eb8 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -417,6 +417,9 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev) { const struct inet6_dev *idev = __in6_dev_get(dev); + if (unlikely(!idev)) + return true; + return !!idev->cnf.ignore_routes_with_linkdown; } diff --git a/include/net/arp.h b/include/net/arp.h index 4950191f6b2b..4a23a97195f3 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -71,6 +71,7 @@ void arp_send(int type, int ptype, __be32 dest_ip, const unsigned char *src_hw, const unsigned char *th); int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir); void arp_ifdown(struct net_device *dev); +int arp_invalidate(struct net_device *dev, __be32 ip, bool force); struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, struct net_device *dev, __be32 src_ip, diff --git a/include/net/ax25.h b/include/net/ax25.h index 8b7eb46ad72d..aadff553e4b7 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -236,6 +236,7 @@ typedef struct ax25_dev { #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) ax25_dama_info dama; #endif + refcount_t refcount; } ax25_dev; typedef struct ax25_cb { @@ -290,6 +291,17 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25) } } +static inline void ax25_dev_hold(ax25_dev *ax25_dev) +{ + refcount_inc(&ax25_dev->refcount); +} + +static inline void ax25_dev_put(ax25_dev *ax25_dev) +{ + if (refcount_dec_and_test(&ax25_dev->refcount)) { + kfree(ax25_dev); + } +} static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev) { skb->dev = dev; diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 9125effbf448..355835639ae5 100644 ---
a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -180,19 +180,21 @@ void bt_err_ratelimited(const char *fmt, ...); #define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__) #endif +#define bt_dev_name(hdev) ((hdev) ? (hdev)->name : "null") + #define bt_dev_info(hdev, fmt, ...) \ - BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + BT_INFO("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_warn(hdev, fmt, ...) \ - BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + BT_WARN("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_err(hdev, fmt, ...) \ - BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + BT_ERR("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_dbg(hdev, fmt, ...) \ - BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + BT_DBG("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_warn_ratelimited(hdev, fmt, ...) \ - bt_warn_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + bt_warn_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_err_ratelimited(hdev, fmt, ...) \ - bt_err_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + bt_err_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) /* Connection and socket states */ enum { @@ -420,6 +422,71 @@ out: return NULL; } +/* Shall not be called with lock_sock held */ +static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk, + struct msghdr *msg, + size_t len, size_t mtu, + size_t headroom, size_t tailroom) +{ + struct sk_buff *skb; + size_t size = min_t(size_t, len, mtu); + int err; + + skb = bt_skb_send_alloc(sk, size + headroom + tailroom, + msg->msg_flags & MSG_DONTWAIT, &err); + if (!skb) + return ERR_PTR(err); + + skb_reserve(skb, headroom); + skb_tailroom_reserve(skb, mtu, tailroom); + + if (!copy_from_iter_full(skb_put(skb, size), size, &msg->msg_iter)) { + kfree_skb(skb); + return ERR_PTR(-EFAULT); + } + + skb->priority = sk->sk_priority; + + return skb; +} + +/* Similar to bt_skb_sendmsg but can split the msg into multiple fragments + * according to the MTU.
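A hedged sketch of a send path built on the two helpers in this hunk (foo_mtu() and foo_queue_tx() are placeholders; bt_skb_sendmmsg() is defined just below):

        static int foo_send_frame(struct sock *sk, struct msghdr *msg, size_t len)
        {
                struct sk_buff *skb;

                skb = bt_skb_sendmmsg(sk, msg, len, foo_mtu(sk), BT_SKB_RESERVE, 0);
                if (IS_ERR(skb))
                        return PTR_ERR(skb);

                /* skb->len plus its frag_list fragments cover the message,
                 * possibly truncated if a later fragment allocation failed
                 */
                return foo_queue_tx(sk, skb);
        }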
+ */ +static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk, + struct msghdr *msg, + size_t len, size_t mtu, + size_t headroom, size_t tailroom) +{ + struct sk_buff *skb, **frag; + + skb = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom); + if (IS_ERR_OR_NULL(skb)) + return skb; + + len -= skb->len; + if (!len) + return skb; + + /* Add remaining data over MTU as continuation fragments */ + frag = &skb_shinfo(skb)->frag_list; + while (len) { + struct sk_buff *tmp; + + tmp = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom); + if (IS_ERR(tmp)) { + return skb; + } + + len -= tmp->len; + + *frag = tmp; + frag = &(*frag)->next; + } + + return skb; +} + int bt_to_errno(u16 code); void hci_sock_set_flag(struct sock *sk, int nr); diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 66361705671a..46eeb73c1289 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -36,6 +36,9 @@ /* HCI priority */ #define HCI_PRIO_MAX 7 +/* HCI maximum id value */ +#define HCI_MAX_ID 10000 + /* HCI Core structures */ struct inquiry_data { bdaddr_t bdaddr; diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 8c99677077b6..26459016f150 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -855,6 +855,7 @@ enum { }; void l2cap_chan_hold(struct l2cap_chan *c); +struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c); void l2cap_chan_put(struct l2cap_chan *c); static inline void l2cap_chan_lock(struct l2cap_chan *chan) diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index 1a28f299a4c6..895eae18271f 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -15,8 +15,6 @@ #define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW) #define AD_TIMER_INTERVAL 100 /*msec*/ -#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02} - #define AD_LACP_SLOW 0 #define AD_LACP_FAST 1 diff --git a/include/net/bonding.h b/include/net/bonding.h index 67d676059aa0..d9cc3f5602fb 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -763,6 +763,9 @@ extern struct rtnl_link_ops bond_link_ops; /* exported from bond_sysfs_slave.c */ extern const struct sysfs_ops slave_sysfs_ops; +/* exported from bond_3ad.c */ +extern const u8 lacpdu_mcast_addr[]; + static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb) { atomic_long_inc(&dev->tx_dropped); diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 716b7c5f6fdd..36e5e75e7172 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -31,7 +31,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly; static inline bool net_busy_loop_on(void) { - return sysctl_net_busy_poll; + return READ_ONCE(sysctl_net_busy_poll); } static inline bool sk_can_busy_loop(const struct sock *sk) diff --git a/include/net/esp.h b/include/net/esp.h index 90cd02ff77ef..9c5637d41d95 100644 --- a/include/net/esp.h +++ b/include/net/esp.h @@ -4,8 +4,6 @@ #include -#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER) - struct ip_esp_hdr; static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb) diff --git a/include/net/flow.h b/include/net/flow.h index b2531df3f65f..39d0cedcddee 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -195,11 +195,21 @@ static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4) return container_of(fl4, struct flowi, u.ip4); } +static inline struct flowi_common *flowi4_to_flowi_common(struct flowi4 *fl4) +{ + return 
&(flowi4_to_flowi(fl4)->u.__fl_common); +} + static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6) { return container_of(fl6, struct flowi, u.ip6); } +static inline struct flowi_common *flowi6_to_flowi_common(struct flowi6 *fl6) +{ + return &(flowi6_to_flowi(fl6)->u.__fl_common); +} + static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn) { return container_of(fldn, struct flowi, u.dn); diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index cc10b10dc3a1..5eecf4436965 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -59,6 +59,8 @@ struct flow_dissector_key_vlan { __be16 vlan_tci; }; __be16 vlan_tpid; + __be16 vlan_eth_type; + u16 padding; }; struct flow_dissector_mpls_lse { diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 010d58159887..9a58274e6217 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -568,5 +568,6 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void *data, struct flow_block_offload *bo, void (*cleanup)(struct flow_block_cb *block_cb)); +bool flow_indr_dev_exists(void); #endif /* _NET_FLOW_OFFLOAD_H */ diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index d0d188c3294b..03b64bf876a4 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -15,6 +15,22 @@ #ifndef IEEE802154_NETDEVICE_H #define IEEE802154_NETDEVICE_H +#define IEEE802154_REQUIRED_SIZE(struct_type, member) \ + (offsetof(typeof(struct_type), member) + \ + sizeof(((typeof(struct_type) *)(NULL))->member)) + +#define IEEE802154_ADDR_OFFSET \ + offsetof(typeof(struct sockaddr_ieee802154), addr) + +#define IEEE802154_MIN_NAMELEN (IEEE802154_ADDR_OFFSET + \ + IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, addr_type)) + +#define IEEE802154_NAMELEN_SHORT (IEEE802154_ADDR_OFFSET + \ + IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, short_addr)) + +#define IEEE802154_NAMELEN_LONG (IEEE802154_ADDR_OFFSET + \ + IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, hwaddr)) + #include #include #include @@ -165,6 +181,33 @@ static inline void ieee802154_devaddr_to_raw(void *raw, __le64 addr) memcpy(raw, &temp, IEEE802154_ADDR_LEN); } +static inline int +ieee802154_sockaddr_check_size(struct sockaddr_ieee802154 *daddr, int len) +{ + struct ieee802154_addr_sa *sa; + int ret = 0; + + sa = &daddr->addr; + if (len < IEEE802154_MIN_NAMELEN) + return -EINVAL; + switch (sa->addr_type) { + case IEEE802154_ADDR_NONE: + break; + case IEEE802154_ADDR_SHORT: + if (len < IEEE802154_NAMELEN_SHORT) + ret = -EINVAL; + break; + case IEEE802154_ADDR_LONG: + if (len < IEEE802154_NAMELEN_LONG) + ret = -EINVAL; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a, const struct ieee802154_addr_sa *sa) { diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 8bf5906073bc..e03ba8e80781 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -64,6 +64,14 @@ struct inet6_ifaddr { struct hlist_node addr_lst; struct list_head if_list; + /* + * Used to safely traverse idev->addr_list in process context + * if the idev->lock needed to protect idev->addr_list cannot be held. + * In that case, add the items to this list temporarily and iterate + * without holding idev->lock. + * See addrconf_ifdown and dev_forward_change. 
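+ *
+ * Editor's sketch of that pattern, mirroring dev_forward_change (the
+ * tmp_list name and the sleeping helper are illustrative; callers are
+ * assumed to be serialized against each other, e.g. under RTNL):
+ *
+ *	LIST_HEAD(tmp_list);
+ *
+ *	read_lock_bh(&idev->lock);
+ *	list_for_each_entry(ifa, &idev->addr_list, if_list)
+ *		list_add_tail(&ifa->if_list_aux, &tmp_list);
+ *	read_unlock_bh(&idev->lock);
+ *
+ *	list_for_each_entry(ifa, &tmp_list, if_list_aux)
+ *		helper_that_cannot_hold_idev_lock(ifa);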
+ */ + struct list_head if_list_aux; struct list_head tmp_list; struct inet6_ifaddr *ifpub; diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 81b965953036..56f1286583d3 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -103,15 +103,24 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, const int dif); int inet6_hash(struct sock *sk); + +static inline bool inet6_match(struct net *net, const struct sock *sk, + const struct in6_addr *saddr, + const struct in6_addr *daddr, + const __portpair ports, + const int dif, const int sdif) +{ + if (!net_eq(sock_net(sk), net) || + sk->sk_family != AF_INET6 || + sk->sk_portpair != ports || + !ipv6_addr_equal(&sk->sk_v6_daddr, saddr) || + !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) + return false; + + /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */ + return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, + sdif); +} #endif /* IS_ENABLED(CONFIG_IPV6) */ -#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif, __sdif) \ - (((__sk)->sk_portpair == (__ports)) && \ - ((__sk)->sk_family == AF_INET6) && \ - ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \ - ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \ - (((__sk)->sk_bound_dev_if == (__dif)) || \ - ((__sk)->sk_bound_dev_if == (__sdif))) && \ - net_eq(sock_net(__sk), (__net))) - #endif /* _INET6_HASHTABLES_H */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 8177cb94958c..5f7ee70dd488 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -322,7 +322,7 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); -#define TCP_PINGPONG_THRESH 3 +#define TCP_PINGPONG_THRESH 1 static inline void inet_csk_enter_pingpong_mode(struct sock *sk) { @@ -339,14 +339,6 @@ static inline bool inet_csk_in_pingpong_mode(struct sock *sk) return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; } -static inline void inet_csk_inc_pingpong_cnt(struct sock *sk) -{ - struct inet_connection_sock *icsk = inet_csk(sk); - - if (icsk->icsk_ack.pingpong < U8_MAX) - icsk->icsk_ack.pingpong++; -} - static inline bool inet_csk_has_ulp(struct sock *sk) { return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops; diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index ca6a3ea9057e..c9e387d174c6 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -197,17 +197,6 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) hashinfo->ehash_locks = NULL; } -static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if, - int dif, int sdif) -{ -#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept, - bound_dev_if, dif, sdif); -#else - return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); -#endif -} - struct inet_bind_bucket * inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, struct inet_bind_hashbucket *head, @@ -289,7 +278,6 @@ static inline struct sock *inet_lookup_listener(struct net *net, ((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport))) #endif -#if (BITS_PER_LONG == 64) #ifdef __BIG_ENDIAN #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ const __addrpair __name = (__force __addrpair) ( \ @@ -301,24 +289,20 @@ static inline struct sock 
*inet_lookup_listener(struct net *net, (((__force __u64)(__be32)(__daddr)) << 32) | \ ((__force __u64)(__be32)(__saddr))) #endif /* __BIG_ENDIAN */ -#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \ - (((__sk)->sk_portpair == (__ports)) && \ - ((__sk)->sk_addrpair == (__cookie)) && \ - (((__sk)->sk_bound_dev_if == (__dif)) || \ - ((__sk)->sk_bound_dev_if == (__sdif))) && \ - net_eq(sock_net(__sk), (__net))) -#else /* 32-bit arch */ -#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ - const int __name __deprecated __attribute__((unused)) -#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \ - (((__sk)->sk_portpair == (__ports)) && \ - ((__sk)->sk_daddr == (__saddr)) && \ - ((__sk)->sk_rcv_saddr == (__daddr)) && \ - (((__sk)->sk_bound_dev_if == (__dif)) || \ - ((__sk)->sk_bound_dev_if == (__sdif))) && \ - net_eq(sock_net(__sk), (__net))) -#endif /* 64-bit arch */ +static inline bool INET_MATCH(struct net *net, const struct sock *sk, + const __addrpair cookie, const __portpair ports, + int dif, int sdif) +{ + if (!net_eq(sock_net(sk), net) || + sk->sk_portpair != ports || + sk->sk_addrpair != cookie) + return false; + + /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */ + return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, + sdif); +} /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need * not check it for lookups anymore, thanks Alexey. -DaveM @@ -419,7 +403,7 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr) } int __inet_hash_connect(struct inet_timewait_death_row *death_row, - struct sock *sk, u32 port_offset, + struct sock *sk, u64 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, struct inet_timewait_sock **)); diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 89163ef8cf4b..f0faf9d0e7fb 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -107,7 +107,8 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) { - if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) + if (!sk->sk_mark && + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)) return skb->mark; return sk->sk_mark; @@ -116,14 +117,15 @@ static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) static inline int inet_request_bound_dev_if(const struct sock *sk, struct sk_buff *skb) { + int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); #ifdef CONFIG_NET_L3_MASTER_DEV struct net *net = sock_net(sk); - if (!sk->sk_bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept) + if (!bound_dev_if && READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept)) return l3mdev_master_ifindex_by_index(net, skb->skb_iif); #endif - return sk->sk_bound_dev_if; + return bound_dev_if; } static inline int inet_sk_bound_l3mdev(const struct sock *sk) @@ -131,7 +133,7 @@ static inline int inet_sk_bound_l3mdev(const struct sock *sk) #ifdef CONFIG_NET_L3_MASTER_DEV struct net *net = sock_net(sk); - if (!net->ipv4.sysctl_tcp_l3mdev_accept) + if (!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept)) return l3mdev_master_ifindex_by_index(net, sk->sk_bound_dev_if); #endif @@ -147,6 +149,17 @@ static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if, return bound_dev_if == dif || bound_dev_if == sdif; } +static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if, + int dif, int 
sdif) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept), + bound_dev_if, dif, sdif); +#else + return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); +#endif +} + struct inet_cork { unsigned int flags; __be32 addr; @@ -369,7 +382,7 @@ static inline bool inet_get_convert_csum(struct sock *sk) static inline bool inet_can_nonlocal_bind(struct net *net, struct inet_sock *inet) { - return net->ipv4.sysctl_ip_nonlocal_bind || + return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) || inet->freebind || inet->transparent; } diff --git a/include/net/ip.h b/include/net/ip.h index db3a2eb144b3..5834f2d950c1 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -360,7 +360,7 @@ static inline bool sysctl_dev_name_is_allowed(const char *name) static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port) { - return port < net->ipv4.sysctl_ip_prot_sock; + return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock); } #else @@ -392,7 +392,7 @@ void ipfrag_init(void); void ip_static_sysctl_init(void); #define IP4_REPLY_MARK(net, mark) \ - ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0) + (READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0) static inline bool ip_is_fragment(const struct iphdr *iph) { @@ -453,7 +453,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, struct net *net = dev_net(dst->dev); unsigned int mtu; - if (net->ipv4.sysctl_ip_fwd_use_pmtu || + if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) || ip_mtu_locked(dst) || !forwarding) return dst_mtu(dst); @@ -558,7 +558,7 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow, BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) != offsetof(typeof(flow->addrs), v4addrs.src) + sizeof(flow->addrs.v4addrs.src)); - memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs)); + memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs)); flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; } diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 028eaea1c854..42d50856fcf2 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -57,7 +57,7 @@ struct ip6_tnl { /* These fields used only by GRE */ __u32 i_seqno; /* The last seen seqno */ - __u32 o_seqno; /* The last output seqno */ + atomic_t o_seqno; /* The last output seqno */ int hlen; /* tun_hlen + encap_hlen */ int tun_hlen; /* Precalculated header length */ int encap_hlen; /* Encap header length (FOU,GUE) */ diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 61620677b034..c3e55a9ae585 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -113,7 +113,7 @@ struct ip_tunnel { /* These four fields used only by GRE */ u32 i_seqno; /* The last seen seqno */ - u32 o_seqno; /* The last output seqno */ + atomic_t o_seqno; /* The last output seqno */ int tun_hlen; /* Precalculated header length */ /* These four fields used only by ERSPAN */ diff --git a/include/net/ipv6.h b/include/net/ipv6.h index bd1f396cc9c7..e2b706b6943d 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -839,7 +839,7 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow, BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) != offsetof(typeof(flow->addrs), v6addrs.src) + sizeof(flow->addrs.v6addrs.src)); - memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs)); + memcpy(&flow->addrs.v6addrs, &iph->addrs, sizeof(flow->addrs.v6addrs)); 
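	/* Editor's note: the BUILD_BUG_ON above pins v6addrs.src and
	 * v6addrs.dst back to back, and ipv6hdr keeps saddr/daddr adjacent
	 * (now grouped as iph->addrs), so the single memcpy fills both
	 * addresses at once; copying from the grouped field also keeps a
	 * fortified memcpy() within one declared object.
	 */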
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; } diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 09f2efea0b97..5805fe4947f3 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -59,8 +59,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb) int ret = NF_ACCEPT; if (ct) { - if (!nf_ct_is_confirmed(ct)) + if (!nf_ct_is_confirmed(ct)) { ret = __nf_conntrack_confirm(skb); + + if (ret == NF_ACCEPT) + ct = (struct nf_conn *)skb_nfct(skb); + } + if (likely(ret == NF_ACCEPT)) nf_ct_deliver_cached_events(ct); } diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 76bfb6cd5815..e66fee99ed3e 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -176,13 +176,18 @@ struct nft_ctx { bool report; }; -struct nft_data_desc { - enum nft_data_types type; - unsigned int len; +enum nft_data_desc_flags { + NFT_DATA_DESC_SETELEM = (1 << 0), }; -int nft_data_init(const struct nft_ctx *ctx, - struct nft_data *data, unsigned int size, +struct nft_data_desc { + enum nft_data_types type; + unsigned int size; + unsigned int len; + unsigned int flags; +}; + +int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data, struct nft_data_desc *desc, const struct nlattr *nla); void nft_data_hold(const struct nft_data *data, enum nft_data_types type); void nft_data_release(const struct nft_data *data, enum nft_data_types type); @@ -203,11 +208,11 @@ int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); unsigned int nft_parse_register(const struct nlattr *attr); int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); -int nft_validate_register_load(enum nft_registers reg, unsigned int len); -int nft_validate_register_store(const struct nft_ctx *ctx, - enum nft_registers reg, - const struct nft_data *data, - enum nft_data_types type, unsigned int len); +int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len); +int nft_parse_register_store(const struct nft_ctx *ctx, + const struct nlattr *attr, u8 *dreg, + const struct nft_data *data, + enum nft_data_types type, unsigned int len); /** * struct nft_userdata - user defined data associated with an object @@ -1013,7 +1018,6 @@ struct nft_stats { struct nft_hook { struct list_head list; - bool inactive; struct nf_hook_ops ops; struct rcu_head rcu; }; diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index 8657e6815b07..ce75121782bf 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -26,21 +26,29 @@ void nf_tables_core_module_exit(void); struct nft_bitwise_fast_expr { u32 mask; u32 xor; - enum nft_registers sreg:8; - enum nft_registers dreg:8; + u8 sreg; + u8 dreg; }; struct nft_cmp_fast_expr { u32 data; u32 mask; - enum nft_registers sreg:8; + u8 sreg; + u8 len; + bool inv; +}; + +struct nft_cmp16_fast_expr { + struct nft_data data; + struct nft_data mask; + u8 sreg; u8 len; bool inv; }; struct nft_immediate_expr { struct nft_data data; - enum nft_registers dreg:8; + u8 dreg; u8 dlen; }; @@ -55,19 +63,20 @@ static inline u32 nft_cmp_fast_mask(unsigned int len) } extern const struct nft_expr_ops nft_cmp_fast_ops; +extern const struct nft_expr_ops nft_cmp16_fast_ops; struct nft_payload { enum nft_payload_bases base:8; u8 offset; u8 len; - enum nft_registers dreg:8; + u8 dreg; }; struct nft_payload_set { enum nft_payload_bases 
base:8; u8 offset; u8 len; - enum nft_registers sreg:8; + u8 sreg; u8 csum_type; u8 csum_offset; u8 csum_flags; diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h index 7a453a35a41d..1058f38e2aca 100644 --- a/include/net/netfilter/nf_tables_offload.h +++ b/include/net/netfilter/nf_tables_offload.h @@ -91,7 +91,7 @@ int nft_flow_rule_offload_commit(struct net *net); NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \ memset(&(__reg)->mask, 0xff, (__reg)->len); -int nft_chain_offload_priority(struct nft_base_chain *basechain); +bool nft_chain_offload_support(const struct nft_base_chain *basechain); int nft_offload_init(void); void nft_offload_exit(void); diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h index 628b6fa579cd..237f3757637e 100644 --- a/include/net/netfilter/nft_fib.h +++ b/include/net/netfilter/nft_fib.h @@ -5,7 +5,7 @@ #include struct nft_fib { - enum nft_registers dreg:8; + u8 dreg; u8 result; u32 flags; }; diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h index 07e2fd507963..2dce55c736f4 100644 --- a/include/net/netfilter/nft_meta.h +++ b/include/net/netfilter/nft_meta.h @@ -7,8 +7,8 @@ struct nft_meta { enum nft_meta_keys key:8; union { - enum nft_registers dreg:8; - enum nft_registers sreg:8; + u8 dreg; + u8 sreg; }; }; diff --git a/include/net/protocol.h b/include/net/protocol.h index 2b778e1d2d8f..0fd2df844fc7 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -35,8 +35,6 @@ /* This is used to register protocols. */ struct net_protocol { - int (*early_demux)(struct sk_buff *skb); - int (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); /* This returns an error if we weren't able to handle the error. */ @@ -53,8 +51,6 @@ struct net_protocol { #if IS_ENABLED(CONFIG_IPV6) struct inet6_protocol { - void (*early_demux)(struct sk_buff *skb); - void (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); /* This returns an error if we weren't able to handle the error. */ diff --git a/include/net/raw.h b/include/net/raw.h index 8ad8df594853..c51a635671a7 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -75,7 +75,7 @@ static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if, int dif, int sdif) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept, + return inet_bound_dev_eq(READ_ONCE(net->ipv4.sysctl_raw_l3mdev_accept), bound_dev_if, dif, sdif); #else return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); diff --git a/include/net/route.h b/include/net/route.h index a07c277cd33e..2551f3f03b37 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -165,7 +165,7 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi sk ? 
inet_sk_flowi_flags(sk) : 0, daddr, saddr, dport, sport, sock_net_uid(net, sk)); if (sk) - security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); return ip_route_output_flow(net, fl4, sk); } @@ -322,7 +322,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4, ip_rt_put(rt); flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr); } - security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); return ip_route_output_flow(net, fl4, sk); } @@ -338,7 +338,7 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable flowi4_update_output(fl4, sk->sk_bound_dev_if, RT_CONN_FLAGS(sk), fl4->daddr, fl4->saddr); - security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); return ip_route_output_flow(sock_net(sk), fl4, sk); } return rt; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 20f676de14ce..882d918dd50d 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -166,37 +166,17 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) if (spin_trylock(&qdisc->seqlock)) goto nolock_empty; - /* Paired with smp_mb__after_atomic() to make sure - * STATE_MISSED checking is synchronized with clearing - * in pfifo_fast_dequeue(). + /* No need to insist if the MISSED flag was already set. + * Note that test_and_set_bit() also gives us memory ordering + * guarantees wrt potential earlier enqueue() and below + * spin_trylock(), both of which are necessary to prevent races */ - smp_mb__before_atomic(); - - /* If the MISSED flag is set, it means other thread has - * set the MISSED flag before second spin_trylock(), so - * we can return false here to avoid multi cpus doing - * the set_bit() and second spin_trylock() concurrently. - */ - if (test_bit(__QDISC_STATE_MISSED, &qdisc->state)) + if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state)) return false; - /* Set the MISSED flag before the second spin_trylock(), - * if the second spin_trylock() return false, it means - * other cpu holding the lock will do dequeuing for us - * or it will see the MISSED flag set after releasing - * lock and reschedule the net_tx_action() to do the - * dequeuing. - */ - set_bit(__QDISC_STATE_MISSED, &qdisc->state); - - /* spin_trylock() only has load-acquire semantic, so use - * smp_mb__after_atomic() to ensure STATE_MISSED is set - * before doing the second spin_trylock(). - */ - smp_mb__after_atomic(); - - /* Retry again in case other CPU may not see the new flag - * after it releases the lock at the end of qdisc_run_end(). + /* Try to take the lock again to make sure that we will either + * grab it or the CPU that still has it will see MISSED set + * when testing it in qdisc_run_end() */ if (!spin_trylock(&qdisc->seqlock)) return false; @@ -220,6 +200,12 @@ static inline void qdisc_run_end(struct Qdisc *qdisc) if (qdisc->flags & TCQ_F_NOLOCK) { spin_unlock(&qdisc->seqlock); + /* spin_unlock() only has store-release semantic. The unlock + * and test_bit() ordering is a store-load ordering, so a full + * memory barrier is needed here. 
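+ *
+ * Editor's sketch of the race this closes (illustrative interleaving):
+ *
+ *	CPU0 (qdisc_run_end)		CPU1 (qdisc_run_begin)
+ *	spin_unlock(&q->seqlock);
+ *					test_and_set_bit(MISSED);
+ *					spin_trylock(&q->seqlock); // may fail
+ *	smp_mb();
+ *	test_bit(MISSED);
+ *
+ * With the full barrier, either CPU1's retried trylock succeeds or CPU0
+ * observes MISSED and reschedules; without it, both loads could be
+ * satisfied before the other CPU's store became visible and the qdisc
+ * would stall.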
+ */ + smp_mb(); + if (unlikely(test_bit(__QDISC_STATE_MISSED, &qdisc->state))) { clear_bit(__QDISC_STATE_MISSED, &qdisc->state); @@ -1195,7 +1181,6 @@ static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh) static inline void qdisc_reset_queue(struct Qdisc *sch) { __qdisc_reset_queue(&sch->q); - sch->qstats.backlog = 0; } static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h index 01a70b27e026..65058faea4db 100644 --- a/include/net/sctp/stream_sched.h +++ b/include/net/sctp/stream_sched.h @@ -26,6 +26,8 @@ struct sctp_sched_ops { int (*init)(struct sctp_stream *stream); /* Init a stream */ int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp); + /* free a stream */ + void (*free_sid)(struct sctp_stream *stream, __u16 sid); /* Frees the entire thing */ void (*free)(struct sctp_stream *stream); diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h index d7d2495f83c2..dac91aa38c5a 100644 --- a/include/net/secure_seq.h +++ b/include/net/secure_seq.h @@ -4,8 +4,8 @@ #include -u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); -u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, +u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); +u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport); u32 secure_tcp_seq(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport); diff --git a/include/net/sock.h b/include/net/sock.h index eb64ca0f01f3..c604052e4aa3 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -162,9 +162,6 @@ typedef __u64 __bitwise __addrpair; * for struct sock and struct inet_timewait_sock. */ struct sock_common { - /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned - * address on 64bit arches : cf INET_MATCH() - */ union { __addrpair skc_addrpair; struct { @@ -426,7 +423,7 @@ struct sock { #ifdef CONFIG_XFRM struct xfrm_policy __rcu *sk_policy[2]; #endif - struct dst_entry *sk_rx_dst; + struct dst_entry __rcu *sk_rx_dst; struct dst_entry __rcu *sk_dst_cache; atomic_t sk_omem_alloc; int sk_sndbuf; @@ -551,14 +548,26 @@ enum sk_pacing { SK_PACING_FQ = 2, }; -/* Pointer stored in sk_user_data might not be suitable for copying - * when cloning the socket. For instance, it can point to a reference - * counted object. sk_user_data bottom bit is set if pointer must not - * be copied. +/* flag bits in sk_user_data + * + * - SK_USER_DATA_NOCOPY: Pointer stored in sk_user_data might + * not be suitable for copying when cloning the socket. For instance, + * it can point to a reference counted object. sk_user_data bottom + * bit is set if pointer must not be copied. + * + * - SK_USER_DATA_BPF: Mark whether sk_user_data field is + * managed/owned by a BPF reuseport array. This bit should be set + * when sk_user_data's sk is added to the bpf's reuseport_array. + * + * - SK_USER_DATA_PSOCK: Mark whether pointer stored in + * sk_user_data points to psock type. This bit should be set + * when sk_user_data is assigned to a psock object. 
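+ *
+ * Editor's note (illustrative, not part of the patch): the flag bits can
+ * share the word with the pointer only because sk_user_data targets are
+ * at least 8-byte aligned in practice, leaving bits 0-2 free:
+ *
+ *	uintptr_t v = (uintptr_t)psock | SK_USER_DATA_NOCOPY |
+ *		      SK_USER_DATA_PSOCK;
+ *	psock = (void *)(v & SK_USER_DATA_PTRMASK);	// pointer recovered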
*/ #define SK_USER_DATA_NOCOPY 1UL -#define SK_USER_DATA_BPF 2UL /* Managed by BPF */ -#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF) +#define SK_USER_DATA_BPF 2UL +#define SK_USER_DATA_PSOCK 4UL +#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\ + SK_USER_DATA_PSOCK) /** * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied @@ -571,24 +580,40 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk) #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) +/** + * __rcu_dereference_sk_user_data_with_flags - return the pointer + * only if argument flags all has been set in sk_user_data. Otherwise + * return NULL + * + * @sk: socket + * @flags: flag bits + */ +static inline void * +__rcu_dereference_sk_user_data_with_flags(const struct sock *sk, + uintptr_t flags) +{ + uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk)); + + WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK); + + if ((sk_user_data & flags) == flags) + return (void *)(sk_user_data & SK_USER_DATA_PTRMASK); + return NULL; +} + #define rcu_dereference_sk_user_data(sk) \ + __rcu_dereference_sk_user_data_with_flags(sk, 0) +#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \ ({ \ - void *__tmp = rcu_dereference(__sk_user_data((sk))); \ - (void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK); \ + uintptr_t __tmp1 = (uintptr_t)(ptr), \ + __tmp2 = (uintptr_t)(flags); \ + WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK); \ + WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK); \ + rcu_assign_pointer(__sk_user_data((sk)), \ + __tmp1 | __tmp2); \ }) #define rcu_assign_sk_user_data(sk, ptr) \ -({ \ - uintptr_t __tmp = (uintptr_t)(ptr); \ - WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \ - rcu_assign_pointer(__sk_user_data((sk)), __tmp); \ -}) -#define rcu_assign_sk_user_data_nocopy(sk, ptr) \ -({ \ - uintptr_t __tmp = (uintptr_t)(ptr); \ - WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \ - rcu_assign_pointer(__sk_user_data((sk)), \ - __tmp | SK_USER_DATA_NOCOPY); \ -}) + __rcu_assign_sk_user_data_with_flags(sk, ptr, 0) /* * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK @@ -1466,7 +1491,7 @@ void __sk_mem_reclaim(struct sock *sk, int amount); /* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */ static inline long sk_prot_mem_limits(const struct sock *sk, int index) { - long val = sk->sk_prot->sysctl_mem[index]; + long val = READ_ONCE(sk->sk_prot->sysctl_mem[index]); #if PAGE_SIZE > SK_MEM_QUANTUM val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT; @@ -1489,19 +1514,23 @@ static inline bool sk_has_account(struct sock *sk) static inline bool sk_wmem_schedule(struct sock *sk, int size) { + int delta; + if (!sk_has_account(sk)) return true; - return size <= sk->sk_forward_alloc || - __sk_mem_schedule(sk, size, SK_MEM_SEND); + delta = size - sk->sk_forward_alloc; + return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND); } static inline bool sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) { + int delta; + if (!sk_has_account(sk)) return true; - return size <= sk->sk_forward_alloc || - __sk_mem_schedule(sk, size, SK_MEM_RECV) || + delta = size - sk->sk_forward_alloc; + return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) || skb_pfmemalloc(skb); } @@ -2698,18 +2727,18 @@ static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) { /* Does this proto have per netns sysctl_wmem ? 
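	 * (Editor's note: a non-zero sysctl_wmem_offset is the offsetof()
	 * of the per-netns copy inside struct net, so the value below is
	 * loaded through sock_net(sk) plus that offset; the READ_ONCE()
	 * pairs it with lockless sysctl writers, matching sk_get_rmem0().)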
*/ if (proto->sysctl_wmem_offset) - return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset); + return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset)); - return *proto->sysctl_wmem; + return READ_ONCE(*proto->sysctl_wmem); } static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto) { /* Does this proto have per netns sysctl_rmem ? */ if (proto->sysctl_rmem_offset) - return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset); + return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset)); - return *proto->sysctl_rmem; + return READ_ONCE(*proto->sysctl_rmem); } /* Default TCP Small queue budget is ~1 ms of data (1sec >> 10) diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 505f1e18e9bf..3eac185ae2e8 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -38,21 +38,20 @@ extern struct sock *reuseport_select_sock(struct sock *sk, extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); extern int reuseport_detach_prog(struct sock *sk); -static inline bool reuseport_has_conns(struct sock *sk, bool set) +static inline bool reuseport_has_conns(struct sock *sk) { struct sock_reuseport *reuse; bool ret = false; rcu_read_lock(); reuse = rcu_dereference(sk->sk_reuseport_cb); - if (reuse) { - if (set) - reuse->has_conns = 1; - ret = reuse->has_conns; - } + if (reuse && reuse->has_conns) + ret = true; rcu_read_unlock(); return ret; } +void reuseport_has_conns_set(struct sock *sk); + #endif /* _SOCK_REUSEPORT_H */ diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 8528015590e4..afdf8bd1b4fe 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -38,6 +38,7 @@ enum switchdev_attr_id { SWITCHDEV_ATTR_ID_PORT_MROUTER, SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME, SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING, + SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL, SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, SWITCHDEV_ATTR_ID_BRIDGE_MROUTER, #if IS_ENABLED(CONFIG_BRIDGE_MRP) @@ -57,6 +58,7 @@ struct switchdev_attr { bool mrouter; /* PORT_MROUTER */ clock_t ageing_time; /* BRIDGE_AGEING_TIME */ bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */ + u16 vlan_protocol; /* BRIDGE_VLAN_PROTOCOL */ bool mc_disabled; /* MC_DISABLED */ #if IS_ENABLED(CONFIG_BRIDGE_MRP) u8 mrp_port_role; /* MRP_PORT_ROLE */ diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h index 748cf87a4d7e..3e02709a1df6 100644 --- a/include/net/tc_act/tc_pedit.h +++ b/include/net/tc_act/tc_pedit.h @@ -14,6 +14,7 @@ struct tcf_pedit { struct tc_action common; unsigned char tcfp_nkeys; unsigned char tcfp_flags; + u32 tcfp_off_max_hint; struct tc_pedit_key *tcfp_keys; struct tcf_pedit_key_ex *tcfp_keys_ex; }; diff --git a/include/net/tcp.h b/include/net/tcp.h index eff611da5780..f936fcb213f2 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -471,6 +471,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, u32 cookie); struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, + const struct tcp_request_sock_ops *af_ops, struct sock *sk, struct sk_buff *skb); #ifdef CONFIG_SYN_COOKIES @@ -609,6 +610,7 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); void tcp_reset(struct sock *sk); void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); void tcp_fin(struct sock *sk); +void tcp_check_space(struct sock *sk); /* tcp_timer.c 
*/ void tcp_init_xmit_timers(struct sock *); @@ -943,7 +945,7 @@ extern const struct inet_connection_sock_af_ops ipv6_specific; INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); -INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb)); +void tcp_v6_early_demux(struct sk_buff *skb); #endif @@ -1274,11 +1276,14 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); + if (tp->is_cwnd_limited) + return true; + /* If in slow start, ensure cwnd grows to twice what was ACKed. */ if (tcp_in_slow_start(tp)) return tp->snd_cwnd < 2 * tp->max_packets_out; - return tp->is_cwnd_limited; + return false; } /* BBR congestion control needs pacing. @@ -1383,8 +1388,8 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); s32 delta; - if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out || - ca_ops->cong_control) + if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) || + tp->packets_out || ca_ops->cong_control) return; delta = tcp_jiffies32 - tp->lsndtime; if (delta > inet_csk(sk)->icsk_rto) @@ -1399,7 +1404,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, static inline int tcp_win_from_space(const struct sock *sk, int space) { - int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale; + int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale); return tcp_adv_win_scale <= 0 ? (space>>(-tcp_adv_win_scale)) : @@ -1450,21 +1455,24 @@ static inline int keepalive_intvl_when(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl; + return tp->keepalive_intvl ? : + READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl); } static inline int keepalive_time_when(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time; + return tp->keepalive_time ? : + READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time); } static inline int keepalive_probes(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes; + return tp->keepalive_probes ? : + READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes); } static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) @@ -1477,7 +1485,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) static inline int tcp_fin_time(const struct sock *sk) { - int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout; + int fin_timeout = tcp_sk(sk)->linger2 ? : + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout); const int rto = inet_csk(sk)->icsk_rto; if (fin_timeout < (rto << 2) - (rto >> 1)) @@ -1972,7 +1981,7 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat; + return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat); } /* @wake is one when sk_stream_write_space() calls us. 
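Editor's note on the tcp.h hunks above: they all apply one idiom. A per-socket value, when non-zero, overrides a per-netns sysctl, and the sysctl itself is now read with READ_ONCE() because writers via /proc/sys can update it without holding any lock the readers share. A minimal sketch of the pairing; the store side shown here is conceptual, not a line from this series:

	/* sysctl store path (conceptual) */
	WRITE_ONCE(net->ipv4.sysctl_tcp_keepalive_time, new_val);

	/* lockless reader: per-socket override, else annotated sysctl load */
	val = tp->keepalive_time ? :
	      READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);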
diff --git a/include/net/tls.h b/include/net/tls.h index c875c23ad173..419a67c50fc2 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -721,7 +721,7 @@ int tls_sw_fallback_init(struct sock *sk, struct tls_crypto_info *crypto_info); #ifdef CONFIG_TLS_DEVICE -void tls_device_init(void); +int tls_device_init(void); void tls_device_cleanup(void); void tls_device_sk_destruct(struct sock *sk); int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); @@ -741,7 +741,7 @@ static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk) return tls_get_ctx(sk)->rx_conf == TLS_HW; } #else -static inline void tls_device_init(void) {} +static inline int tls_device_init(void) { return 0; } static inline void tls_device_cleanup(void) {} static inline int diff --git a/include/net/udp.h b/include/net/udp.h index 4017f257628f..388e68c7bca0 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -176,6 +176,7 @@ INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int)); struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, struct udphdr *uh, struct sock *sk); int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); +void udp_v6_early_demux(struct sk_buff *skb); struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, netdev_features_t features, bool is_ipv6); @@ -259,7 +260,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if, int dif, int sdif) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept, + return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept), bound_dev_if, dif, sdif); #else return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index 7a9a23e7a604..c9a47d3d8f50 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -86,7 +86,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs, struct xdp_umem *umem); int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev, u16 queue_id, u16 flags); -int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem, +int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs, struct net_device *dev, u16 queue_id); void xp_destroy(struct xsk_buff_pool *pool); void xp_release(struct xdp_buff_xsk *xskb); diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index fac8e89aed81..310e0dbffda9 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -249,7 +249,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *, struct fc_frame *); /* libfcoe funcs */ -u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int); +u64 fcoe_wwn_from_mac(unsigned char mac[ETH_ALEN], unsigned int scheme, + unsigned int port); int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *, const struct libfc_function_template *, int init_fcp); u32 fcoe_fc_crc(struct fc_frame *fp); diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 2b5f97224f69..fa00e2543ad6 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -421,6 +421,7 @@ extern int iscsi_conn_start(struct iscsi_cls_conn *); extern void iscsi_conn_stop(struct iscsi_cls_conn *, int); extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *, int); +extern void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active); extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err); 
extern void iscsi_session_failure(struct iscsi_session *session, enum iscsi_err err); diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index b6398c9d2c5c..4b7f14078570 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -211,7 +211,7 @@ static inline unsigned int scsi_get_resid(struct scsi_cmnd *cmd) for_each_sg(scsi_sglist(cmd), sg, nseg, __i) static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd, - void *buf, int buflen) + const void *buf, int buflen) { return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, buflen); diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index f28bb20d6271..037c77fb5dc5 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -82,6 +82,7 @@ struct iscsi_transport { void (*destroy_session) (struct iscsi_cls_session *session); struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess, uint32_t cid); + void (*unbind_conn) (struct iscsi_cls_conn *conn, bool is_active); int (*bind_conn) (struct iscsi_cls_session *session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, int is_leading); @@ -196,15 +197,25 @@ enum iscsi_connection_state { ISCSI_CONN_BOUND, }; +#define ISCSI_CLS_CONN_BIT_CLEANUP 1 + struct iscsi_cls_conn { struct list_head conn_list; /* item in connlist */ - struct list_head conn_list_err; /* item in connlist_err */ void *dd_data; /* LLD private data */ struct iscsi_transport *transport; uint32_t cid; /* connection id */ + /* + * This protects the conn startup and binding/unbinding of the ep to + * the conn. Unbinding includes ep_disconnect and stop_conn. + */ struct mutex ep_mutex; struct iscsi_endpoint *ep; + /* Used when accessing flags and queueing work. 
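+ *
+ * Editor's sketch of the intended pattern; the workqueue name is
+ * invented for illustration and may not match the driver code:
+ *
+ *	unsigned long lock_flags;
+ *
+ *	spin_lock_irqsave(&conn->lock, lock_flags);
+ *	if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
+ *		queue_work(cleanup_wq, &conn->cleanup_work);
+ *	spin_unlock_irqrestore(&conn->lock, lock_flags);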
*/ + spinlock_t lock; + unsigned long flags; + struct work_struct cleanup_work; + struct device dev; /* sysfs transport/container device */ enum iscsi_connection_state state; }; @@ -443,6 +454,7 @@ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time); extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size); extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep); extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle); +extern void iscsi_put_endpoint(struct iscsi_endpoint *ep); extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd); extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *t, diff --git a/include/sound/core.h b/include/sound/core.h index b5ecbf936ffb..578d1c6b7d43 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -456,4 +456,12 @@ snd_pci_quirk_lookup_id(u16 vendor, u16 device, } #endif +/* async signal helpers */ +struct snd_fasync; + +int snd_fasync_helper(int fd, struct file *file, int on, + struct snd_fasync **fasyncp); +void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll); +void snd_fasync_free(struct snd_fasync *fasync); + #endif /* __SOUND_CORE_H */ diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index df293bc7f03b..e927889c7fd5 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -52,6 +52,8 @@ TRACE_DEFINE_ENUM(CP_DISCARD); TRACE_DEFINE_ENUM(CP_TRIMMED); TRACE_DEFINE_ENUM(CP_PAUSE); TRACE_DEFINE_ENUM(CP_RESIZE); +TRACE_DEFINE_ENUM(EX_READ); +TRACE_DEFINE_ENUM(EX_BLOCK_AGE); #define show_block_type(type) \ __print_symbolic(type, \ @@ -162,6 +164,11 @@ TRACE_DEFINE_ENUM(CP_RESIZE); { COMPRESS_ZSTD, "ZSTD" }, \ { COMPRESS_LZORLE, "LZO-RLE" }) +#define show_extent_type(type) \ + __print_symbolic(type, \ + { EX_READ, "Read" }, \ + { EX_BLOCK_AGE, "Block Age" }) + struct f2fs_sb_info; struct f2fs_io_info; struct extent_info; @@ -1526,28 +1533,31 @@ TRACE_EVENT(f2fs_issue_flush, TRACE_EVENT(f2fs_lookup_extent_tree_start, - TP_PROTO(struct inode *inode, unsigned int pgofs), + TP_PROTO(struct inode *inode, unsigned int pgofs, enum extent_type type), - TP_ARGS(inode, pgofs), + TP_ARGS(inode, pgofs, type), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(unsigned int, pgofs) + __field(enum extent_type, type) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pgofs = pgofs; + __entry->type = type; ), - TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u", + TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, type = %s", show_dev_ino(__entry), - __entry->pgofs) + __entry->pgofs, + show_extent_type(__entry->type)) ); -TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end, +TRACE_EVENT_CONDITION(f2fs_lookup_read_extent_tree_end, TP_PROTO(struct inode *inode, unsigned int pgofs, struct extent_info *ei), @@ -1561,8 +1571,8 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end, __field(ino_t, ino) __field(unsigned int, pgofs) __field(unsigned int, fofs) - __field(u32, blk) __field(unsigned int, len) + __field(u32, blk) ), TP_fast_assign( @@ -1570,25 +1580,65 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end, __entry->ino = inode->i_ino; __entry->pgofs = pgofs; __entry->fofs = ei->fofs; - __entry->blk = ei->blk; __entry->len = ei->len; + __entry->blk = ei->blk; ), TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " - "ext_info(fofs: %u, blk: %u, len: %u)", + "read_ext_info(fofs: %u, len: %u, blk: %u)", show_dev_ino(__entry), __entry->pgofs, __entry->fofs, - __entry->blk, - 
__entry->len) + __entry->len, + __entry->blk) ); -TRACE_EVENT(f2fs_update_extent_tree_range, +TRACE_EVENT_CONDITION(f2fs_lookup_age_extent_tree_end, - TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr, - unsigned int len), + TP_PROTO(struct inode *inode, unsigned int pgofs, + struct extent_info *ei), - TP_ARGS(inode, pgofs, blkaddr, len), + TP_ARGS(inode, pgofs, ei), + + TP_CONDITION(ei), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(unsigned int, pgofs) + __field(unsigned int, fofs) + __field(unsigned int, len) + __field(unsigned long long, age) + __field(unsigned long long, blocks) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pgofs = pgofs; + __entry->fofs = ei->fofs; + __entry->len = ei->len; + __entry->age = ei->age; + __entry->blocks = ei->last_blocks; + ), + + TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " + "age_ext_info(fofs: %u, len: %u, age: %llu, blocks: %llu)", + show_dev_ino(__entry), + __entry->pgofs, + __entry->fofs, + __entry->len, + __entry->age, + __entry->blocks) +); + +TRACE_EVENT(f2fs_update_read_extent_tree_range, + + TP_PROTO(struct inode *inode, unsigned int pgofs, unsigned int len, + block_t blkaddr, + unsigned int c_len), + + TP_ARGS(inode, pgofs, len, blkaddr, c_len), TP_STRUCT__entry( __field(dev_t, dev) @@ -1596,70 +1646,115 @@ TRACE_EVENT(f2fs_update_extent_tree_range, __field(unsigned int, pgofs) __field(u32, blk) __field(unsigned int, len) + __field(unsigned int, c_len) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pgofs = pgofs; - __entry->blk = blkaddr; __entry->len = len; + __entry->blk = blkaddr; + __entry->c_len = c_len; ), TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " - "blkaddr = %u, len = %u", + "len = %u, blkaddr = %u, c_len = %u", show_dev_ino(__entry), __entry->pgofs, + __entry->len, __entry->blk, - __entry->len) + __entry->c_len) +); + +TRACE_EVENT(f2fs_update_age_extent_tree_range, + + TP_PROTO(struct inode *inode, unsigned int pgofs, unsigned int len, + unsigned long long age, + unsigned long long last_blks), + + TP_ARGS(inode, pgofs, len, age, last_blks), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(unsigned int, pgofs) + __field(unsigned int, len) + __field(unsigned long long, age) + __field(unsigned long long, blocks) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pgofs = pgofs; + __entry->len = len; + __entry->age = age; + __entry->blocks = last_blks; + ), + + TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " + "len = %u, age = %llu, blocks = %llu", + show_dev_ino(__entry), + __entry->pgofs, + __entry->len, + __entry->age, + __entry->blocks) ); TRACE_EVENT(f2fs_shrink_extent_tree, TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt, - unsigned int tree_cnt), + unsigned int tree_cnt, enum extent_type type), - TP_ARGS(sbi, node_cnt, tree_cnt), + TP_ARGS(sbi, node_cnt, tree_cnt, type), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned int, node_cnt) __field(unsigned int, tree_cnt) + __field(enum extent_type, type) ), TP_fast_assign( __entry->dev = sbi->sb->s_dev; __entry->node_cnt = node_cnt; __entry->tree_cnt = tree_cnt; + __entry->type = type; ), - TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u", + TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u, type = %s", show_dev(__entry->dev), __entry->node_cnt, - __entry->tree_cnt) + __entry->tree_cnt, + 
show_extent_type(__entry->type)) ); TRACE_EVENT(f2fs_destroy_extent_tree, - TP_PROTO(struct inode *inode, unsigned int node_cnt), + TP_PROTO(struct inode *inode, unsigned int node_cnt, + enum extent_type type), - TP_ARGS(inode, node_cnt), + TP_ARGS(inode, node_cnt, type), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(unsigned int, node_cnt) + __field(enum extent_type, type) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->node_cnt = node_cnt; + __entry->type = type; ), - TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u", + TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u, type = %s", show_dev_ino(__entry), - __entry->node_cnt) + __entry->node_cnt, + show_extent_type(__entry->type)) ); DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index 9f0d3b7d56b0..0dd30de00e5b 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -12,11 +12,11 @@ struct io_wq_work; /** * io_uring_create - called after a new io_uring context was prepared * - * @fd: corresponding file descriptor - * @ctx: pointer to a ring context structure + * @fd: corresponding file descriptor + * @ctx: pointer to a ring context structure * @sq_entries: actual SQ size * @cq_entries: actual CQ size - * @flags: SQ ring flags, provided to io_uring_setup(2) + * @flags: SQ ring flags, provided to io_uring_setup(2) * * Allows to trace io_uring creation and provide pointer to a context, that can * be used later to find correlated events. @@ -49,15 +49,15 @@ TRACE_EVENT(io_uring_create, ); /** - * io_uring_register - called after a buffer/file/eventfd was succesfully + * io_uring_register - called after a buffer/file/eventfd was successfully * registered for a ring * - * @ctx: pointer to a ring context structure - * @opcode: describes which operation to perform + * @ctx: pointer to a ring context structure + * @opcode: describes which operation to perform * @nr_user_files: number of registered files * @nr_user_bufs: number of registered buffers * @cq_ev_fd: whether eventfs registered or not - * @ret: return code + * @ret: return code * * Allows to trace fixed files/buffers/eventfds, that could be registered to * avoid an overhead of getting references to them for every operation. 
This @@ -142,16 +142,16 @@ TRACE_EVENT(io_uring_queue_async_work, TP_ARGS(ctx, rw, req, work, flags), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( int, rw ) - __field( void *, req ) + __field( void *, ctx ) + __field( int, rw ) + __field( void *, req ) __field( struct io_wq_work *, work ) __field( unsigned int, flags ) ), TP_fast_assign( __entry->ctx = ctx; - __entry->rw = rw; + __entry->rw = rw; __entry->req = req; __entry->work = work; __entry->flags = flags; @@ -196,10 +196,10 @@ TRACE_EVENT(io_uring_defer, /** * io_uring_link - called before the io_uring request added into link_list of - * another request + * another request * - * @ctx: pointer to a ring context structure - * @req: pointer to a linked request + * @ctx: pointer to a ring context structure + * @req: pointer to a linked request * @target_req: pointer to a previous request, that would contain @req * * Allows to track linked requests, to understand dependencies between requests @@ -212,8 +212,8 @@ TRACE_EVENT(io_uring_link, TP_ARGS(ctx, req, target_req), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( void *, req ) + __field( void *, ctx ) + __field( void *, req ) __field( void *, target_req ) ), @@ -244,7 +244,7 @@ TRACE_EVENT(io_uring_cqring_wait, TP_ARGS(ctx, min_events), TP_STRUCT__entry ( - __field( void *, ctx ) + __field( void *, ctx ) __field( int, min_events ) ), @@ -272,7 +272,7 @@ TRACE_EVENT(io_uring_fail_link, TP_ARGS(req, link), TP_STRUCT__entry ( - __field( void *, req ) + __field( void *, req ) __field( void *, link ) ), @@ -290,38 +290,42 @@ TRACE_EVENT(io_uring_fail_link, * @ctx: pointer to a ring context structure * @user_data: user data associated with the request * @res: result of the request + * @cflags: completion flags * */ TRACE_EVENT(io_uring_complete, - TP_PROTO(void *ctx, u64 user_data, long res), + TP_PROTO(void *ctx, u64 user_data, int res, unsigned cflags), - TP_ARGS(ctx, user_data, res), + TP_ARGS(ctx, user_data, res, cflags), TP_STRUCT__entry ( __field( void *, ctx ) __field( u64, user_data ) - __field( long, res ) + __field( int, res ) + __field( unsigned, cflags ) ), TP_fast_assign( __entry->ctx = ctx; __entry->user_data = user_data; __entry->res = res; + __entry->cflags = cflags; ), - TP_printk("ring %p, user_data 0x%llx, result %ld", + TP_printk("ring %p, user_data 0x%llx, result %d, cflags %x", __entry->ctx, (unsigned long long)__entry->user_data, - __entry->res) + __entry->res, __entry->cflags) ); - /** * io_uring_submit_sqe - called before submitting one SQE * * @ctx: pointer to a ring context structure + * @req: pointer to a submitted request * @opcode: opcode of request * @user_data: user data associated with the request + * @flags request flags * @force_nonblock: whether a context blocking or not * @sq_thread: true if sq_thread has submitted this SQE * @@ -330,41 +334,60 @@ TRACE_EVENT(io_uring_complete, */ TRACE_EVENT(io_uring_submit_sqe, - TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock, - bool sq_thread), + TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags, + bool force_nonblock, bool sq_thread), - TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread), + TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u8, opcode ) __field( u64, user_data ) + __field( u32, flags ) __field( bool, force_nonblock ) __field( bool, sq_thread ) ), TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->opcode = opcode; __entry->user_data 
= user_data; + __entry->flags = flags; __entry->force_nonblock = force_nonblock; __entry->sq_thread = sq_thread; ), - TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data, - __entry->force_nonblock, __entry->sq_thread) + TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, " + "non block %d, sq_thread %d", __entry->ctx, __entry->req, + __entry->opcode, (unsigned long long)__entry->user_data, + __entry->flags, __entry->force_nonblock, __entry->sq_thread) ); +/* + * io_uring_poll_arm - called after arming a poll wait if successful + * + * @ctx: pointer to a ring context structure + * @req: pointer to the armed request + * @opcode: opcode of request + * @user_data: user data associated with the request + * @mask: request poll events mask + * @events: registered events of interest + * + * Allows to track which fds are waiting for and what are the events of + * interest. + */ TRACE_EVENT(io_uring_poll_arm, - TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events), + TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, + int mask, int events), - TP_ARGS(ctx, opcode, user_data, mask, events), + TP_ARGS(ctx, req, opcode, user_data, mask, events), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u8, opcode ) __field( u64, user_data ) __field( int, mask ) @@ -373,16 +396,17 @@ TRACE_EVENT(io_uring_poll_arm, TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->opcode = opcode; __entry->user_data = user_data; __entry->mask = mask; __entry->events = events; ), - TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data, - __entry->mask, __entry->events) + TP_printk("ring %p, req %p, op %d, data 0x%llx, mask 0x%x, events 0x%x", + __entry->ctx, __entry->req, __entry->opcode, + (unsigned long long) __entry->user_data, + __entry->mask, __entry->events) ); TRACE_EVENT(io_uring_poll_wake, @@ -437,27 +461,40 @@ TRACE_EVENT(io_uring_task_add, __entry->mask) ); +/* + * io_uring_task_run - called when task_work_run() executes the poll events + * notification callbacks + * + * @ctx: pointer to a ring context structure + * @req: pointer to the armed request + * @opcode: opcode of request + * @user_data: user data associated with the request + * + * Allows to track when notified poll events are processed + */ TRACE_EVENT(io_uring_task_run, - TP_PROTO(void *ctx, u8 opcode, u64 user_data), + TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data), - TP_ARGS(ctx, opcode, user_data), + TP_ARGS(ctx, req, opcode, user_data), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u8, opcode ) __field( u64, user_data ) ), TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->opcode = opcode; __entry->user_data = user_data; ), - TP_printk("ring %p, op %d, data 0x%llx", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data) + TP_printk("ring %p, req %p, op %d, data 0x%llx", + __entry->ctx, __entry->req, __entry->opcode, + (unsigned long long) __entry->user_data) ); #endif /* _TRACE_IO_URING_H */ diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h index ab69434e2329..72e785a903b6 100644 --- a/include/trace/events/libata.h +++ b/include/trace/events/libata.h @@ -249,6 +249,7 @@ DECLARE_EVENT_CLASS(ata_qc_complete_template, __entry->hob_feature = qc->result_tf.hob_feature; __entry->nsect = qc->result_tf.nsect; 
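+	/* Editor's note: the class's TP_printk() already decodes
+	 * "flags=%s" from __entry->flags; this assignment ensures the
+	 * field is actually populated before it is printed.
+	 */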
__entry->hob_nsect = qc->result_tf.hob_nsect; + __entry->flags = qc->flags; ), TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s status=%s " \ diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index d428f0137c49..a26dbefdf294 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -48,7 +48,9 @@ {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \ {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \ {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\ - {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\ + {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\ + {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"}, \ + {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\ #define show_gfp_flags(flags) \ (flags) ? __print_flags(flags, "|", \ diff --git a/include/trace/events/random.h b/include/trace/events/random.h deleted file mode 100644 index 9570a10cb949..000000000000 --- a/include/trace/events/random.h +++ /dev/null @@ -1,330 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM random - -#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_RANDOM_H - -#include -#include - -TRACE_EVENT(add_device_randomness, - TP_PROTO(int bytes, unsigned long IP), - - TP_ARGS(bytes, IP), - - TP_STRUCT__entry( - __field( int, bytes ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->bytes = bytes; - __entry->IP = IP; - ), - - TP_printk("bytes %d caller %pS", - __entry->bytes, (void *)__entry->IP) -); - -DECLARE_EVENT_CLASS(random__mix_pool_bytes, - TP_PROTO(const char *pool_name, int bytes, unsigned long IP), - - TP_ARGS(pool_name, bytes, IP), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, bytes ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->bytes = bytes; - __entry->IP = IP; - ), - - TP_printk("%s pool: bytes %d caller %pS", - __entry->pool_name, __entry->bytes, (void *)__entry->IP) -); - -DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes, - TP_PROTO(const char *pool_name, int bytes, unsigned long IP), - - TP_ARGS(pool_name, bytes, IP) -); - -DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock, - TP_PROTO(const char *pool_name, int bytes, unsigned long IP), - - TP_ARGS(pool_name, bytes, IP) -); - -TRACE_EVENT(credit_entropy_bits, - TP_PROTO(const char *pool_name, int bits, int entropy_count, - unsigned long IP), - - TP_ARGS(pool_name, bits, entropy_count, IP), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, bits ) - __field( int, entropy_count ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->bits = bits; - __entry->entropy_count = entropy_count; - __entry->IP = IP; - ), - - TP_printk("%s pool: bits %d entropy_count %d caller %pS", - __entry->pool_name, __entry->bits, - __entry->entropy_count, (void *)__entry->IP) -); - -TRACE_EVENT(push_to_pool, - TP_PROTO(const char *pool_name, int pool_bits, int input_bits), - - TP_ARGS(pool_name, pool_bits, input_bits), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, pool_bits ) - __field( int, input_bits ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->pool_bits = pool_bits; - __entry->input_bits = input_bits; - ), - - TP_printk("%s: pool_bits %d input_pool_bits %d", - __entry->pool_name, __entry->pool_bits, - __entry->input_bits) -); - -TRACE_EVENT(debit_entropy, - 
TP_PROTO(const char *pool_name, int debit_bits), - - TP_ARGS(pool_name, debit_bits), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, debit_bits ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->debit_bits = debit_bits; - ), - - TP_printk("%s: debit_bits %d", __entry->pool_name, - __entry->debit_bits) -); - -TRACE_EVENT(add_input_randomness, - TP_PROTO(int input_bits), - - TP_ARGS(input_bits), - - TP_STRUCT__entry( - __field( int, input_bits ) - ), - - TP_fast_assign( - __entry->input_bits = input_bits; - ), - - TP_printk("input_pool_bits %d", __entry->input_bits) -); - -TRACE_EVENT(add_disk_randomness, - TP_PROTO(dev_t dev, int input_bits), - - TP_ARGS(dev, input_bits), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( int, input_bits ) - ), - - TP_fast_assign( - __entry->dev = dev; - __entry->input_bits = input_bits; - ), - - TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev), - MINOR(__entry->dev), __entry->input_bits) -); - -TRACE_EVENT(xfer_secondary_pool, - TP_PROTO(const char *pool_name, int xfer_bits, int request_bits, - int pool_entropy, int input_entropy), - - TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy, - input_entropy), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, xfer_bits ) - __field( int, request_bits ) - __field( int, pool_entropy ) - __field( int, input_entropy ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->xfer_bits = xfer_bits; - __entry->request_bits = request_bits; - __entry->pool_entropy = pool_entropy; - __entry->input_entropy = input_entropy; - ), - - TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d " - "input_entropy %d", __entry->pool_name, __entry->xfer_bits, - __entry->request_bits, __entry->pool_entropy, - __entry->input_entropy) -); - -DECLARE_EVENT_CLASS(random__get_random_bytes, - TP_PROTO(int nbytes, unsigned long IP), - - TP_ARGS(nbytes, IP), - - TP_STRUCT__entry( - __field( int, nbytes ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->nbytes = nbytes; - __entry->IP = IP; - ), - - TP_printk("nbytes %d caller %pS", __entry->nbytes, (void *)__entry->IP) -); - -DEFINE_EVENT(random__get_random_bytes, get_random_bytes, - TP_PROTO(int nbytes, unsigned long IP), - - TP_ARGS(nbytes, IP) -); - -DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch, - TP_PROTO(int nbytes, unsigned long IP), - - TP_ARGS(nbytes, IP) -); - -DECLARE_EVENT_CLASS(random__extract_entropy, - TP_PROTO(const char *pool_name, int nbytes, int entropy_count, - unsigned long IP), - - TP_ARGS(pool_name, nbytes, entropy_count, IP), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, nbytes ) - __field( int, entropy_count ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->nbytes = nbytes; - __entry->entropy_count = entropy_count; - __entry->IP = IP; - ), - - TP_printk("%s pool: nbytes %d entropy_count %d caller %pS", - __entry->pool_name, __entry->nbytes, __entry->entropy_count, - (void *)__entry->IP) -); - - -DEFINE_EVENT(random__extract_entropy, extract_entropy, - TP_PROTO(const char *pool_name, int nbytes, int entropy_count, - unsigned long IP), - - TP_ARGS(pool_name, nbytes, entropy_count, IP) -); - -DEFINE_EVENT(random__extract_entropy, extract_entropy_user, - TP_PROTO(const char *pool_name, int nbytes, int entropy_count, - unsigned long IP), - - TP_ARGS(pool_name, nbytes, entropy_count, IP) -); - -TRACE_EVENT(random_read, - TP_PROTO(int got_bits, int 
need_bits, int pool_left, int input_left), - - TP_ARGS(got_bits, need_bits, pool_left, input_left), - - TP_STRUCT__entry( - __field( int, got_bits ) - __field( int, need_bits ) - __field( int, pool_left ) - __field( int, input_left ) - ), - - TP_fast_assign( - __entry->got_bits = got_bits; - __entry->need_bits = need_bits; - __entry->pool_left = pool_left; - __entry->input_left = input_left; - ), - - TP_printk("got_bits %d still_needed_bits %d " - "blocking_pool_entropy_left %d input_entropy_left %d", - __entry->got_bits, __entry->got_bits, __entry->pool_left, - __entry->input_left) -); - -TRACE_EVENT(urandom_read, - TP_PROTO(int got_bits, int pool_left, int input_left), - - TP_ARGS(got_bits, pool_left, input_left), - - TP_STRUCT__entry( - __field( int, got_bits ) - __field( int, pool_left ) - __field( int, input_left ) - ), - - TP_fast_assign( - __entry->got_bits = got_bits; - __entry->pool_left = pool_left; - __entry->input_left = input_left; - ), - - TP_printk("got_bits %d nonblocking_pool_entropy_left %d " - "input_entropy_left %d", __entry->got_bits, - __entry->pool_left, __entry->input_left) -); - -TRACE_EVENT(prandom_u32, - - TP_PROTO(unsigned int ret), - - TP_ARGS(ret), - - TP_STRUCT__entry( - __field( unsigned int, ret) - ), - - TP_fast_assign( - __entry->ret = ret; - ), - - TP_printk("ret=%u" , __entry->ret) -); - -#endif /* _TRACE_RANDOM_H */ - -/* This part must be outside protection */ -#include diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 4a3ab0ed6e06..221856f2d295 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -583,7 +583,7 @@ TRACE_EVENT(rxrpc_client, TP_fast_assign( __entry->conn = conn ? conn->debug_id : 0; __entry->channel = channel; - __entry->usage = conn ? atomic_read(&conn->usage) : -2; + __entry->usage = conn ? refcount_read(&conn->ref) : -2; __entry->op = op; __entry->cid = conn ? 
conn->proto.cid : 0; ), @@ -1509,7 +1509,7 @@ TRACE_EVENT(rxrpc_call_reset, __entry->call_serial = call->rx_serial; __entry->conn_serial = call->conn->hi_serial; __entry->tx_seq = call->tx_hard_ack; - __entry->rx_seq = call->ackr_seen; + __entry->rx_seq = call->rx_hard_ack; ), TP_printk("c=%08x %08x:%08x r=%08x/%08x tx=%08x rx=%08x", diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index a966d4b5ab37..905b151bc3dd 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -98,7 +98,7 @@ TRACE_EVENT(sock_exceed_buf_limit, TP_STRUCT__entry( __array(char, name, 32) - __field(long *, sysctl_mem) + __array(long, sysctl_mem, 3) __field(long, allocated) __field(int, sysctl_rmem) __field(int, rmem_alloc) @@ -110,7 +110,9 @@ TRACE_EVENT(sock_exceed_buf_limit, TP_fast_assign( strncpy(__entry->name, prot->name, 32); - __entry->sysctl_mem = prot->sysctl_mem; + __entry->sysctl_mem[0] = READ_ONCE(prot->sysctl_mem[0]); + __entry->sysctl_mem[1] = READ_ONCE(prot->sysctl_mem[1]); + __entry->sysctl_mem[2] = READ_ONCE(prot->sysctl_mem[2]); __entry->allocated = allocated; __entry->sysctl_rmem = sk_get_rmem0(sk, prot); __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); diff --git a/include/trace/events/spmi.h b/include/trace/events/spmi.h index 8b60efe18ba6..a6819fd85cdf 100644 --- a/include/trace/events/spmi.h +++ b/include/trace/events/spmi.h @@ -21,15 +21,15 @@ TRACE_EVENT(spmi_write_begin, __field ( u8, sid ) __field ( u16, addr ) __field ( u8, len ) - __dynamic_array ( u8, buf, len + 1 ) + __dynamic_array ( u8, buf, len ) ), TP_fast_assign( __entry->opcode = opcode; __entry->sid = sid; __entry->addr = addr; - __entry->len = len + 1; - memcpy(__get_dynamic_array(buf), buf, len + 1); + __entry->len = len; + memcpy(__get_dynamic_array(buf), buf, len); ), TP_printk("opc=%d sid=%02d addr=0x%04x len=%d buf=0x[%*phD]", @@ -92,7 +92,7 @@ TRACE_EVENT(spmi_read_end, __field ( u16, addr ) __field ( int, ret ) __field ( u8, len ) - __dynamic_array ( u8, buf, len + 1 ) + __dynamic_array ( u8, buf, len ) ), TP_fast_assign( @@ -100,8 +100,8 @@ TRACE_EVENT(spmi_read_end, __entry->sid = sid; __entry->addr = addr; __entry->ret = ret; - __entry->len = len + 1; - memcpy(__get_dynamic_array(buf), buf, len + 1); + __entry->len = len; + memcpy(__get_dynamic_array(buf), buf, len); ), TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d len=%02d buf=0x[%*phD]", diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 23db248a7fdb..8220369ee610 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1006,7 +1006,6 @@ DEFINE_RPC_XPRT_LIFETIME_EVENT(connect); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force); -DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup); DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy); DECLARE_EVENT_CLASS(rpc_xprt_event, @@ -1874,17 +1873,18 @@ DECLARE_EVENT_CLASS(svc_deferred_event, TP_STRUCT__entry( __field(const void *, dr) __field(u32, xid) - __string(addr, dr->xprt->xpt_remotebuf) + __array(__u8, addr, INET6_ADDRSTRLEN + 10) ), TP_fast_assign( __entry->dr = dr; __entry->xid = be32_to_cpu(*(__be32 *)(dr->args + (dr->xprt_hlen>>2))); - __assign_str(addr, dr->xprt->xpt_remotebuf); + snprintf(__entry->addr, sizeof(__entry->addr) - 1, + "%pISpc", (struct sockaddr *)&dr->addr); ), - TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr, + TP_printk("addr=%s dr=%p xid=0x%08x", __entry->addr, __entry->dr, __entry->xid) ); 
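The sock_exceed_buf_limit and svc_deferred_event changes above fix the same class of bug: a trace entry that stores a pointer (or a __string referencing another object) records an address whose contents may be freed or modified before the ring buffer is consumed, so the values must instead be copied into the entry at TP_fast_assign() time. A minimal sketch of that pattern follows, with a hypothetical event name and fields, assuming the usual TRACE_EVENT header boilerplate around it:

/*
 * Illustrative only: snapshot values into the trace entry rather than
 * storing a pointer that may dangle by the time the record is printed.
 */
TRACE_EVENT(sample_snapshot,

	TP_PROTO(const long *limits, const char *name),

	TP_ARGS(limits, name),

	TP_STRUCT__entry(
		__array(long, limits, 3)	/* copy of the values */
		__array(char, name, 32)		/* copy of the string */
	),

	TP_fast_assign(
		/* READ_ONCE() tolerates concurrent updates to the source */
		__entry->limits[0] = READ_ONCE(limits[0]);
		__entry->limits[1] = READ_ONCE(limits[1]);
		__entry->limits[2] = READ_ONCE(limits[2]);
		strscpy(__entry->name, name, sizeof(__entry->name));
	),

	TP_printk("%s: %ld %ld %ld", __entry->name, __entry->limits[0],
		  __entry->limits[1], __entry->limits[2])
);

TP_printk() runs when the buffer is read, possibly long after the event fired, which is why it may only dereference __entry fields, never the original arguments.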
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index 2070df64958e..b4feeb4b216a 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -283,7 +283,7 @@ TRACE_EVENT(mm_vmscan_lru_isolate, __field(unsigned long, nr_scanned) __field(unsigned long, nr_skipped) __field(unsigned long, nr_taken) - __field(isolate_mode_t, isolate_mode) + __field(unsigned int, isolate_mode) __field(int, lru) ), @@ -294,7 +294,7 @@ TRACE_EVENT(mm_vmscan_lru_isolate, __entry->nr_scanned = nr_scanned; __entry->nr_skipped = nr_skipped; __entry->nr_taken = nr_taken; - __entry->isolate_mode = isolate_mode; + __entry->isolate_mode = (__force unsigned int)isolate_mode; __entry->lru = lru; ), diff --git a/include/trace/hooks/binder.h b/include/trace/hooks/binder.h index 2b9e7e09a13d..fd877ec5b67f 100644 --- a/include/trace/hooks/binder.h +++ b/include/trace/hooks/binder.h @@ -11,19 +11,23 @@ * Following tracepoints are not exported in tracefs and provide a * mechanism for vendor modules to hook and extend functionality */ -#ifdef __GENKSYMS__ +#if defined(__GENKSYMS__) || !IS_ENABLED(CONFIG_ANDROID_BINDER_IPC) struct binder_alloc; struct binder_proc; struct binder_thread; struct binder_transaction; -struct task_struct; -struct seq_file; struct binder_transaction_data; #else /* struct binder_alloc */ #include <../drivers/android/binder_alloc.h> /* struct binder_proc, struct binder_thread, struct binder_transaction */ #include <../drivers/android/binder_internal.h> +#endif + +#ifdef __GENKSYMS__ +struct task_struct; +struct seq_file; +#else /* struct task_struct */ #include /* struct seq_file */ @@ -31,6 +35,7 @@ struct binder_transaction_data; /* struct binder_transaction_data */ #include #endif /* __GENKSYMS__ */ + DECLARE_HOOK(android_vh_binder_transaction_init, TP_PROTO(struct binder_transaction *t), TP_ARGS(t)); @@ -70,11 +75,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_binder_transaction, DECLARE_HOOK(android_vh_binder_preset, TP_PROTO(struct hlist_head *hhead, struct mutex *lock), TP_ARGS(hhead, lock)); -DECLARE_HOOK(android_vh_binder_proc_transaction_entry, - TP_PROTO(struct binder_proc *proc, struct binder_transaction *t, - struct binder_thread **thread, int node_debug_id, bool pending_async, - bool sync, bool *skip), - TP_ARGS(proc, t, thread, node_debug_id, pending_async, sync, skip)); DECLARE_HOOK(android_vh_binder_proc_transaction, TP_PROTO(struct task_struct *caller_task, struct task_struct *binder_proc_task, struct task_struct *binder_th_task, int node_debug_id, @@ -85,10 +85,6 @@ DECLARE_HOOK(android_vh_binder_proc_transaction_end, struct task_struct *binder_th_task, unsigned int code, bool pending_async, bool sync), TP_ARGS(caller_task, binder_proc_task, binder_th_task, code, pending_async, sync)); -DECLARE_HOOK(android_vh_binder_select_worklist_ilocked, - TP_PROTO(struct list_head **list, struct binder_thread *thread, struct binder_proc *proc, - int wait_for_proc_work), - TP_ARGS(list, thread, proc, wait_for_proc_work)); DECLARE_HOOK(android_vh_binder_new_ref, TP_PROTO(struct task_struct *proc, uint32_t ref_desc, int node_debug_id), TP_ARGS(proc, ref_desc, node_debug_id)); diff --git a/include/trace/hooks/block.h b/include/trace/hooks/block.h index 964fff355602..a5a7ac70a2ee 100644 --- a/include/trace/hooks/block.h +++ b/include/trace/hooks/block.h @@ -10,7 +10,7 @@ #include #include -#ifdef __GENKSYMS__ +#if defined(__GENKSYMS__) || !defined(CONFIG_BLOCK) struct blk_mq_tags; struct blk_mq_alloc_data; struct blk_mq_tag_set; diff --git 
a/include/trace/hooks/cgroup.h b/include/trace/hooks/cgroup.h index 68818ad29600..dfe79e5a3646 100644 --- a/include/trace/hooks/cgroup.h +++ b/include/trace/hooks/cgroup.h @@ -9,12 +9,16 @@ #include struct cgroup_taskset; -#ifdef __GENKSYMS__ +#if defined(__GENKSYMS__) || !IS_ENABLED(CONFIG_CGROUPS) struct cgroup_subsys; -struct task_struct; #else /* struct cgroup_subsys */ #include +#endif + +#ifdef __GENKSYMS__ +struct task_struct; +#else /* struct task_struct */ #include #endif /* __GENKSYMS__ */ diff --git a/include/trace/hooks/dma_noalias.h b/include/trace/hooks/dma_noalias.h new file mode 100644 index 000000000000..2dc7b04d9ab2 --- /dev/null +++ b/include/trace/hooks/dma_noalias.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM dma_noalias + +#define TRACE_INCLUDE_PATH trace/hooks + +#if !defined(_TRACE_HOOK_DMA_NOALIAS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HOOK_DMA_NOALIAS_H + +#include + +DECLARE_RESTRICTED_HOOK(android_rvh_setup_dma_ops, + TP_PROTO(struct device *dev), + TP_ARGS(dev), 1); + +#endif /*_TRACE_HOOK_DMA_NOALIAS_H */ + +/* This part must be outside protection */ +#include diff --git a/include/trace/hooks/dmabuf.h b/include/trace/hooks/dmabuf.h new file mode 100644 index 000000000000..0182960efca2 --- /dev/null +++ b/include/trace/hooks/dmabuf.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM dmabuf + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH trace/hooks + +#if !defined(_TRACE_HOOK_DMABUF_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HOOK_DMABUF_H + +#include + +struct dma_buf_sysfs_entry; +DECLARE_RESTRICTED_HOOK(android_rvh_dma_buf_stats_teardown, + TP_PROTO(struct dma_buf_sysfs_entry *sysfs_entry, bool *skip_sysfs_release), + TP_ARGS(sysfs_entry, skip_sysfs_release), 1); +#endif /* _TRACE_HOOK_DMABUF_H */ +/* This part must be outside protection */ +#include + diff --git a/include/trace/hooks/dtask.h b/include/trace/hooks/dtask.h index 9890bfe5c41d..956e8421755c 100644 --- a/include/trace/hooks/dtask.h +++ b/include/trace/hooks/dtask.h @@ -68,9 +68,18 @@ DECLARE_HOOK(android_vh_mutex_unlock_slowpath, DECLARE_HOOK(android_vh_mutex_unlock_slowpath_end, TP_PROTO(struct mutex *lock, struct task_struct *next), TP_ARGS(lock, next)); -DECLARE_HOOK(android_vh_mutex_start_check_new_owner, - TP_PROTO(struct mutex *lock), - TP_ARGS(lock)); +DECLARE_HOOK(android_vh_record_mutex_lock_starttime, + TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies), + TP_ARGS(tsk, settime_jiffies)); +DECLARE_HOOK(android_vh_record_rtmutex_lock_starttime, + TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies), + TP_ARGS(tsk, settime_jiffies)); +DECLARE_HOOK(android_vh_record_rwsem_lock_starttime, + TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies), + TP_ARGS(tsk, settime_jiffies)); +DECLARE_HOOK(android_vh_record_pcpu_rwsem_starttime, + TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies), + TP_ARGS(tsk, settime_jiffies)); /* macro versions of hooks are no longer required */ diff --git a/include/trace/hooks/futex.h b/include/trace/hooks/futex.h index f8bf394ea013..829fe5605b02 100644 --- a/include/trace/hooks/futex.h +++ b/include/trace/hooks/futex.h @@ -8,6 +8,10 @@ #include #include #include +#ifndef __GENKSYMS__ +#include +#endif + /* * Following tracepoints are not exported in tracefs and provide a * mechanism for vendor modules to hook and extend functionality @@ -22,6 +26,36 @@ 
DECLARE_HOOK(android_vh_futex_sleep_start, TP_PROTO(struct task_struct *p), TP_ARGS(p)); +DECLARE_HOOK(android_vh_do_futex, + TP_PROTO(int cmd, + unsigned int *flags, + u32 __user *uaddr2), + TP_ARGS(cmd, flags, uaddr2)); + +DECLARE_HOOK(android_vh_futex_wait_start, + TP_PROTO(unsigned int flags, + u32 bitset), + TP_ARGS(flags, bitset)); + +DECLARE_HOOK(android_vh_futex_wait_end, + TP_PROTO(unsigned int flags, + u32 bitset), + TP_ARGS(flags, bitset)); + +DECLARE_HOOK(android_vh_futex_wake_traverse_plist, + TP_PROTO(struct plist_head *chain, int *target_nr, + union futex_key key, u32 bitset), + TP_ARGS(chain, target_nr, key, bitset)); + +DECLARE_HOOK(android_vh_futex_wake_this, + TP_PROTO(int ret, int nr_wake, int target_nr, + struct task_struct *p), + TP_ARGS(ret, nr_wake, target_nr, p)); + +DECLARE_HOOK(android_vh_futex_wake_up_q_finish, + TP_PROTO(int nr_wake, int target_nr), + TP_ARGS(nr_wake, target_nr)); + /* macro versions of hooks are no longer required */ #endif /* _TRACE_HOOK_FUTEX_H */ diff --git a/include/trace/hooks/logbuf.h b/include/trace/hooks/logbuf.h index 6aeb10a4661b..3794192554cb 100644 --- a/include/trace/hooks/logbuf.h +++ b/include/trace/hooks/logbuf.h @@ -10,7 +10,7 @@ #include #include -#ifdef __GENKSYMS__ +#if defined(__GENKSYMS__) || !IS_ENABLED(CONFIG_PRINTK) struct printk_record; struct printk_ringbuffer; #else diff --git a/include/trace/hooks/mm.h b/include/trace/hooks/mm.h index 91d9172c6b4a..a4b855e0a81a 100644 --- a/include/trace/hooks/mm.h +++ b/include/trace/hooks/mm.h @@ -1,4 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifdef PROTECT_TRACE_INCLUDE_PATH +#undef PROTECT_TRACE_INCLUDE_PATH + +#include +#include +#include + +#else /* PROTECT_TRACE_INCLUDE_PATH */ + #undef TRACE_SYSTEM #define TRACE_SYSTEM mm @@ -13,6 +22,7 @@ #include #include #include +#include #ifdef __GENKSYMS__ struct slabinfo; @@ -34,6 +44,7 @@ struct readahead_control; #endif /* __GENKSYMS__ */ struct cma; struct swap_slots_cache; +struct page_vma_mapped_walk; DECLARE_RESTRICTED_HOOK(android_rvh_set_skip_swapcache_flags, TP_PROTO(gfp_t *flags), @@ -87,6 +98,12 @@ DECLARE_HOOK(android_vh_include_reserved_zone, DECLARE_HOOK(android_vh_show_mem, TP_PROTO(unsigned int filter, nodemask_t *nodemask), TP_ARGS(filter, nodemask)); +DECLARE_HOOK(android_vh_alloc_pages_slowpath_begin, + TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long *pdata), + TP_ARGS(gfp_mask, order, pdata)); +DECLARE_HOOK(android_vh_alloc_pages_slowpath_end, + TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long data), + TP_ARGS(gfp_mask, order, data)); struct dirty_throttle_control; DECLARE_HOOK(android_vh_mm_dirty_limits, TP_PROTO(struct dirty_throttle_control *const gdtc, bool strictlimit, @@ -133,11 +150,37 @@ DECLARE_HOOK(android_vh_mmap_region, DECLARE_HOOK(android_vh_try_to_unmap_one, TP_PROTO(struct vm_area_struct *vma, struct page *page, unsigned long addr, bool ret), TP_ARGS(vma, page, addr, ret)); +DECLARE_HOOK(android_vh_do_page_trylock, + TP_PROTO(struct page *page, struct rw_semaphore *sem, + bool *got_lock, bool *success), + TP_ARGS(page, sem, got_lock, success)); DECLARE_HOOK(android_vh_drain_all_pages_bypass, TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long alloc_flags, int migratetype, unsigned long did_some_progress, bool *bypass), TP_ARGS(gfp_mask, order, alloc_flags, migratetype, did_some_progress, bypass)); +DECLARE_HOOK(android_vh_update_page_mapcount, + TP_PROTO(struct page *page, bool inc_size, bool compound, + bool *first_mapping, bool *success), + TP_ARGS(page, 
inc_size, compound, first_mapping, success)); +DECLARE_HOOK(android_vh_add_page_to_lrulist, + TP_PROTO(struct page *page, bool compound, enum lru_list lru), + TP_ARGS(page, compound, lru)); +DECLARE_HOOK(android_vh_del_page_from_lrulist, + TP_PROTO(struct page *page, bool compound, enum lru_list lru), + TP_ARGS(page, compound, lru)); +DECLARE_HOOK(android_vh_show_mapcount_pages, + TP_PROTO(void *unused), + TP_ARGS(unused)); +DECLARE_HOOK(android_vh_do_traversal_lruvec, + TP_PROTO(struct lruvec *lruvec), + TP_ARGS(lruvec)); +DECLARE_HOOK(android_vh_page_should_be_protected, + TP_PROTO(struct page *page, bool *should_protect), + TP_ARGS(page, should_protect)); +DECLARE_HOOK(android_vh_mark_page_accessed, + TP_PROTO(struct page *page), + TP_ARGS(page)); DECLARE_HOOK(android_vh_cma_drain_all_pages_bypass, TP_PROTO(unsigned int migratetype, bool *bypass), TP_ARGS(migratetype, bypass)); @@ -150,6 +193,9 @@ DECLARE_HOOK(android_vh_subpage_dma_contig_alloc, DECLARE_HOOK(android_vh_ra_tuning_max_page, TP_PROTO(struct readahead_control *ractl, unsigned long *max_page), TP_ARGS(ractl, max_page)); +DECLARE_RESTRICTED_HOOK(android_rvh_handle_pte_fault_end, + TP_PROTO(struct vm_fault *vmf, unsigned long highest_memmap_pfn), + TP_ARGS(vmf, highest_memmap_pfn), 1); DECLARE_HOOK(android_vh_handle_pte_fault_end, TP_PROTO(struct vm_fault *vmf, unsigned long highest_memmap_pfn), TP_ARGS(vmf, highest_memmap_pfn)); @@ -180,20 +226,37 @@ DECLARE_HOOK(android_vh_count_swpout_vm_event, DECLARE_HOOK(android_vh_swap_slot_cache_active, TP_PROTO(bool swap_slot_cache_active), TP_ARGS(swap_slot_cache_active)); +DECLARE_RESTRICTED_HOOK(android_rvh_drain_slots_cache_cpu, + TP_PROTO(struct swap_slots_cache *cache, unsigned int type, + bool free_slots, bool *skip), + TP_ARGS(cache, type, free_slots, skip), 1); DECLARE_HOOK(android_vh_drain_slots_cache_cpu, TP_PROTO(struct swap_slots_cache *cache, unsigned int type, bool free_slots, bool *skip), TP_ARGS(cache, type, free_slots, skip)); +DECLARE_RESTRICTED_HOOK(android_rvh_alloc_swap_slot_cache, + TP_PROTO(struct swap_slots_cache *cache, int *ret, bool *skip), + TP_ARGS(cache, ret, skip), 1); DECLARE_HOOK(android_vh_alloc_swap_slot_cache, TP_PROTO(struct swap_slots_cache *cache, int *ret, bool *skip), TP_ARGS(cache, ret, skip)); +DECLARE_RESTRICTED_HOOK(android_rvh_free_swap_slot, + TP_PROTO(swp_entry_t entry, struct swap_slots_cache *cache, bool *skip), + TP_ARGS(entry, cache, skip), 1); DECLARE_HOOK(android_vh_free_swap_slot, TP_PROTO(swp_entry_t entry, struct swap_slots_cache *cache, bool *skip), TP_ARGS(entry, cache, skip)); +DECLARE_RESTRICTED_HOOK(android_rvh_get_swap_page, + TP_PROTO(struct page *page, swp_entry_t *entry, + struct swap_slots_cache *cache, bool *found), + TP_ARGS(page, entry, cache, found), 1); DECLARE_HOOK(android_vh_get_swap_page, TP_PROTO(struct page *page, swp_entry_t *entry, struct swap_slots_cache *cache, bool *found), TP_ARGS(page, entry, cache, found)); +DECLARE_HOOK(android_vh_madvise_cold_or_pageout, + TP_PROTO(struct vm_area_struct *vma, bool *allow_shared), + TP_ARGS(vma, allow_shared)); DECLARE_HOOK(android_vh_page_isolated_for_reclaim, TP_PROTO(struct mm_struct *mm, struct page *page), TP_ARGS(mm, page)); @@ -209,15 +272,44 @@ DECLARE_HOOK(android_vh_init_swap_info_struct, DECLARE_HOOK(android_vh_si_swapinfo, TP_PROTO(struct swap_info_struct *si, bool *skip), TP_ARGS(si, skip)); +DECLARE_RESTRICTED_HOOK(android_rvh_alloc_si, + TP_PROTO(struct swap_info_struct **p, bool *skip), + TP_ARGS(p, skip), 1); DECLARE_HOOK(android_vh_alloc_si, 
TP_PROTO(struct swap_info_struct **p, bool *skip), TP_ARGS(p, skip)); DECLARE_HOOK(android_vh_free_pages, TP_PROTO(struct page *page, unsigned int order), TP_ARGS(page, order)); +DECLARE_HOOK(android_vh_set_shmem_page_flag, + TP_PROTO(struct page *page), + TP_ARGS(page)); +DECLARE_HOOK(android_vh_remove_vmalloc_stack, + TP_PROTO(struct vm_struct *vm), + TP_ARGS(vm)); +DECLARE_HOOK(android_vh_alloc_pages_reclaim_bypass, + TP_PROTO(gfp_t gfp_mask, int order, int alloc_flags, + int migratetype, struct page **page), + TP_ARGS(gfp_mask, order, alloc_flags, migratetype, page)); +DECLARE_HOOK(android_vh_alloc_pages_failure_bypass, + TP_PROTO(gfp_t gfp_mask, int order, int alloc_flags, + int migratetype, struct page **page), + TP_ARGS(gfp_mask, order, alloc_flags, migratetype, page)); +DECLARE_HOOK(android_vh_test_clear_look_around_ref, + TP_PROTO(struct page *page), + TP_ARGS(page)); +DECLARE_HOOK(android_vh_look_around_migrate_page, + TP_PROTO(struct page *old_page, struct page *new_page), + TP_ARGS(old_page, new_page)); +DECLARE_HOOK(android_vh_look_around, + TP_PROTO(struct page_vma_mapped_walk *pvmw, struct page *page, + struct vm_area_struct *vma, int *referenced), + TP_ARGS(pvmw, page, vma, referenced)); /* macro versions of hooks are no longer required */ #endif /* _TRACE_HOOK_MM_H */ /* This part must be outside protection */ #include + +#endif /* PROTECT_TRACE_INCLUDE_PATH */ diff --git a/include/trace/hooks/mmc_core.h b/include/trace/hooks/mmc_core.h index b4b93b2f4c9e..eb012e9c8147 100644 --- a/include/trace/hooks/mmc_core.h +++ b/include/trace/hooks/mmc_core.h @@ -10,13 +10,17 @@ #include #include -#ifdef __GENKSYMS__ +#if defined(__GENKSYMS__) || !IS_ENABLED(CONFIG_MMC_SDHCI) struct sdhci_host; -struct mmc_card; -struct mmc_host; #else /* struct sdhci_host */ #include <../drivers/mmc/host/sdhci.h> +#endif + +#ifdef __GENKSYMS__ +struct mmc_card; +struct mmc_host; +#else /* struct mmc_card */ #include /* struct mmc_host */ diff --git a/include/trace/hooks/psi.h b/include/trace/hooks/psi.h index 3842118af3c3..3200b639ad3f 100644 --- a/include/trace/hooks/psi.h +++ b/include/trace/hooks/psi.h @@ -12,7 +12,7 @@ #if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS) -#ifdef __GENKSYMS__ +#if defined(__GENKSYMS__) || !defined(CONFIG_PSI) struct psi_group; struct psi_trigger; #else diff --git a/include/trace/hooks/restore_incpath.h b/include/trace/hooks/restore_incpath.h new file mode 100644 index 000000000000..3a97e4572007 --- /dev/null +++ b/include/trace/hooks/restore_incpath.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Include this file from a header declaring vendor hooks to preserve and later + * restore TRACE_INCLUDE_PATH value. 
Typical usage: + * + * #ifdef PROTECT_TRACE_INCLUDE_PATH + * #undef PROTECT_TRACE_INCLUDE_PATH + * + * #include + * #include + * #include + * + * #else + * + * + * + * #endif + * + * The header that includes vendor hooks header file should define + * PROTECT_TRACE_INCLUDE_PATH before including the vendor hook file like this: + * + * #define PROTECT_TRACE_INCLUDE_PATH + * #include + */ +#ifdef STORED_TRACE_INCLUDE_PATH +# undef TRACE_INCLUDE_PATH +# define TRACE_INCLUDE_PATH STORED_TRACE_INCLUDE_PATH +# undef STORED_TRACE_INCLUDE_PATH +#else +# undef TRACE_INCLUDE_PATH +#endif + diff --git a/include/trace/hooks/save_incpath.h b/include/trace/hooks/save_incpath.h new file mode 100644 index 000000000000..3b4621a47275 --- /dev/null +++ b/include/trace/hooks/save_incpath.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Include this file from a header declaring vendor hooks to preserve and later + * restore TRACE_INCLUDE_PATH value. Typical usage: + * + * #ifdef PROTECT_TRACE_INCLUDE_PATH + * #undef PROTECT_TRACE_INCLUDE_PATH + * + * #include + * #include + * #include + * + * #else + * + * + * + * #endif + * + * The header that includes vendor hooks header file should define + * PROTECT_TRACE_INCLUDE_PATH before including the vendor hook file like this: + * + * #define PROTECT_TRACE_INCLUDE_PATH + * #include + */ +#ifdef TRACE_INCLUDE_PATH +#define STORED_TRACE_INCLUDE_PATH TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_PATH +#endif + diff --git a/include/trace/hooks/sched.h b/include/trace/hooks/sched.h index 9f16fffd89e4..5f63128bb5c6 100644 --- a/include/trace/hooks/sched.h +++ b/include/trace/hooks/sched.h @@ -391,6 +391,13 @@ DECLARE_HOOK(android_vh_setscheduler_uclamp, TP_PROTO(struct task_struct *tsk, int clamp_id, unsigned int value), TP_ARGS(tsk, clamp_id, value)); +DECLARE_HOOK(android_vh_mmput, + TP_PROTO(void *unused), + TP_ARGS(unused)); + +DECLARE_HOOK(android_vh_sched_pelt_multiplier, + TP_PROTO(unsigned int old, unsigned int cur, int *ret), + TP_ARGS(old, cur, ret)); /* macro versions of hooks are no longer required */ #endif /* _TRACE_HOOK_SCHED_H */ diff --git a/include/trace/hooks/thermal.h b/include/trace/hooks/thermal.h index 5e61ecd38425..c438ab6ef5b6 100644 --- a/include/trace/hooks/thermal.h +++ b/include/trace/hooks/thermal.h @@ -24,6 +24,11 @@ DECLARE_HOOK(android_vh_thermal_pm_notify_suspend, TP_PROTO(struct thermal_zone_device *tz, int *irq_wakeable), TP_ARGS(tz, irq_wakeable)); +struct thermal_cooling_device; +DECLARE_HOOK(android_vh_disable_thermal_cooling_stats, + TP_PROTO(struct thermal_cooling_device *cdev, bool *disable_stats), + TP_ARGS(cdev, disable_stats)); + #endif /* _TRACE_HOOK_THERMAL_H */ /* This part must be outside protection */ #include diff --git a/include/trace/hooks/typec.h b/include/trace/hooks/typec.h index db06fb8c30e9..825cb37bc8b2 100644 --- a/include/trace/hooks/typec.h +++ b/include/trace/hooks/typec.h @@ -6,12 +6,13 @@ #define _TRACE_HOOK_TYPEC_H #include #include +#include #include /* * Following tracepoints are not exported in tracefs and provide a * mechanism for vendor modules to hook and extend functionality */ -#ifdef __GENKSYMS__ +#if defined(__GENKSYMS__) || !IS_ENABLED(CONFIG_TYPEC_TCPCI) struct tcpci_data; #else /* struct tcpci_data */ diff --git a/include/trace/hooks/ufshcd.h b/include/trace/hooks/ufshcd.h index 8483dc966c6c..906e02740f3f 100644 --- a/include/trace/hooks/ufshcd.h +++ b/include/trace/hooks/ufshcd.h @@ -10,7 +10,7 @@ * Following tracepoints are not exported in tracefs and provide a * mechanism for 
vendor modules to hook and extend functionality */ -#ifdef __GENKSYMS__ +#if defined(__GENKSYMS__) || !IS_ENABLED(CONFIG_SCSI_UFSHCD) struct ufs_hba; struct ufshcd_lrb; struct uic_command; diff --git a/include/trace/hooks/vmscan.h b/include/trace/hooks/vmscan.h index 4f1d6ab9f498..4bdb18c36ac6 100644 --- a/include/trace/hooks/vmscan.h +++ b/include/trace/hooks/vmscan.h @@ -28,12 +28,21 @@ DECLARE_RESTRICTED_HOOK(android_rvh_set_balance_anon_file_reclaim, DECLARE_HOOK(android_vh_page_referenced_check_bypass, TP_PROTO(struct page *page, unsigned long nr_to_scan, int lru, bool *bypass), TP_ARGS(page, nr_to_scan, lru, bypass)); +DECLARE_HOOK(android_vh_page_trylock_get_result, + TP_PROTO(struct page *page, bool *trylock_fail), + TP_ARGS(page, trylock_fail)); +DECLARE_HOOK(android_vh_handle_failed_page_trylock, + TP_PROTO(struct list_head *page_list), + TP_ARGS(page_list)); +DECLARE_HOOK(android_vh_page_trylock_set, + TP_PROTO(struct page *page), + TP_ARGS(page)); +DECLARE_HOOK(android_vh_page_trylock_clear, + TP_PROTO(struct page *page), + TP_ARGS(page)); DECLARE_HOOK(android_vh_shrink_node_memcgs, TP_PROTO(struct mem_cgroup *memcg, bool *skip), TP_ARGS(memcg, skip)); -DECLARE_HOOK(android_vh_tune_memcg_scan_type, - TP_PROTO(struct mem_cgroup *memcg, char *scan_type), - TP_ARGS(memcg, scan_type)); DECLARE_HOOK(android_vh_inactive_is_low, TP_PROTO(unsigned long gb, unsigned long *inactive_ratio, enum lru_list inactive_lru, bool *skip), @@ -41,6 +50,9 @@ DECLARE_HOOK(android_vh_inactive_is_low, DECLARE_HOOK(android_vh_snapshot_refaults, TP_PROTO(struct lruvec *target_lruvec), TP_ARGS(target_lruvec)); +DECLARE_HOOK(android_vh_check_page_look_around_ref, + TP_PROTO(struct page *page, int *skip), + TP_ARGS(page, skip)); #endif /* _TRACE_HOOK_VMSCAN_H */ /* This part must be outside protection */ #include diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 7785961d82ba..d74c076e9e2b 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -400,16 +400,18 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#define ALIGN_STRUCTFIELD(type) ((int)(offsetof(struct {char a; type b;}, b))) + #undef __field_ext #define __field_ext(_type, _item, _filter_type) { \ .type = #_type, .name = #_item, \ - .size = sizeof(_type), .align = __alignof__(_type), \ + .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \ .is_signed = is_signed_type(_type), .filter_type = _filter_type }, #undef __field_struct_ext #define __field_struct_ext(_type, _item, _filter_type) { \ .type = #_type, .name = #_item, \ - .size = sizeof(_type), .align = __alignof__(_type), \ + .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \ 0, .filter_type = _filter_type }, #undef __field @@ -421,7 +423,7 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \ #undef __array #define __array(_type, _item, _len) { \ .type = #_type"["__stringify(_len)"]", .name = #_item, \ - .size = sizeof(_type[_len]), .align = __alignof__(_type), \ + .size = sizeof(_type[_len]), .align = ALIGN_STRUCTFIELD(_type), \ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }, #undef __dynamic_array diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index 2d3f015e2ae3..7badb670f24f 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -319,6 +319,7 @@ enum transaction_flags { TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ 
TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ TF_CLEAR_BUF = 0x20, /* clear buffer on txn complete */ + TF_UPDATE_TXN = 0x40, /* update the outdated pending async txn */ }; struct binder_transaction_data { diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index cd2d8279a5e4..cb4e8e6e86a9 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -182,7 +182,7 @@ #define AUDIT_MAX_KEY_LEN 256 #define AUDIT_BITMASK_SIZE 64 #define AUDIT_WORD(nr) ((__u32)((nr)/32)) -#define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32)) +#define AUDIT_BIT(nr) (1U << ((nr) - AUDIT_WORD(nr)*32)) #define AUDIT_SYSCALL_CLASSES 16 #define AUDIT_CLASS_DIR_WRITE 0 diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b43a86d05494..2a234023821e 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -4180,7 +4180,8 @@ struct bpf_sock { __u32 src_ip4; __u32 src_ip6[4]; __u32 src_port; /* host byte order */ - __u32 dst_port; /* network byte order */ + __be16 dst_port; /* network byte order */ + __u16 :16; /* zero padding */ __u32 dst_ip4; __u32 dst_ip6[4]; __u32 state; @@ -5006,7 +5007,10 @@ struct bpf_pidns_info { /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */ struct bpf_sk_lookup { - __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ + union { + __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ + __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */ + }; __u32 family; /* Protocol family (AF_INET, AF_INET6) */ __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h index 34633283de64..a1000cb63063 100644 --- a/include/uapi/linux/can/error.h +++ b/include/uapi/linux/can/error.h @@ -120,6 +120,9 @@ #define CAN_ERR_TRX_CANL_SHORT_TO_GND 0x70 /* 0111 0000 */ #define CAN_ERR_TRX_CANL_SHORT_TO_CANH 0x80 /* 1000 0000 */ -/* controller specific additional information / data[5..7] */ +/* data[5] is reserved (do not use) */ + +/* TX error counter / data[6] */ +/* RX error counter / data[7] */ #endif /* _UAPI_CAN_ERROR_H */ diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h index c55935b64ccc..590f8aea2b6d 100644 --- a/include/uapi/linux/can/isotp.h +++ b/include/uapi/linux/can/isotp.h @@ -137,20 +137,16 @@ struct can_isotp_ll_options { #define CAN_ISOTP_WAIT_TX_DONE 0x400 /* wait for tx completion */ #define CAN_ISOTP_SF_BROADCAST 0x800 /* 1-to-N functional addressing */ -/* default values */ +/* protocol machine default values */ #define CAN_ISOTP_DEFAULT_FLAGS 0 #define CAN_ISOTP_DEFAULT_EXT_ADDRESS 0x00 #define CAN_ISOTP_DEFAULT_PAD_CONTENT 0xCC /* prevent bit-stuffing */ -#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 0 +#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 50000 /* 50 micro seconds */ #define CAN_ISOTP_DEFAULT_RECV_BS 0 #define CAN_ISOTP_DEFAULT_RECV_STMIN 0x00 #define CAN_ISOTP_DEFAULT_RECV_WFTMAX 0 -#define CAN_ISOTP_DEFAULT_LL_MTU CAN_MTU -#define CAN_ISOTP_DEFAULT_LL_TX_DL CAN_MAX_DLEN -#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS 0 - /* * Remark on CAN_ISOTP_DEFAULT_RECV_* values: * @@ -162,4 +158,24 @@ struct can_isotp_ll_options { * consistency and copied directly into the flow control (FC) frame. 
*/ +/* link layer default values => make use of Classical CAN frames */ + +#define CAN_ISOTP_DEFAULT_LL_MTU CAN_MTU +#define CAN_ISOTP_DEFAULT_LL_TX_DL CAN_MAX_DLEN +#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS 0 + +/* + * The CAN_ISOTP_DEFAULT_FRAME_TXTIME has become a non-zero value as + * it only makes sense for isotp implementation tests to run without + * a N_As value. As user space applications usually do not set the + * frame_txtime element of struct can_isotp_options the new in-kernel + * default is very likely overwritten with zero when the sockopt() + * CAN_ISOTP_OPTS is invoked. + * To make sure that a N_As value of zero is only set intentional the + * value '0' is now interpreted as 'do not change the current value'. + * When a frame_txtime of zero is required for testing purposes this + * CAN_ISOTP_FRAME_TXTIME_ZERO u32 value has to be set in frame_txtime. + */ +#define CAN_ISOTP_FRAME_TXTIME_ZERO 0xFFFFFFFF + #endif /* !_UAPI_CAN_ISOTP_H */ diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h index 741985d12f7d..9ae1b3aecb10 100644 --- a/include/uapi/linux/capability.h +++ b/include/uapi/linux/capability.h @@ -425,7 +425,7 @@ struct vfs_ns_cap_data { */ #define CAP_TO_INDEX(x) ((x) >> 5) /* 1 << 5 == bits in __u32 */ -#define CAP_TO_MASK(x) (1 << ((x) & 31)) /* mask for indexed __u32 */ +#define CAP_TO_MASK(x) (1U << ((x) & 31)) /* mask for indexed __u32 */ #endif /* _UAPI_LINUX_CAPABILITY_H */ diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h index 6236c644624d..37aeab6d101a 100644 --- a/include/uapi/linux/dma-buf.h +++ b/include/uapi/linux/dma-buf.h @@ -44,8 +44,8 @@ struct dma_buf_sync { * between them in actual uapi, they're just different numbers. */ #define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *) -#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32) -#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64) +#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32) +#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64) struct dma_buf_sync_partial { __u64 flags; diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h index 8a3432d0f0dc..e687658843b1 100644 --- a/include/uapi/linux/eventpoll.h +++ b/include/uapi/linux/eventpoll.h @@ -41,6 +41,12 @@ #define EPOLLMSG (__force __poll_t)0x00000400 #define EPOLLRDHUP (__force __poll_t)0x00002000 +/* + * Internal flag - wakeup generated by io_uring, used to detect recursion back + * into the io_uring poll handler. 
+ */ +#define EPOLL_URING_WAKE ((__force __poll_t)(1U << 27)) + /* Set exclusive wakeup mode for the target file descriptor */ #define EPOLLEXCLUSIVE ((__force __poll_t)(1U << 28)) diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 98d8e06dea22..6481db937002 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -42,23 +42,25 @@ struct io_uring_sqe { __u32 statx_flags; __u32 fadvise_advice; __u32 splice_flags; + __u32 rename_flags; + __u32 unlink_flags; + __u32 hardlink_flags; }; __u64 user_data; /* data to be passed back at completion time */ + /* pack this to avoid bogus arm OABI complaints */ union { - struct { - /* pack this to avoid bogus arm OABI complaints */ - union { - /* index into fixed buffers, if used */ - __u16 buf_index; - /* for grouped buffer selection */ - __u16 buf_group; - } __attribute__((packed)); - /* personality to use, if used */ - __u16 personality; - __s32 splice_fd_in; - }; - __u64 __pad2[3]; + /* index into fixed buffers, if used */ + __u16 buf_index; + /* for grouped buffer selection */ + __u16 buf_group; + } __attribute__((packed)); + /* personality to use, if used */ + __u16 personality; + union { + __s32 splice_fd_in; + __u32 file_index; }; + __u64 __pad2[2]; }; enum { @@ -132,6 +134,9 @@ enum { IORING_OP_PROVIDE_BUFFERS, IORING_OP_REMOVE_BUFFERS, IORING_OP_TEE, + IORING_OP_SHUTDOWN, + IORING_OP_RENAMEAT, + IORING_OP_UNLINKAT, /* this goes last, obviously */ IORING_OP_LAST, @@ -145,14 +150,34 @@ enum { /* * sqe->timeout_flags */ -#define IORING_TIMEOUT_ABS (1U << 0) - +#define IORING_TIMEOUT_ABS (1U << 0) +#define IORING_TIMEOUT_UPDATE (1U << 1) +#define IORING_TIMEOUT_BOOTTIME (1U << 2) +#define IORING_TIMEOUT_REALTIME (1U << 3) +#define IORING_LINK_TIMEOUT_UPDATE (1U << 4) +#define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME) +#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE) /* * sqe->splice_flags * extends splice(2) flags */ #define SPLICE_F_FD_IN_FIXED (1U << 31) /* the last bit of __u32 */ +/* + * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the + * command flags for POLL_ADD are stored in sqe->len. + * + * IORING_POLL_ADD_MULTI Multishot poll. Sets IORING_CQE_F_MORE if + * the poll handler will continue to report + * CQEs on behalf of the same SQE. + * + * IORING_POLL_UPDATE Update existing poll request, matching + * sqe->addr as the old user_data field. + */ +#define IORING_POLL_ADD_MULTI (1U << 0) +#define IORING_POLL_UPDATE_EVENTS (1U << 1) +#define IORING_POLL_UPDATE_USER_DATA (1U << 2) + /* * IO completion data structure (Completion Queue Entry) */ @@ -166,8 +191,10 @@ struct io_uring_cqe { * cqe->flags * * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID + * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries */ #define IORING_CQE_F_BUFFER (1U << 0) +#define IORING_CQE_F_MORE (1U << 1) enum { IORING_CQE_BUFFER_SHIFT = 16, @@ -226,6 +253,7 @@ struct io_cqring_offsets { #define IORING_ENTER_GETEVENTS (1U << 0) #define IORING_ENTER_SQ_WAKEUP (1U << 1) #define IORING_ENTER_SQ_WAIT (1U << 2) +#define IORING_ENTER_EXT_ARG (1U << 3) /* * Passed in for io_uring_setup(2). 
Copied back with updated info on success @@ -253,6 +281,10 @@ struct io_uring_params { #define IORING_FEAT_CUR_PERSONALITY (1U << 4) #define IORING_FEAT_FAST_POLL (1U << 5) #define IORING_FEAT_POLL_32BITS (1U << 6) +#define IORING_FEAT_SQPOLL_NONFIXED (1U << 7) +#define IORING_FEAT_EXT_ARG (1U << 8) +#define IORING_FEAT_NATIVE_WORKERS (1U << 9) +#define IORING_FEAT_RSRC_TAGS (1U << 10) /* * io_uring_register(2) opcodes and arguments @@ -272,16 +304,62 @@ enum { IORING_REGISTER_RESTRICTIONS = 11, IORING_REGISTER_ENABLE_RINGS = 12, + /* extended with tagging */ + IORING_REGISTER_FILES2 = 13, + IORING_REGISTER_FILES_UPDATE2 = 14, + IORING_REGISTER_BUFFERS2 = 15, + IORING_REGISTER_BUFFERS_UPDATE = 16, + + /* set/clear io-wq thread affinities */ + IORING_REGISTER_IOWQ_AFF = 17, + IORING_UNREGISTER_IOWQ_AFF = 18, + + /* set/get max number of io-wq workers */ + IORING_REGISTER_IOWQ_MAX_WORKERS = 19, + /* this goes last */ IORING_REGISTER_LAST }; +/* io-wq worker categories */ +enum { + IO_WQ_BOUND, + IO_WQ_UNBOUND, +}; + +/* deprecated, see struct io_uring_rsrc_update */ struct io_uring_files_update { __u32 offset; __u32 resv; __aligned_u64 /* __s32 * */ fds; }; +struct io_uring_rsrc_register { + __u32 nr; + __u32 resv; + __u64 resv2; + __aligned_u64 data; + __aligned_u64 tags; +}; + +struct io_uring_rsrc_update { + __u32 offset; + __u32 resv; + __aligned_u64 data; +}; + +struct io_uring_rsrc_update2 { + __u32 offset; + __u32 resv; + __aligned_u64 data; + __aligned_u64 tags; + __u32 nr; + __u32 resv2; +}; + +/* Skip updating fd indexes set to this value in the fd table */ +#define IORING_REGISTER_FILES_SKIP (-2) + #define IO_URING_OP_SUPPORTED (1U << 0) struct io_uring_probe_op { @@ -329,4 +407,11 @@ enum { IORING_RESTRICTION_LAST }; +struct io_uring_getevents_arg { + __u64 sigmask; + __u32 sigmask_sz; + __u32 pad; + __u64 ts; +}; + #endif diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h index e42d13b55cf3..64beb32a2102 100644 --- a/include/uapi/linux/ip.h +++ b/include/uapi/linux/ip.h @@ -100,8 +100,14 @@ struct iphdr { __u8 ttl; __u8 protocol; __sum16 check; - __be32 saddr; - __be32 daddr; +#ifndef __GENKSYMS__ + __struct_group(/* no tag */, addrs, /* no attrs */, +#endif + __be32 saddr; + __be32 daddr; +#ifndef __GENKSYMS__ + ); +#endif /*The options start here. */ }; diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 13e8751bf24a..bfe9542953af 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -130,8 +130,14 @@ struct ipv6hdr { __u8 nexthdr; __u8 hop_limit; - struct in6_addr saddr; - struct in6_addr daddr; +#ifndef __GENKSYMS__ + __struct_group(/* no tag */, addrs, /* no attrs */, +#endif + struct in6_addr saddr; + struct in6_addr daddr; +#ifndef __GENKSYMS__ + ); +#endif }; diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h index 07ae4e189044..104ac3250edd 100644 --- a/include/uapi/linux/netfilter/xt_IDLETIMER.h +++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h @@ -1,6 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* - * linux/include/linux/netfilter/xt_IDLETIMER.h - * * Header file for Xtables timer target module. * * Copyright (C) 2004, 2010 Nokia Corporation @@ -10,20 +9,6 @@ * by Luciano Coelho * * Contact: Luciano Coelho - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA */ #ifndef _XT_IDLETIMER_H diff --git a/include/uapi/linux/openat2.h b/include/uapi/linux/openat2.h index 58b1eb711360..a5feb7604948 100644 --- a/include/uapi/linux/openat2.h +++ b/include/uapi/linux/openat2.h @@ -35,5 +35,9 @@ struct open_how { #define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".." be scoped inside the dirfd (similar to chroot(2)). */ +#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be + completed through cached lookup. May + return -EAGAIN if that's not + possible. */ #endif /* _UAPI_LINUX_OPENAT2_H */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 142b184eca8b..7e0d526dd96f 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -837,6 +837,13 @@ #define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */ #define PCI_EXT_CAP_PWR_SIZEOF 16 +/* Root Complex Event Collector Endpoint Association */ +#define PCI_RCEC_RCIEP_BITMAP 4 /* Associated Bitmap for RCiEPs */ +#define PCI_RCEC_BUSN 8 /* RCEC Associated Bus Numbers */ +#define PCI_RCEC_BUSN_REG_VER 0x02 /* Least version with BUSN present */ +#define PCI_RCEC_BUSN_NEXT(x) (((x) >> 8) & 0xff) +#define PCI_RCEC_BUSN_LAST(x) (((x) >> 16) & 0xff) + /* Vendor-Specific (VSEC, PCI_EXT_CAP_ID_VNDR) */ #define PCI_VNDR_HEADER 4 /* Vendor-Specific Header */ #define PCI_VNDR_HEADER_ID(x) ((x) & 0xffff) diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index ee8220f8dcf5..c3725b492263 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h @@ -1,6 +1,31 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_STDDEF_H +#define _UAPI_LINUX_STDDEF_H + #include #ifndef __always_inline #define __always_inline inline #endif + +/** + * __struct_group() - Create a mirrored named and anonyomous struct + * + * @TAG: The tag name for the named sub-struct (usually empty) + * @NAME: The identifier name of the mirrored sub-struct + * @ATTRS: Any struct attributes (usually empty) + * @MEMBERS: The member declarations for the mirrored structs + * + * Used to create an anonymous union of two structs with identical layout + * and size: one anonymous and one named. The former's members can be used + * normally without sub-struct naming, and the latter can be used to + * reason about the start, end, and size of the group of struct members. + * The named struct can also be explicitly tagged for layer reuse, as well + * as both having struct attributes appended. + */ +#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) 
\ + union { \ + struct { MEMBERS } ATTRS; \ + struct TAG { MEMBERS } ATTRS NAME; \ + } +#endif diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index d900af7e9c8d..514db71fb83d 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -1556,7 +1556,8 @@ struct v4l2_bt_timings { ((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt)) #define V4L2_DV_BT_BLANKING_HEIGHT(bt) \ ((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \ - (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) + ((bt)->interlaced ? \ + ((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0)) #define V4L2_DV_BT_FRAME_HEIGHT(bt) \ ((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt)) diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h index e1126a74882a..eff166fdd81b 100644 --- a/include/video/of_display_timing.h +++ b/include/video/of_display_timing.h @@ -8,6 +8,8 @@ #ifndef __LINUX_OF_DISPLAY_TIMING_H #define __LINUX_OF_DISPLAY_TIMING_H +#include + struct device_node; struct display_timing; struct display_timings; diff --git a/init/Kconfig b/init/Kconfig index 9b61c23b45de..2c92f9428121 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -68,6 +68,11 @@ config CC_HAS_ASM_GOTO_OUTPUT depends on CC_HAS_ASM_GOTO def_bool $(success,echo 'int foo(int x) { asm goto ("": "=r"(x) ::: bar); return x; bar: return 0; }' | $(CC) -x c - -c -o /dev/null) +config CC_HAS_ASM_GOTO_TIED_OUTPUT + depends on CC_HAS_ASM_GOTO_OUTPUT + # Detect buggy gcc and clang, fixed in gcc-11 clang-14. + def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null) + config TOOLS_SUPPORT_RELR def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh) @@ -126,6 +131,20 @@ config COMPILE_TEST here. If you are a user/distributor, say N here to exclude useless drivers to be distributed. +config WERROR + bool "Compile the kernel with warnings as errors" + default y + help + A kernel build should not cause any compiler warnings, and this + enables the '-Werror' flag to enforce that rule by default. + + However, if you have a new (or very old) compiler with odd and + unusual warnings, or you have some architecture with problems, + you may need to disable this config option in order to + successfully build the kernel. + + If in doubt, say Y. 
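The CC_HAS_ASM_GOTO_TIED_OUTPUT probe above feeds the compiler a one-line program combining an asm goto label with a tied ("+m", read/write) output operand, the exact combination that gcc before 11 and clang before 14 miscompile. Written out as a standalone translation unit, the probe is simply:

/*
 * Expanded form of the Kconfig probe: this builds only if the compiler
 * supports tied ("+") output operands in asm goto. The ".long
 * (%l[bar]) - ." emits a reference to the label so it cannot be
 * optimized away.
 */
int foo(int *x)
{
	asm goto (".long (%l[bar]) - ." : "+m"(*x) : : : bar);
	return *x;
bar:
	return 0;
}

If this compiles, the def_bool evaluates to y and code elsewhere in the tree can conditionally rely on tied outputs in asm goto; on affected compilers the probe fails and the option stays off.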
+ config UAPI_HEADER_TEST bool "Compile test UAPI headers" depends on HEADERS_INSTALL && CC_CAN_LINK diff --git a/init/main.c b/init/main.c index a53604d17408..45ca3521898e 100644 --- a/init/main.c +++ b/init/main.c @@ -965,21 +965,18 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) softirq_init(); timekeeping_init(); kfence_init(); + time_init(); /* * For best initial stack canary entropy, prepare it after: * - setup_arch() for any UEFI RNG entropy and boot cmdline access - * - timekeeping_init() for ktime entropy used in rand_initialize() - * - rand_initialize() to get any arch-specific entropy like RDRAND - * - add_latent_entropy() to get any latent entropy - * - adding command line entropy + * - timekeeping_init() for ktime entropy used in random_init() + * - time_init() for making random_get_entropy() work on some platforms + * - random_init() to initialize the RNG from from early entropy sources */ - rand_initialize(); - add_latent_entropy(); - add_device_randomness(command_line, strlen(command_line)); + random_init(command_line); boot_init_stack_canary(); - time_init(); perf_event_init(); profile_init(); call_function_init(); @@ -1117,7 +1114,7 @@ static int __init initcall_blacklist(char *str) } } while (str_entry); - return 0; + return 1; } static bool __init_or_module initcall_blacklisted(initcall_t fn) @@ -1572,7 +1569,9 @@ static noinline void __init kernel_init_freeable(void); bool rodata_enabled __ro_after_init = true; static int __init set_debug_rodata(char *str) { - return strtobool(str, &rodata_enabled); + if (strtobool(str, &rodata_enabled)) + pr_warn("Invalid option string for rodata: '%s'\n", str); + return 1; } __setup("rodata=", set_debug_rodata); #endif diff --git a/io_uring/Makefile b/io_uring/Makefile new file mode 100644 index 000000000000..3680425df947 --- /dev/null +++ b/io_uring/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for io_uring + +obj-$(CONFIG_IO_URING) += io_uring.o +obj-$(CONFIG_IO_WQ) += io-wq.o diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c new file mode 100644 index 000000000000..6031fb319d87 --- /dev/null +++ b/io_uring/io-wq.c @@ -0,0 +1,1398 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Basic worker thread pool for io_uring + * + * Copyright (C) 2019 Jens Axboe + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "io-wq.h" + +#define WORKER_IDLE_TIMEOUT (5 * HZ) + +enum { + IO_WORKER_F_UP = 1, /* up and active */ + IO_WORKER_F_RUNNING = 2, /* account as running */ + IO_WORKER_F_FREE = 4, /* worker on free list */ + IO_WORKER_F_BOUND = 8, /* is doing bounded work */ +}; + +enum { + IO_WQ_BIT_EXIT = 0, /* wq exiting */ +}; + +enum { + IO_ACCT_STALLED_BIT = 0, /* stalled on hash */ +}; + +/* + * One for each thread in a wqe pool + */ +struct io_worker { + refcount_t ref; + unsigned flags; + struct hlist_nulls_node nulls_node; + struct list_head all_list; + struct task_struct *task; + struct io_wqe *wqe; + + struct io_wq_work *cur_work; + spinlock_t lock; + + struct completion ref_done; + + unsigned long create_state; + struct callback_head create_work; + int create_index; + + union { + struct rcu_head rcu; + struct work_struct work; + }; +}; + +#if BITS_PER_LONG == 64 +#define IO_WQ_HASH_ORDER 6 +#else +#define IO_WQ_HASH_ORDER 5 +#endif + +#define IO_WQ_NR_HASH_BUCKETS (1u << IO_WQ_HASH_ORDER) + +struct io_wqe_acct { + unsigned nr_workers; + unsigned max_workers; + int index; + atomic_t nr_running; + struct 
io_wq_work_list work_list; + unsigned long flags; +}; + +enum { + IO_WQ_ACCT_BOUND, + IO_WQ_ACCT_UNBOUND, + IO_WQ_ACCT_NR, +}; + +/* + * Per-node worker thread pool + */ +struct io_wqe { + raw_spinlock_t lock; + struct io_wqe_acct acct[2]; + + int node; + + struct hlist_nulls_head free_list; + struct list_head all_list; + + struct wait_queue_entry wait; + + struct io_wq *wq; + struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS]; + + cpumask_var_t cpu_mask; +}; + +/* + * Per io_wq state + */ +struct io_wq { + unsigned long state; + + free_work_fn *free_work; + io_wq_work_fn *do_work; + + struct io_wq_hash *hash; + + atomic_t worker_refs; + struct completion worker_done; + + struct hlist_node cpuhp_node; + + struct task_struct *task; + + struct io_wqe *wqes[]; +}; + +static enum cpuhp_state io_wq_online; + +struct io_cb_cancel_data { + work_cancel_fn *fn; + void *data; + int nr_running; + int nr_pending; + bool cancel_all; +}; + +static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index); +static void io_wqe_dec_running(struct io_worker *worker); +static bool io_acct_cancel_pending_work(struct io_wqe *wqe, + struct io_wqe_acct *acct, + struct io_cb_cancel_data *match); +static void create_worker_cb(struct callback_head *cb); +static void io_wq_cancel_tw_create(struct io_wq *wq); + +static bool io_worker_get(struct io_worker *worker) +{ + return refcount_inc_not_zero(&worker->ref); +} + +static void io_worker_release(struct io_worker *worker) +{ + if (refcount_dec_and_test(&worker->ref)) + complete(&worker->ref_done); +} + +static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound) +{ + return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND]; +} + +static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe, + struct io_wq_work *work) +{ + return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND)); +} + +static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker) +{ + return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND); +} + +static void io_worker_ref_put(struct io_wq *wq) +{ + if (atomic_dec_and_test(&wq->worker_refs)) + complete(&wq->worker_done); +} + +static void io_worker_cancel_cb(struct io_worker *worker) +{ + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + + atomic_dec(&acct->nr_running); + raw_spin_lock(&worker->wqe->lock); + acct->nr_workers--; + raw_spin_unlock(&worker->wqe->lock); + io_worker_ref_put(wq); + clear_bit_unlock(0, &worker->create_state); + io_worker_release(worker); +} + +static bool io_task_worker_match(struct callback_head *cb, void *data) +{ + struct io_worker *worker; + + if (cb->func != create_worker_cb) + return false; + worker = container_of(cb, struct io_worker, create_work); + return worker == data; +} + +static void io_worker_exit(struct io_worker *worker) +{ + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + + while (1) { + struct callback_head *cb = task_work_cancel_match(wq->task, + io_task_worker_match, worker); + + if (!cb) + break; + io_worker_cancel_cb(worker); + } + + if (refcount_dec_and_test(&worker->ref)) + complete(&worker->ref_done); + wait_for_completion(&worker->ref_done); + + raw_spin_lock(&wqe->lock); + if (worker->flags & IO_WORKER_F_FREE) + hlist_nulls_del_rcu(&worker->nulls_node); + list_del_rcu(&worker->all_list); + preempt_disable(); + io_wqe_dec_running(worker); + worker->flags = 0; + current->flags &= ~PF_IO_WORKER; + preempt_enable(); + 
raw_spin_unlock(&wqe->lock); + + kfree_rcu(worker, rcu); + io_worker_ref_put(wqe->wq); + do_exit(0); +} + +static inline bool io_acct_run_queue(struct io_wqe_acct *acct) +{ + if (!wq_list_empty(&acct->work_list) && + !test_bit(IO_ACCT_STALLED_BIT, &acct->flags)) + return true; + return false; +} + +/* + * Check head of free list for an available worker. If one isn't available, + * caller must create one. + */ +static bool io_wqe_activate_free_worker(struct io_wqe *wqe, + struct io_wqe_acct *acct) + __must_hold(RCU) +{ + struct hlist_nulls_node *n; + struct io_worker *worker; + + /* + * Iterate free_list and see if we can find an idle worker to + * activate. If a given worker is on the free_list but in the process + * of exiting, keep trying. + */ + hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) { + if (!io_worker_get(worker)) + continue; + if (io_wqe_get_acct(worker) != acct) { + io_worker_release(worker); + continue; + } + if (wake_up_process(worker->task)) { + io_worker_release(worker); + return true; + } + io_worker_release(worker); + } + + return false; +} + +/* + * We need a worker. If we find a free one, we're good. If not, and we're + * below the max number of workers, create one. + */ +static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) +{ + /* + * Most likely an attempt to queue unbounded work on an io_wq that + * wasn't setup with any unbounded workers. + */ + if (unlikely(!acct->max_workers)) + pr_warn_once("io-wq is not configured for unbound workers"); + + raw_spin_lock(&wqe->lock); + if (acct->nr_workers >= acct->max_workers) { + raw_spin_unlock(&wqe->lock); + return true; + } + acct->nr_workers++; + raw_spin_unlock(&wqe->lock); + atomic_inc(&acct->nr_running); + atomic_inc(&wqe->wq->worker_refs); + return create_io_worker(wqe->wq, wqe, acct->index); +} + +static void io_wqe_inc_running(struct io_worker *worker) +{ + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + + atomic_inc(&acct->nr_running); +} + +static void create_worker_cb(struct callback_head *cb) +{ + struct io_worker *worker; + struct io_wq *wq; + struct io_wqe *wqe; + struct io_wqe_acct *acct; + bool do_create = false; + + worker = container_of(cb, struct io_worker, create_work); + wqe = worker->wqe; + wq = wqe->wq; + acct = &wqe->acct[worker->create_index]; + raw_spin_lock(&wqe->lock); + if (acct->nr_workers < acct->max_workers) { + acct->nr_workers++; + do_create = true; + } + raw_spin_unlock(&wqe->lock); + if (do_create) { + create_io_worker(wq, wqe, worker->create_index); + } else { + atomic_dec(&acct->nr_running); + io_worker_ref_put(wq); + } + clear_bit_unlock(0, &worker->create_state); + io_worker_release(worker); +} + +static bool io_queue_worker_create(struct io_worker *worker, + struct io_wqe_acct *acct, + task_work_func_t func) +{ + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + + /* raced with exit, just ignore create call */ + if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) + goto fail; + if (!io_worker_get(worker)) + goto fail; + /* + * create_state manages ownership of create_work/index. We should + * only need one entry per worker, as the worker going to sleep + * will trigger the condition, and waking will clear it once it + * runs the task_work. 
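In other words, bit 0 of create_state acts as a tiny trylock around create_work/create_index. A freestanding sketch of the claim/release protocol used just below (claim_create_slot() and release_create_slot() are illustrative names, not helpers in this patch):

	static bool claim_create_slot(struct io_worker *worker)
	{
		/* racy test_bit() first skips the atomic RMW when already claimed */
		if (test_bit(0, &worker->create_state))
			return false;
		/* test_and_set_bit_lock() provides acquire semantics on success */
		return !test_and_set_bit_lock(0, &worker->create_state);
	}

	static void release_create_slot(struct io_worker *worker)
	{
		/* pairs with the acquire above */
		clear_bit_unlock(0, &worker->create_state);
	}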
+ */ + if (test_bit(0, &worker->create_state) || + test_and_set_bit_lock(0, &worker->create_state)) + goto fail_release; + + atomic_inc(&wq->worker_refs); + init_task_work(&worker->create_work, func); + worker->create_index = acct->index; + if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) { + /* + * EXIT may have been set after checking it above, check after + * adding the task_work and remove any creation item if it is + * now set. wq exit does that too, but we can have added this + * work item after we canceled in io_wq_exit_workers(). + */ + if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) + io_wq_cancel_tw_create(wq); + io_worker_ref_put(wq); + return true; + } + io_worker_ref_put(wq); + clear_bit_unlock(0, &worker->create_state); +fail_release: + io_worker_release(worker); +fail: + atomic_dec(&acct->nr_running); + io_worker_ref_put(wq); + return false; +} + +static void io_wqe_dec_running(struct io_worker *worker) + __must_hold(wqe->lock) +{ + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + struct io_wqe *wqe = worker->wqe; + + if (!(worker->flags & IO_WORKER_F_UP)) + return; + + if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) { + atomic_inc(&acct->nr_running); + atomic_inc(&wqe->wq->worker_refs); + raw_spin_unlock(&wqe->lock); + io_queue_worker_create(worker, acct, create_worker_cb); + raw_spin_lock(&wqe->lock); + } +} + +/* + * Worker will start processing some work. Move it to the busy list, if + * it's currently on the freelist + */ +static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker, + struct io_wq_work *work) + __must_hold(wqe->lock) +{ + if (worker->flags & IO_WORKER_F_FREE) { + worker->flags &= ~IO_WORKER_F_FREE; + hlist_nulls_del_init_rcu(&worker->nulls_node); + } +} + +/* + * No work, worker going to sleep. Move to freelist, and unuse mm if we + * have one attached. Dropping the mm may potentially sleep, so we drop + * the lock in that case and return success. Since the caller has to + * retry the loop in that case (we changed task state), we don't regrab + * the lock if we return success. 
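Stepping back from the sleep path for a moment: the wq->worker_refs counter paired with the wq->worker_done completion, which these create and exit paths keep taking and dropping, is the classic refcount-plus-completion teardown idiom. A minimal freestanding sketch (the obj names are illustrative):

	struct obj {
		atomic_t refs;			/* initialized to 1 */
		struct completion done;
	};

	static void obj_put(struct obj *o)
	{
		if (atomic_dec_and_test(&o->refs))
			complete(&o->done);	/* last user wakes the destroyer */
	}

	static void obj_shutdown(struct obj *o)
	{
		obj_put(o);			/* drop the initial reference */
		wait_for_completion(&o->done);	/* wait out all other users */
	}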
+ */ +static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker) + __must_hold(wqe->lock) +{ + if (!(worker->flags & IO_WORKER_F_FREE)) { + worker->flags |= IO_WORKER_F_FREE; + hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); + } +} + +static inline unsigned int io_get_work_hash(struct io_wq_work *work) +{ + return work->flags >> IO_WQ_HASH_SHIFT; +} + +static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash) +{ + struct io_wq *wq = wqe->wq; + bool ret = false; + + spin_lock_irq(&wq->hash->wait.lock); + if (list_empty(&wqe->wait.entry)) { + __add_wait_queue(&wq->hash->wait, &wqe->wait); + if (!test_bit(hash, &wq->hash->map)) { + __set_current_state(TASK_RUNNING); + list_del_init(&wqe->wait.entry); + ret = true; + } + } + spin_unlock_irq(&wq->hash->wait.lock); + return ret; +} + +static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct, + struct io_worker *worker) + __must_hold(wqe->lock) +{ + struct io_wq_work_node *node, *prev; + struct io_wq_work *work, *tail; + unsigned int stall_hash = -1U; + struct io_wqe *wqe = worker->wqe; + + wq_list_for_each(node, prev, &acct->work_list) { + unsigned int hash; + + work = container_of(node, struct io_wq_work, list); + + /* not hashed, can run anytime */ + if (!io_wq_is_hashed(work)) { + wq_list_del(&acct->work_list, node, prev); + return work; + } + + hash = io_get_work_hash(work); + /* all items with this hash lie in [work, tail] */ + tail = wqe->hash_tail[hash]; + + /* hashed, can run if not already running */ + if (!test_and_set_bit(hash, &wqe->wq->hash->map)) { + wqe->hash_tail[hash] = NULL; + wq_list_cut(&acct->work_list, &tail->list, prev); + return work; + } + if (stall_hash == -1U) + stall_hash = hash; + /* fast forward to a next hash, for-each will fix up @prev */ + node = &tail->list; + } + + if (stall_hash != -1U) { + bool unstalled; + + /* + * Set this before dropping the lock to avoid racing with new + * work being added and clearing the stalled bit. + */ + set_bit(IO_ACCT_STALLED_BIT, &acct->flags); + raw_spin_unlock(&wqe->lock); + unstalled = io_wait_on_hash(wqe, stall_hash); + raw_spin_lock(&wqe->lock); + if (unstalled) { + clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); + if (wq_has_sleeper(&wqe->wq->hash->wait)) + wake_up(&wqe->wq->hash->wait); + } + } + + return NULL; +} + +static bool io_flush_signals(void) +{ + if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) { + __set_current_state(TASK_RUNNING); + tracehook_notify_signal(); + return true; + } + return false; +} + +static void io_assign_current_work(struct io_worker *worker, + struct io_wq_work *work) +{ + if (work) { + io_flush_signals(); + cond_resched(); + } + + spin_lock(&worker->lock); + worker->cur_work = work; + spin_unlock(&worker->lock); +} + +static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work); + +static void io_worker_handle_work(struct io_worker *worker) + __releases(wqe->lock) +{ + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state); + + do { + struct io_wq_work *work; +get_next: + /* + * If we got some work, mark us as busy. If we didn't, but + * the list isn't empty, it means we stalled on hashed work. + * Mark us stalled so we don't keep looking for work when we + * can't make progress, any work completion or insertion will + * clear the stalled flag. 
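The hashing protocol spelled out: each of the IO_WQ_NR_HASH_BUCKETS buckets is one bit in wq->hash->map. The worker that dequeues the first item of a chain claims the bucket, and releases it under the waitqueue lock so the clear stays ordered against sleepers. Both fragments appear in io_get_next_work() above and io_worker_handle_work() here:

	/* claim: only one chain per hash value runs at a time */
	if (!test_and_set_bit(hash, &wq->hash->map)) {
		/* bucket owned: the whole [work, tail] span can run */
	}

	/* release, serialized with wake_up() on the shared hash waitqueue */
	spin_lock_irq(&wq->hash->wait.lock);
	clear_bit(hash, &wq->hash->map);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
	spin_unlock_irq(&wq->hash->wait.lock);
	if (wq_has_sleeper(&wq->hash->wait))
		wake_up(&wq->hash->wait);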
+ */ + work = io_get_next_work(acct, worker); + if (work) + __io_worker_busy(wqe, worker, work); + + raw_spin_unlock(&wqe->lock); + if (!work) + break; + io_assign_current_work(worker, work); + __set_current_state(TASK_RUNNING); + + /* handle a whole dependent link */ + do { + struct io_wq_work *next_hashed, *linked; + unsigned int hash = io_get_work_hash(work); + + next_hashed = wq_next_work(work); + + if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND)) + work->flags |= IO_WQ_WORK_CANCEL; + wq->do_work(work); + io_assign_current_work(worker, NULL); + + linked = wq->free_work(work); + work = next_hashed; + if (!work && linked && !io_wq_is_hashed(linked)) { + work = linked; + linked = NULL; + } + io_assign_current_work(worker, work); + if (linked) + io_wqe_enqueue(wqe, linked); + + if (hash != -1U && !next_hashed) { + /* serialize hash clear with wake_up() */ + spin_lock_irq(&wq->hash->wait.lock); + clear_bit(hash, &wq->hash->map); + clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); + spin_unlock_irq(&wq->hash->wait.lock); + if (wq_has_sleeper(&wq->hash->wait)) + wake_up(&wq->hash->wait); + raw_spin_lock(&wqe->lock); + /* skip unnecessary unlock-lock wqe->lock */ + if (!work) + goto get_next; + raw_spin_unlock(&wqe->lock); + } + } while (work); + + raw_spin_lock(&wqe->lock); + } while (1); +} + +static int io_wqe_worker(void *data) +{ + struct io_worker *worker = data; + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + bool last_timeout = false; + char buf[TASK_COMM_LEN]; + + worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING); + + snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid); + set_task_comm(current, buf); + + while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) { + long ret; + + set_current_state(TASK_INTERRUPTIBLE); +loop: + raw_spin_lock(&wqe->lock); + if (io_acct_run_queue(acct)) { + io_worker_handle_work(worker); + goto loop; + } + /* timed out, exit unless we're the last worker */ + if (last_timeout && acct->nr_workers > 1) { + acct->nr_workers--; + raw_spin_unlock(&wqe->lock); + __set_current_state(TASK_RUNNING); + break; + } + last_timeout = false; + __io_worker_idle(wqe, worker); + raw_spin_unlock(&wqe->lock); + if (io_flush_signals()) + continue; + ret = schedule_timeout(WORKER_IDLE_TIMEOUT); + if (signal_pending(current)) { + struct ksignal ksig; + + if (!get_signal(&ksig)) + continue; + break; + } + last_timeout = !ret; + } + + if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) { + raw_spin_lock(&wqe->lock); + io_worker_handle_work(worker); + } + + io_worker_exit(worker); + return 0; +} + +/* + * Called when a worker is scheduled in. Mark us as currently running. + */ +void io_wq_worker_running(struct task_struct *tsk) +{ + struct io_worker *worker = tsk->pf_io_worker; + + if (!worker) + return; + if (!(worker->flags & IO_WORKER_F_UP)) + return; + if (worker->flags & IO_WORKER_F_RUNNING) + return; + worker->flags |= IO_WORKER_F_RUNNING; + io_wqe_inc_running(worker); +} + +/* + * Called when worker is going to sleep. If there are no workers currently + * running and we have work pending, wake up a free one or create a new one. 
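One easy-to-miss detail in io_wqe_worker() above: idle reaping rides on the return value of schedule_timeout(), which is the number of jiffies remaining (zero only when the full WORKER_IDLE_TIMEOUT of five seconds passed without a wakeup):

	set_current_state(TASK_INTERRUPTIBLE);
	ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
	last_timeout = !ret;	/* true only on a genuine idle timeout */

On the next loop pass a timed-out worker exits, unless it is the last worker of its accounting class (the acct->nr_workers > 1 check).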
+ */ +void io_wq_worker_sleeping(struct task_struct *tsk) +{ + struct io_worker *worker = tsk->pf_io_worker; + + if (!worker) + return; + if (!(worker->flags & IO_WORKER_F_UP)) + return; + if (!(worker->flags & IO_WORKER_F_RUNNING)) + return; + + worker->flags &= ~IO_WORKER_F_RUNNING; + + raw_spin_lock(&worker->wqe->lock); + io_wqe_dec_running(worker); + raw_spin_unlock(&worker->wqe->lock); +} + +static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker, + struct task_struct *tsk) +{ + tsk->pf_io_worker = worker; + worker->task = tsk; + set_cpus_allowed_ptr(tsk, wqe->cpu_mask); + tsk->flags |= PF_NO_SETAFFINITY; + + raw_spin_lock(&wqe->lock); + hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list); + list_add_tail_rcu(&worker->all_list, &wqe->all_list); + worker->flags |= IO_WORKER_F_FREE; + raw_spin_unlock(&wqe->lock); + wake_up_new_task(tsk); +} + +static bool io_wq_work_match_all(struct io_wq_work *work, void *data) +{ + return true; +} + +static inline bool io_should_retry_thread(long err) +{ + /* + * Prevent perpetual task_work retry, if the task (or its group) is + * exiting. + */ + if (fatal_signal_pending(current)) + return false; + + switch (err) { + case -EAGAIN: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + case -ERESTARTNOHAND: + return true; + default: + return false; + } +} + +static void create_worker_cont(struct callback_head *cb) +{ + struct io_worker *worker; + struct task_struct *tsk; + struct io_wqe *wqe; + + worker = container_of(cb, struct io_worker, create_work); + clear_bit_unlock(0, &worker->create_state); + wqe = worker->wqe; + tsk = create_io_thread(io_wqe_worker, worker, wqe->node); + if (!IS_ERR(tsk)) { + io_init_new_worker(wqe, worker, tsk); + io_worker_release(worker); + return; + } else if (!io_should_retry_thread(PTR_ERR(tsk))) { + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + + atomic_dec(&acct->nr_running); + raw_spin_lock(&wqe->lock); + acct->nr_workers--; + if (!acct->nr_workers) { + struct io_cb_cancel_data match = { + .fn = io_wq_work_match_all, + .cancel_all = true, + }; + + while (io_acct_cancel_pending_work(wqe, acct, &match)) + raw_spin_lock(&wqe->lock); + } + raw_spin_unlock(&wqe->lock); + io_worker_ref_put(wqe->wq); + kfree(worker); + return; + } + + /* re-create attempts grab a new worker ref, drop the existing one */ + io_worker_release(worker); + schedule_work(&worker->work); +} + +static void io_workqueue_create(struct work_struct *work) +{ + struct io_worker *worker = container_of(work, struct io_worker, work); + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + + if (!io_queue_worker_create(worker, acct, create_worker_cont)) + kfree(worker); +} + +static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) +{ + struct io_wqe_acct *acct = &wqe->acct[index]; + struct io_worker *worker; + struct task_struct *tsk; + + __set_current_state(TASK_RUNNING); + + worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node); + if (!worker) { +fail: + atomic_dec(&acct->nr_running); + raw_spin_lock(&wqe->lock); + acct->nr_workers--; + raw_spin_unlock(&wqe->lock); + io_worker_ref_put(wq); + return false; + } + + refcount_set(&worker->ref, 1); + worker->wqe = wqe; + spin_lock_init(&worker->lock); + init_completion(&worker->ref_done); + + if (index == IO_WQ_ACCT_BOUND) + worker->flags |= IO_WORKER_F_BOUND; + + tsk = create_io_thread(io_wqe_worker, worker, wqe->node); + if (!IS_ERR(tsk)) { + io_init_new_worker(wqe, worker, tsk); + } else if (!io_should_retry_thread(PTR_ERR(tsk))) { + kfree(worker); 
+ goto fail; + } else { + INIT_WORK(&worker->work, io_workqueue_create); + schedule_work(&worker->work); + } + + return true; +} + +/* + * Iterate the passed in list and call the specific function for each + * worker that isn't exiting + */ +static bool io_wq_for_each_worker(struct io_wqe *wqe, + bool (*func)(struct io_worker *, void *), + void *data) +{ + struct io_worker *worker; + bool ret = false; + + list_for_each_entry_rcu(worker, &wqe->all_list, all_list) { + if (io_worker_get(worker)) { + /* no task if node is/was offline */ + if (worker->task) + ret = func(worker, data); + io_worker_release(worker); + if (ret) + break; + } + } + + return ret; +} + +static bool io_wq_worker_wake(struct io_worker *worker, void *data) +{ + set_notify_signal(worker->task); + wake_up_process(worker->task); + return false; +} + +static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe) +{ + struct io_wq *wq = wqe->wq; + + do { + work->flags |= IO_WQ_WORK_CANCEL; + wq->do_work(work); + work = wq->free_work(work); + } while (work); +} + +static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work) +{ + struct io_wqe_acct *acct = io_work_get_acct(wqe, work); + unsigned int hash; + struct io_wq_work *tail; + + if (!io_wq_is_hashed(work)) { +append: + wq_list_add_tail(&work->list, &acct->work_list); + return; + } + + hash = io_get_work_hash(work); + tail = wqe->hash_tail[hash]; + wqe->hash_tail[hash] = work; + if (!tail) + goto append; + + wq_list_add_after(&work->list, &tail->list, &acct->work_list); +} + +static bool io_wq_work_match_item(struct io_wq_work *work, void *data) +{ + return work == data; +} + +static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) +{ + struct io_wqe_acct *acct = io_work_get_acct(wqe, work); + unsigned work_flags = work->flags; + bool do_create; + + /* + * If io-wq is exiting for this task, or if the request has explicitly + * been marked as one that should not get executed, cancel it here. + */ + if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) || + (work->flags & IO_WQ_WORK_CANCEL)) { + io_run_cancel(work, wqe); + return; + } + + raw_spin_lock(&wqe->lock); + io_wqe_insert_work(wqe, work); + clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); + + rcu_read_lock(); + do_create = !io_wqe_activate_free_worker(wqe, acct); + rcu_read_unlock(); + + raw_spin_unlock(&wqe->lock); + + if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) || + !atomic_read(&acct->nr_running))) { + bool did_create; + + did_create = io_wqe_create_worker(wqe, acct); + if (likely(did_create)) + return; + + raw_spin_lock(&wqe->lock); + /* fatal condition, failed to create the first worker */ + if (!acct->nr_workers) { + struct io_cb_cancel_data match = { + .fn = io_wq_work_match_item, + .data = work, + .cancel_all = false, + }; + + if (io_acct_cancel_pending_work(wqe, acct, &match)) + raw_spin_lock(&wqe->lock); + } + raw_spin_unlock(&wqe->lock); + } +} + +void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) +{ + struct io_wqe *wqe = wq->wqes[numa_node_id()]; + + io_wqe_enqueue(wqe, work); +} + +/* + * Work items that hash to the same value will not be done in parallel. + * Used to limit concurrent writes, generally hashed by inode. 
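The bucket index travels inside work->flags itself, in the top bits reserved by IO_WQ_HASH_SHIFT (24, per io-wq.h: the upper 8 bits carry the hash key):

	/* pack, as io_wq_hash_work() below does: */
	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT);

	/* unpack, as io_get_work_hash() does: */
	hash = work->flags >> IO_WQ_HASH_SHIFT;

With IO_WQ_HASH_ORDER of 6 on 64-bit (5 on 32-bit), the index fits comfortably in those eight bits.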
+ */ +void io_wq_hash_work(struct io_wq_work *work, void *val) +{ + unsigned int bit; + + bit = hash_ptr(val, IO_WQ_HASH_ORDER); + work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT)); +} + +static bool io_wq_worker_cancel(struct io_worker *worker, void *data) +{ + struct io_cb_cancel_data *match = data; + + /* + * Hold the lock to avoid ->cur_work going out of scope, caller + * may dereference the passed in work. + */ + spin_lock(&worker->lock); + if (worker->cur_work && + match->fn(worker->cur_work, match->data)) { + set_notify_signal(worker->task); + match->nr_running++; + } + spin_unlock(&worker->lock); + + return match->nr_running && !match->cancel_all; +} + +static inline void io_wqe_remove_pending(struct io_wqe *wqe, + struct io_wq_work *work, + struct io_wq_work_node *prev) +{ + struct io_wqe_acct *acct = io_work_get_acct(wqe, work); + unsigned int hash = io_get_work_hash(work); + struct io_wq_work *prev_work = NULL; + + if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) { + if (prev) + prev_work = container_of(prev, struct io_wq_work, list); + if (prev_work && io_get_work_hash(prev_work) == hash) + wqe->hash_tail[hash] = prev_work; + else + wqe->hash_tail[hash] = NULL; + } + wq_list_del(&acct->work_list, &work->list, prev); +} + +static bool io_acct_cancel_pending_work(struct io_wqe *wqe, + struct io_wqe_acct *acct, + struct io_cb_cancel_data *match) + __releases(wqe->lock) +{ + struct io_wq_work_node *node, *prev; + struct io_wq_work *work; + + wq_list_for_each(node, prev, &acct->work_list) { + work = container_of(node, struct io_wq_work, list); + if (!match->fn(work, match->data)) + continue; + io_wqe_remove_pending(wqe, work, prev); + raw_spin_unlock(&wqe->lock); + io_run_cancel(work, wqe); + match->nr_pending++; + /* not safe to continue after unlock */ + return true; + } + + return false; +} + +static void io_wqe_cancel_pending_work(struct io_wqe *wqe, + struct io_cb_cancel_data *match) +{ + int i; +retry: + raw_spin_lock(&wqe->lock); + for (i = 0; i < IO_WQ_ACCT_NR; i++) { + struct io_wqe_acct *acct = io_get_acct(wqe, i == 0); + + if (io_acct_cancel_pending_work(wqe, acct, match)) { + if (match->cancel_all) + goto retry; + return; + } + } + raw_spin_unlock(&wqe->lock); +} + +static void io_wqe_cancel_running_work(struct io_wqe *wqe, + struct io_cb_cancel_data *match) +{ + rcu_read_lock(); + io_wq_for_each_worker(wqe, io_wq_worker_cancel, match); + rcu_read_unlock(); +} + +enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, + void *data, bool cancel_all) +{ + struct io_cb_cancel_data match = { + .fn = cancel, + .data = data, + .cancel_all = cancel_all, + }; + int node; + + /* + * First check pending list, if we're lucky we can just remove it + * from there. CANCEL_OK means that the work is returned as-new, + * no completion will be posted for it. + */ + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + + io_wqe_cancel_pending_work(wqe, &match); + if (match.nr_pending && !match.cancel_all) + return IO_WQ_CANCEL_OK; + } + + /* + * Now check if a free (going busy) or busy worker has the work + * currently running. If we find it there, we'll return CANCEL_RUNNING + * as an indication that we attempt to signal cancellation. The + * completion will run normally in this case. 
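From the caller's side, the three results differ in who posts the completion. A hypothetical usage sketch (match_item() is an illustrative matcher; io_wq_work_match_item above does the same thing):

	static bool match_item(struct io_wq_work *work, void *data)
	{
		return work == data;	/* cancel one specific request */
	}

	switch (io_wq_cancel_cb(wq, match_item, work, false)) {
	case IO_WQ_CANCEL_OK:		/* dequeued before it ran; no completion */
		break;
	case IO_WQ_CANCEL_RUNNING:	/* signalled mid-run; completes normally */
		break;
	case IO_WQ_CANCEL_NOTFOUND:	/* already finished or never queued */
		break;
	}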
+ */ + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + + io_wqe_cancel_running_work(wqe, &match); + if (match.nr_running && !match.cancel_all) + return IO_WQ_CANCEL_RUNNING; + } + + if (match.nr_running) + return IO_WQ_CANCEL_RUNNING; + if (match.nr_pending) + return IO_WQ_CANCEL_OK; + return IO_WQ_CANCEL_NOTFOUND; +} + +static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode, + int sync, void *key) +{ + struct io_wqe *wqe = container_of(wait, struct io_wqe, wait); + int i; + + list_del_init(&wait->entry); + + rcu_read_lock(); + for (i = 0; i < IO_WQ_ACCT_NR; i++) { + struct io_wqe_acct *acct = &wqe->acct[i]; + + if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags)) + io_wqe_activate_free_worker(wqe, acct); + } + rcu_read_unlock(); + return 1; +} + +struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) +{ + int ret, node, i; + struct io_wq *wq; + + if (WARN_ON_ONCE(!data->free_work || !data->do_work)) + return ERR_PTR(-EINVAL); + if (WARN_ON_ONCE(!bounded)) + return ERR_PTR(-EINVAL); + + wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL); + if (!wq) + return ERR_PTR(-ENOMEM); + ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node); + if (ret) + goto err_wq; + + refcount_inc(&data->hash->refs); + wq->hash = data->hash; + wq->free_work = data->free_work; + wq->do_work = data->do_work; + + ret = -ENOMEM; + for_each_node(node) { + struct io_wqe *wqe; + int alloc_node = node; + + if (!node_online(alloc_node)) + alloc_node = NUMA_NO_NODE; + wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node); + if (!wqe) + goto err; + wq->wqes[node] = wqe; + if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL)) + goto err; + cpumask_copy(wqe->cpu_mask, cpumask_of_node(node)); + wqe->node = alloc_node; + wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; + wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = + task_rlimit(current, RLIMIT_NPROC); + INIT_LIST_HEAD(&wqe->wait.entry); + wqe->wait.func = io_wqe_hash_wake; + for (i = 0; i < IO_WQ_ACCT_NR; i++) { + struct io_wqe_acct *acct = &wqe->acct[i]; + + acct->index = i; + atomic_set(&acct->nr_running, 0); + INIT_WQ_LIST(&acct->work_list); + } + wqe->wq = wq; + raw_spin_lock_init(&wqe->lock); + INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0); + INIT_LIST_HEAD(&wqe->all_list); + } + + wq->task = get_task_struct(data->task); + atomic_set(&wq->worker_refs, 1); + init_completion(&wq->worker_done); + return wq; +err: + io_wq_put_hash(data->hash); + cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); + for_each_node(node) { + if (!wq->wqes[node]) + continue; + free_cpumask_var(wq->wqes[node]->cpu_mask); + kfree(wq->wqes[node]); + } +err_wq: + kfree(wq); + return ERR_PTR(ret); +} + +static bool io_task_work_match(struct callback_head *cb, void *data) +{ + struct io_worker *worker; + + if (cb->func != create_worker_cb && cb->func != create_worker_cont) + return false; + worker = container_of(cb, struct io_worker, create_work); + return worker->wqe->wq == data; +} + +void io_wq_exit_start(struct io_wq *wq) +{ + set_bit(IO_WQ_BIT_EXIT, &wq->state); +} + +static void io_wq_cancel_tw_create(struct io_wq *wq) +{ + struct callback_head *cb; + + while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) { + struct io_worker *worker; + + worker = container_of(cb, struct io_worker, create_work); + io_worker_cancel_cb(worker); + } +} + +static void io_wq_exit_workers(struct io_wq *wq) +{ + int node; + + if (!wq->task) + return; + + io_wq_cancel_tw_create(wq); + + 
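+ /* Teardown order matters here: pending create task_works were cancelled just above, every live worker is now woken under RCU so it observes IO_WQ_BIT_EXIT, and only then is the initial worker_refs reference dropped so the wait_for_completion() below can finish. */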
rcu_read_lock(); + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + + io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL); + } + rcu_read_unlock(); + io_worker_ref_put(wq); + wait_for_completion(&wq->worker_done); + + for_each_node(node) { + spin_lock_irq(&wq->hash->wait.lock); + list_del_init(&wq->wqes[node]->wait.entry); + spin_unlock_irq(&wq->hash->wait.lock); + } + put_task_struct(wq->task); + wq->task = NULL; +} + +static void io_wq_destroy(struct io_wq *wq) +{ + int node; + + cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); + + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + struct io_cb_cancel_data match = { + .fn = io_wq_work_match_all, + .cancel_all = true, + }; + io_wqe_cancel_pending_work(wqe, &match); + free_cpumask_var(wqe->cpu_mask); + kfree(wqe); + } + io_wq_put_hash(wq->hash); + kfree(wq); +} + +void io_wq_put_and_exit(struct io_wq *wq) +{ + WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state)); + + io_wq_exit_workers(wq); + io_wq_destroy(wq); +} + +struct online_data { + unsigned int cpu; + bool online; +}; + +static bool io_wq_worker_affinity(struct io_worker *worker, void *data) +{ + struct online_data *od = data; + + if (od->online) + cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask); + else + cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask); + return false; +} + +static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online) +{ + struct online_data od = { + .cpu = cpu, + .online = online + }; + int i; + + rcu_read_lock(); + for_each_node(i) + io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od); + rcu_read_unlock(); + return 0; +} + +static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node) +{ + struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node); + + return __io_wq_cpu_online(wq, cpu, true); +} + +static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node) +{ + struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node); + + return __io_wq_cpu_online(wq, cpu, false); +} + +int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask) +{ + int i; + + rcu_read_lock(); + for_each_node(i) { + struct io_wqe *wqe = wq->wqes[i]; + + if (mask) + cpumask_copy(wqe->cpu_mask, mask); + else + cpumask_copy(wqe->cpu_mask, cpumask_of_node(i)); + } + rcu_read_unlock(); + return 0; +} + +/* + * Set max number of unbounded workers, returns old value. If new_count is 0, + * then just return the old value. 
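The zero-means-query convention makes one call serve as both getter and setter, and despite the comment it updates the bounded class too. A hypothetical caller:

	int counts[IO_WQ_ACCT_NR] = { 0, 0 };

	io_wq_max_workers(wq, counts);	/* all zeroes: just read both limits back */

	counts[IO_WQ_BOUND] = 16;	/* shrink the bounded pool */
	counts[IO_WQ_UNBOUND] = 0;	/* leave the unbounded limit untouched */
	io_wq_max_workers(wq, counts);	/* counts[] now holds the previous limits */

New values are clamped to RLIMIT_NPROC, and the returned old value is taken from the first node.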
+ */ +int io_wq_max_workers(struct io_wq *wq, int *new_count) +{ + int prev[IO_WQ_ACCT_NR]; + bool first_node = true; + int i, node; + + BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND); + BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND); + BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2); + + for (i = 0; i < 2; i++) { + if (new_count[i] > task_rlimit(current, RLIMIT_NPROC)) + new_count[i] = task_rlimit(current, RLIMIT_NPROC); + } + + for (i = 0; i < IO_WQ_ACCT_NR; i++) + prev[i] = 0; + + rcu_read_lock(); + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; + struct io_wqe_acct *acct; + + raw_spin_lock(&wqe->lock); + for (i = 0; i < IO_WQ_ACCT_NR; i++) { + acct = &wqe->acct[i]; + if (first_node) + prev[i] = max_t(int, acct->max_workers, prev[i]); + if (new_count[i]) + acct->max_workers = new_count[i]; + } + raw_spin_unlock(&wqe->lock); + first_node = false; + } + rcu_read_unlock(); + + for (i = 0; i < IO_WQ_ACCT_NR; i++) + new_count[i] = prev[i]; + + return 0; +} + +static __init int io_wq_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online", + io_wq_cpu_online, io_wq_cpu_offline); + if (ret < 0) + return ret; + io_wq_online = ret; + return 0; +} +subsys_initcall(io_wq_init); diff --git a/fs/io-wq.h b/io_uring/io-wq.h similarity index 76% rename from fs/io-wq.h rename to io_uring/io-wq.h index 75113bcd5889..300a695d191d 100644 --- a/fs/io-wq.h +++ b/io_uring/io-wq.h @@ -1,7 +1,18 @@ #ifndef INTERNAL_IO_WQ_H #define INTERNAL_IO_WQ_H +#ifdef __GENKSYMS__ +/* + * ANDROID ABI HACK + * + * See the big comment in the linux/io_uring.h file for details. This + * include is not needed for any real functionality, but must be here to + * preserve the CRC of a number of variables and functions. + */ #include +#endif + +#include struct io_wq; @@ -9,16 +20,8 @@ enum { IO_WQ_WORK_CANCEL = 1, IO_WQ_WORK_HASHED = 2, IO_WQ_WORK_UNBOUND = 4, - IO_WQ_WORK_NO_CANCEL = 8, IO_WQ_WORK_CONCURRENT = 16, - IO_WQ_WORK_FILES = 32, - IO_WQ_WORK_FS = 64, - IO_WQ_WORK_MM = 128, - IO_WQ_WORK_CREDS = 256, - IO_WQ_WORK_BLKCG = 512, - IO_WQ_WORK_FSIZE = 1024, - IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */ }; @@ -52,6 +55,7 @@ static inline void wq_list_add_after(struct io_wq_work_node *node, static inline void wq_list_add_tail(struct io_wq_work_node *node, struct io_wq_work_list *list) { + node->next = NULL; if (!list->first) { list->last = node; WRITE_ONCE(list->first, node); @@ -59,7 +63,6 @@ static inline void wq_list_add_tail(struct io_wq_work_node *node, list->last->next = node; list->last = node; } - node->next = NULL; } static inline void wq_list_cut(struct io_wq_work_list *list, @@ -95,7 +98,6 @@ static inline void wq_list_del(struct io_wq_work_list *list, struct io_wq_work { struct io_wq_work_node list; - struct io_identity *identity; unsigned flags; }; @@ -107,37 +109,48 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) return container_of(work->list.next, struct io_wq_work, list); } -typedef void (free_work_fn)(struct io_wq_work *); -typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *); +typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *); +typedef void (io_wq_work_fn)(struct io_wq_work *); + +struct io_wq_hash { + refcount_t refs; + unsigned long map; + struct wait_queue_head wait; +}; + +static inline void io_wq_put_hash(struct io_wq_hash *hash) +{ + if (refcount_dec_and_test(&hash->refs)) + kfree(hash); +} struct io_wq_data { - struct user_struct *user; - + struct io_wq_hash *hash; + 
struct task_struct *task; io_wq_work_fn *do_work; free_work_fn *free_work; }; struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data); -bool io_wq_get(struct io_wq *wq, struct io_wq_data *data); -void io_wq_destroy(struct io_wq *wq); +void io_wq_exit_start(struct io_wq *wq); +void io_wq_put_and_exit(struct io_wq *wq); void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work); void io_wq_hash_work(struct io_wq_work *work, void *val); +int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask); +int io_wq_max_workers(struct io_wq *wq, int *new_count); + static inline bool io_wq_is_hashed(struct io_wq_work *work) { return work->flags & IO_WQ_WORK_HASHED; } -void io_wq_cancel_all(struct io_wq *wq); - typedef bool (work_cancel_fn)(struct io_wq_work *, void *); enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, void *data, bool cancel_all); -struct task_struct *io_wq_get_task(struct io_wq *wq); - #if defined(CONFIG_IO_WQ) extern void io_wq_worker_sleeping(struct task_struct *); extern void io_wq_worker_running(struct task_struct *); @@ -152,6 +165,7 @@ static inline void io_wq_worker_running(struct task_struct *tsk) static inline bool io_wq_current_is_worker(void) { - return in_task() && (current->flags & PF_IO_WORKER); + return in_task() && (current->flags & PF_IO_WORKER) && + current->pf_io_worker; } #endif diff --git a/fs/io_uring.c b/io_uring/io_uring.c similarity index 51% rename from fs/io_uring.c rename to io_uring/io_uring.c index b9e9046777db..35a9bcd5648b 100644 --- a/fs/io_uring.c +++ b/io_uring/io_uring.c @@ -11,7 +11,7 @@ * before writing the tail (using smp_load_acquire to read the tail will * do). It also needs a smp_mb() before updating CQ head (ordering the * entry load(s) with the head store), pairing with an implicit barrier - * through a control-dependency in io_get_cqring (smp_store_release to + * through a control-dependency in io_get_cqe (smp_store_release to * store head will do). Failure to do so could lead to reading invalid * CQ entries. 
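The ordering contract described in this header comment, sketched from the consumer side (cq_head, cq_tail, cq_mask and cqes stand in for the real mmap'ed ring fields, so this is a shape, not the liburing API):

	unsigned head = *cq_head;			/* we are the only writer of head */
	unsigned tail = smp_load_acquire(cq_tail);	/* pairs with the producer's release */

	while (head != tail) {
		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
		/* ... consume *cqe before publishing the new head ... */
		head++;
	}
	smp_store_release(cq_head, head);		/* orders entry loads before the head store */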
* @@ -57,7 +57,6 @@ #include #include #include -#include #include #include #include @@ -75,35 +74,43 @@ #include #include #include -#include #include #include #include #include -#include -#include +#include #define CREATE_TRACE_POINTS #include #include -#include "internal.h" +#include "../fs/internal.h" #include "io-wq.h" #define IORING_MAX_ENTRIES 32768 #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES) +#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8 -/* - * Shift of 9 is 512 entries, or exactly one page on 64-bit archs - */ -#define IORING_FILE_TABLE_SHIFT 9 -#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT) -#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1) -#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE) +/* only define max */ +#define IORING_MAX_FIXED_FILES (1U << 15) #define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \ IORING_REGISTER_LAST + IORING_OP_LAST) +#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3) +#define IO_RSRC_TAG_TABLE_MAX (1U << IO_RSRC_TAG_TABLE_SHIFT) +#define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1) + +#define IORING_MAX_REG_BUFFERS (1U << 14) + +#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \ + IOSQE_IO_HARDLINK | IOSQE_ASYNC | \ + IOSQE_BUFFER_SELECT) +#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \ + REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS) + +#define IO_TCTX_REFS_CACHE_NR (1U << 10) + struct io_uring { u32 head ____cacheline_aligned_in_smp; u32 tail ____cacheline_aligned_in_smp; @@ -162,7 +169,7 @@ struct io_rings { * Written by the application, shouldn't be modified by the * kernel. */ - u32 cq_flags; + u32 cq_flags; /* * Number of completion events lost because the queue was full; * this should be avoided by the application by making sure @@ -187,36 +194,65 @@ struct io_rings { struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp; }; +enum io_uring_cmd_flags { + IO_URING_F_NONBLOCK = 1, + IO_URING_F_COMPLETE_DEFER = 2, +}; + struct io_mapped_ubuf { u64 ubuf; - size_t len; - struct bio_vec *bvec; + u64 ubuf_end; unsigned int nr_bvecs; unsigned long acct_pages; + struct bio_vec bvec[]; }; -struct fixed_file_table { - struct file **files; +struct io_ring_ctx; + +struct io_overflow_cqe { + struct io_uring_cqe cqe; + struct list_head list; }; -struct fixed_file_ref_node { +struct io_fixed_file { + /* file * with additional FFS_* flags */ + unsigned long file_ptr; +}; + +struct io_rsrc_put { + struct list_head list; + u64 tag; + union { + void *rsrc; + struct file *file; + struct io_mapped_ubuf *buf; + }; +}; + +struct io_file_table { + struct io_fixed_file *files; +}; + +struct io_rsrc_node { struct percpu_ref refs; struct list_head node; - struct list_head file_list; - struct fixed_file_data *file_data; + struct list_head rsrc_list; + struct io_rsrc_data *rsrc_data; struct llist_node llist; bool done; }; -struct fixed_file_data { - struct fixed_file_table *table; +typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc); + +struct io_rsrc_data { struct io_ring_ctx *ctx; - struct fixed_file_ref_node *node; - struct percpu_ref refs; + u64 **tags; + unsigned int nr; + rsrc_put_fn *do_put; + atomic_t refs; struct completion done; - struct list_head ref_list; - spinlock_t lock; + bool quiesce; }; struct io_buffer { @@ -234,33 +270,81 @@ struct io_restriction { bool registered; }; +enum { + IO_SQ_THREAD_SHOULD_STOP = 0, + IO_SQ_THREAD_SHOULD_PARK, +}; + struct io_sq_data { refcount_t refs; + atomic_t park_pending; struct mutex lock; /* 
ctx's that are using this sqd */ struct list_head ctx_list; - struct list_head ctx_new_list; - struct mutex ctx_lock; struct task_struct *thread; struct wait_queue_head wait; + + unsigned sq_thread_idle; + int sq_cpu; + pid_t task_pid; + pid_t task_tgid; + + unsigned long state; + struct completion exited; +}; + +#define IO_COMPL_BATCH 32 +#define IO_REQ_CACHE_SIZE 32 +#define IO_REQ_ALLOC_BATCH 8 + +struct io_submit_link { + struct io_kiocb *head; + struct io_kiocb *last; +}; + +struct io_submit_state { + struct blk_plug plug; + struct io_submit_link link; + + /* + * io_kiocb alloc cache + */ + void *reqs[IO_REQ_CACHE_SIZE]; + unsigned int free_reqs; + + bool plug_started; + + /* + * Batch completion logic + */ + struct io_kiocb *compl_reqs[IO_COMPL_BATCH]; + unsigned int compl_nr; + /* inline/task_work completion list, under ->uring_lock */ + struct list_head free_list; + + unsigned int ios_left; }; struct io_ring_ctx { + /* const or read-mostly hot data */ struct { struct percpu_ref refs; - } ____cacheline_aligned_in_smp; - struct { + struct io_rings *rings; unsigned int flags; unsigned int compat: 1; - unsigned int limit_mem: 1; - unsigned int cq_overflow_flushed: 1; unsigned int drain_next: 1; unsigned int eventfd_async: 1; unsigned int restricted: 1; - unsigned int sqo_dead: 1; + unsigned int off_timeout_used: 1; + unsigned int drain_active: 1; + } ____cacheline_aligned_in_smp; + + /* submission data */ + struct { + struct mutex uring_lock; /* * Ring buffer of indices into array of io_uring_sqe, which is @@ -274,101 +358,59 @@ struct io_ring_ctx { * array. */ u32 *sq_array; + struct io_uring_sqe *sq_sqes; unsigned cached_sq_head; unsigned sq_entries; - unsigned sq_mask; - unsigned sq_thread_idle; - unsigned cached_sq_dropped; - unsigned cached_cq_overflow; - unsigned long sq_check_overflow; - struct list_head defer_list; - struct list_head timeout_list; - struct list_head cq_overflow_list; - struct io_uring_sqe *sq_sqes; + /* + * Fixed resources fast path, should be accessed only under + * uring_lock, and updated through io_uring_register(2) + */ + struct io_rsrc_node *rsrc_node; + struct io_file_table file_table; + unsigned nr_user_files; + unsigned nr_user_bufs; + struct io_mapped_ubuf **user_bufs; + + struct io_submit_state submit_state; + struct list_head timeout_list; + struct list_head ltimeout_list; + struct list_head cq_overflow_list; + struct xarray io_buffers; + struct xarray personalities; + u32 pers_next; + unsigned sq_thread_idle; } ____cacheline_aligned_in_smp; - struct io_rings *rings; - - /* IO offload */ - struct io_wq *io_wq; - - /* - * For SQPOLL usage - we hold a reference to the parent task, so we - * have access to the ->files - */ - struct task_struct *sqo_task; - - /* Only used for accounting purposes */ - struct mm_struct *mm_account; - -#ifdef CONFIG_BLK_CGROUP - struct cgroup_subsys_state *sqo_blkcg_css; -#endif + /* IRQ completion list, under ->completion_lock */ + struct list_head locked_free_list; + unsigned int locked_free_nr; + const struct cred *sq_creds; /* cred used for __io_sq_thread() */ struct io_sq_data *sq_data; /* if using sq thread polling */ struct wait_queue_head sqo_sq_wait; - struct wait_queue_entry sqo_wait_entry; struct list_head sqd_list; - /* - * If used, fixed file set. Writers must ensure that ->refs is dead, - * readers must ensure that ->refs is alive as long as the file* is - * used. Only updated through io_uring_register(2). 
- */ - struct fixed_file_data *file_data; - unsigned nr_user_files; - - /* if used, fixed mapped user buffers */ - unsigned nr_user_bufs; - struct io_mapped_ubuf *user_bufs; - - struct user_struct *user; - - const struct cred *creds; - -#ifdef CONFIG_AUDIT - kuid_t loginuid; - unsigned int sessionid; -#endif - - struct completion ref_comp; - struct completion sq_thread_comp; - - /* if all else fails... */ - struct io_kiocb *fallback_req; - -#if defined(CONFIG_UNIX) - struct socket *ring_sock; -#endif - - struct xarray io_buffers; - - struct xarray personalities; - u32 pers_next; + unsigned long check_cq_overflow; struct { unsigned cached_cq_tail; unsigned cq_entries; - unsigned cq_mask; + struct eventfd_ctx *cq_ev_fd; + struct wait_queue_head poll_wait; + struct wait_queue_head cq_wait; + unsigned cq_extra; atomic_t cq_timeouts; unsigned cq_last_tm_flush; - unsigned long cq_check_overflow; - struct wait_queue_head cq_wait; - struct fasync_struct *cq_fasync; - struct eventfd_ctx *cq_ev_fd; - } ____cacheline_aligned_in_smp; - - struct { - struct mutex uring_lock; - wait_queue_head_t wait; } ____cacheline_aligned_in_smp; struct { spinlock_t completion_lock; + spinlock_t timeout_lock; + /* * ->iopoll_list is protected by the ctx->uring_lock for * io_uring instances that don't use IORING_SETUP_SQPOLL. @@ -378,39 +420,98 @@ struct io_ring_ctx { struct list_head iopoll_list; struct hlist_head *cancel_hash; unsigned cancel_hash_bits; - bool poll_multi_file; - - spinlock_t inflight_lock; - struct list_head inflight_list; + bool poll_multi_queue; } ____cacheline_aligned_in_smp; - struct delayed_work file_put_work; - struct llist_head file_put_llist; - - struct work_struct exit_work; struct io_restriction restrictions; + + /* slow path rsrc auxiliary data, used by update/register */ + struct { + struct io_rsrc_node *rsrc_backup_node; + struct io_mapped_ubuf *dummy_ubuf; + struct io_rsrc_data *file_data; + struct io_rsrc_data *buf_data; + + struct delayed_work rsrc_put_work; + struct llist_head rsrc_put_llist; + struct list_head rsrc_ref_list; + spinlock_t rsrc_ref_lock; + }; + + /* Keep this last, we don't need it for the fast path */ + struct { + #if defined(CONFIG_UNIX) + struct socket *ring_sock; + #endif + /* hashed buffered write serialization */ + struct io_wq_hash *hash_map; + + /* Only used for accounting purposes */ + struct user_struct *user; + struct mm_struct *mm_account; + + /* ctx exit and cancelation */ + struct llist_head fallback_llist; + struct delayed_work fallback_work; + struct work_struct exit_work; + struct list_head tctx_list; + struct completion ref_comp; + u32 iowq_limits[2]; + bool iowq_limits_set; + }; }; +#ifndef __GENKSYMS__ +/* + * ANDROID ABI HACK + * + * See the big comment in the linux/io_uring.h file for details. This + * structure definition should NOT be used if __GENKSYMS__ is enabled, + * as a "fake" structure definition has already been read in the + * linux/io_uring.h file in order to preserve the Android kernel ABI. + */ +struct io_uring_task { + /* submission side */ + int cached_refs; + struct xarray xa; + struct wait_queue_head wait; + const struct io_ring_ctx *last; + struct io_wq *io_wq; + struct percpu_counter inflight; + atomic_t inflight_tracked; + atomic_t in_idle; + + spinlock_t task_lock; + struct io_wq_work_list task_list; + struct callback_head task_work; + bool task_running; +}; +#endif + /* * First field must be the file pointer in all the * iocb unions! 
See also 'struct kiocb' in */ struct io_poll_iocb { struct file *file; - union { - struct wait_queue_head *head; - u64 addr; - }; + struct wait_queue_head *head; __poll_t events; - bool done; - bool canceled; struct wait_queue_entry wait; }; +struct io_poll_update { + struct file *file; + u64 old_user_data; + u64 new_user_data; + __poll_t events; + bool update_events; + bool update_user_data; +}; + struct io_close { struct file *file; - struct file *put_file; int fd; + u32 file_slot; }; struct io_timeout_data { @@ -418,6 +519,7 @@ struct io_timeout_data { struct hrtimer timer; struct timespec64 ts; enum hrtimer_mode mode; + u32 flags; }; struct io_accept { @@ -425,6 +527,7 @@ struct io_accept { struct sockaddr __user *addr; int __user *addr_len; int flags; + u32 file_slot; unsigned long nofile; }; @@ -446,11 +549,20 @@ struct io_timeout { u32 off; u32 target_seq; struct list_head list; + /* head of the link, used by linked timeouts only */ + struct io_kiocb *head; + /* for linked completions */ + struct io_kiocb *prev; }; struct io_timeout_rem { struct file *file; u64 addr; + + /* timeout update */ + struct timespec64 ts; + u32 flags; + bool ltimeout; }; struct io_rw { @@ -469,8 +581,9 @@ struct io_connect { struct io_sr_msg { struct file *file; union { - struct user_msghdr __user *umsg; - void __user *buf; + struct compat_msghdr __user *umsg_compat; + struct user_msghdr __user *umsg; + void __user *buf; }; int msg_flags; int bgid; @@ -481,13 +594,13 @@ struct io_sr_msg { struct io_open { struct file *file; int dfd; - bool ignore_nonblock; + u32 file_slot; struct filename *filename; struct open_how how; unsigned long nofile; }; -struct io_files_update { +struct io_rsrc_update { struct file *file; u64 arg; u32 nr_args; @@ -518,10 +631,10 @@ struct io_epoll { struct io_splice { struct file *file_out; - struct file *file_in; loff_t off_out; loff_t off_in; u64 len; + int splice_fd_in; unsigned int flags; }; @@ -543,9 +656,52 @@ struct io_statx { struct statx __user *buffer; }; +struct io_shutdown { + struct file *file; + int how; +}; + +struct io_rename { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + +struct io_unlink { + struct file *file; + int dfd; + int flags; + struct filename *filename; +}; + +struct io_mkdir { + struct file *file; + int dfd; + umode_t mode; + struct filename *filename; +}; + +struct io_symlink { + struct file *file; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; +}; + +struct io_hardlink { + struct file *file; + int old_dfd; + int new_dfd; + struct filename *oldpath; + struct filename *newpath; + int flags; +}; + struct io_completion { struct file *file; - struct list_head list; u32 cflags; }; @@ -555,7 +711,8 @@ struct io_async_connect { struct io_async_msghdr { struct iovec fast_iov[UIO_FASTIOV]; - struct iovec *iov; + /* points to an allocated iov, if NULL we use fast_iov instead */ + struct iovec *free_iov; struct sockaddr __user *uaddr; struct msghdr msg; struct sockaddr_storage addr; @@ -565,6 +722,7 @@ struct io_async_rw { struct iovec fast_iov[UIO_FASTIOV]; const struct iovec *free_iovec; struct iov_iter iter; + struct iov_iter_state iter_state; size_t bytes_done; struct wait_page_queue wpq; }; @@ -577,19 +735,24 @@ enum { REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT, REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT, - REQ_F_LINK_HEAD_BIT, - REQ_F_FAIL_LINK_BIT, + /* first byte is taken by user flags, shift it to not overlap */ + REQ_F_FAIL_BIT = 8, 
REQ_F_INFLIGHT_BIT, REQ_F_CUR_POS_BIT, REQ_F_NOWAIT_BIT, REQ_F_LINK_TIMEOUT_BIT, - REQ_F_ISREG_BIT, REQ_F_NEED_CLEANUP_BIT, REQ_F_POLLED_BIT, REQ_F_BUFFER_SELECTED_BIT, - REQ_F_NO_FILE_TABLE_BIT, - REQ_F_WORK_INITIALIZED_BIT, - REQ_F_LTIMEOUT_ACTIVE_BIT, + REQ_F_COMPLETE_INLINE_BIT, + REQ_F_REISSUE_BIT, + REQ_F_CREDS_BIT, + REQ_F_REFCOUNT_BIT, + REQ_F_ARM_LTIMEOUT_BIT, + /* keep async read/write and isreg together and in order */ + REQ_F_NOWAIT_READ_BIT, + REQ_F_NOWAIT_WRITE_BIT, + REQ_F_ISREG_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -609,11 +772,9 @@ enum { /* IOSQE_BUFFER_SELECT */ REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT), - /* head of a link */ - REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT), /* fail rest of links */ - REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT), - /* on inflight list */ + REQ_F_FAIL = BIT(REQ_F_FAIL_BIT), + /* on inflight list, should be cancelled and waited on exit reliably */ REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT), /* read/write uses file position */ REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT), @@ -621,20 +782,28 @@ enum { REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT), /* has or had linked timeout */ REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT), - /* regular file */ - REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), /* needs cleanup */ REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT), /* already went through poll handler */ REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), /* buffer already selected */ REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), - /* doesn't need file table for this request */ - REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT), - /* io_wq_work is initialized */ - REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT), - /* linked timeout is active, i.e. prepared by link's head */ - REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT), + /* completion is deferred through io_comp_state */ + REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT), + /* caller should reissue async */ + REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT), + /* supports async reads */ + REQ_F_NOWAIT_READ = BIT(REQ_F_NOWAIT_READ_BIT), + /* supports async writes */ + REQ_F_NOWAIT_WRITE = BIT(REQ_F_NOWAIT_WRITE_BIT), + /* regular file */ + REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), + /* has creds assigned */ + REQ_F_CREDS = BIT(REQ_F_CREDS_BIT), + /* skip refcounting if not set */ + REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT), + /* there is a linked timeout that has to be armed */ + REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT), }; struct async_poll { @@ -642,6 +811,21 @@ struct async_poll { struct io_poll_iocb *double_poll; }; +typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked); + +struct io_task_work { + union { + struct io_wq_work_node node; + struct llist_node fallback_node; + }; + io_req_tw_func_t func; +}; + +enum { + IORING_RSRC_FILE = 0, + IORING_RSRC_BUFFER = 1, +}; + /* * NOTE! Each of the iocb union members has the file pointer * as the first entry in their struct definition. 
So you can @@ -653,6 +837,7 @@ struct io_kiocb { struct file *file; struct io_rw rw; struct io_poll_iocb poll; + struct io_poll_update poll_update; struct io_accept accept; struct io_sync sync; struct io_cancel cancel; @@ -662,13 +847,19 @@ struct io_kiocb { struct io_sr_msg sr_msg; struct io_open open; struct io_close close; - struct io_files_update files_update; + struct io_rsrc_update rsrc_update; struct io_fadvise fadvise; struct io_madvise madvise; struct io_epoll epoll; struct io_splice splice; struct io_provide_buf pbuf; struct io_statx statx; + struct io_shutdown shutdown; + struct io_rename rename; + struct io_unlink unlink; + struct io_mkdir mkdir; + struct io_symlink symlink; + struct io_hardlink hardlink; /* use only after cleaning per-op data, see io_clean_op() */ struct io_completion compl; }; @@ -684,24 +875,33 @@ struct io_kiocb { struct io_ring_ctx *ctx; unsigned int flags; - refcount_t refs; + atomic_t refs; struct task_struct *task; u64 user_data; - struct list_head link_list; + struct io_kiocb *link; + struct percpu_ref *fixed_rsrc_refs; - /* - * 1. used with ctx->iopoll_list with reads/writes - * 2. to track reqs with ->files (see io_op_def::file_table) - */ + /* used with ctx->iopoll_list with reads/writes */ struct list_head inflight_entry; - - struct percpu_ref *fixed_file_refs; - struct callback_head task_work; + struct io_task_work io_task_work; /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ struct hlist_node hash_node; struct async_poll *apoll; struct io_wq_work work; + const struct cred *creds; + + /* store used ubuf, so we can prevent reloading */ + struct io_mapped_ubuf *imu; + /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */ + struct io_buffer *kbuf; + atomic_t poll_refs; +}; + +struct io_tctx_node { + struct list_head ctx_node; + struct task_struct *task; + struct io_ring_ctx *ctx; }; struct io_defer_entry { @@ -710,42 +910,9 @@ struct io_defer_entry { u32 seq; }; -#define IO_IOPOLL_BATCH 8 - -struct io_comp_state { - unsigned int nr; - struct list_head list; - struct io_ring_ctx *ctx; -}; - -struct io_submit_state { - struct blk_plug plug; - - /* - * io_kiocb alloc cache - */ - void *reqs[IO_IOPOLL_BATCH]; - unsigned int free_reqs; - - /* - * Batch completion logic - */ - struct io_comp_state comp; - - /* - * File reference cache - */ - struct file *file; - unsigned int fd; - unsigned int has_refs; - unsigned int ios_left; -}; - struct io_op_def { /* needs req->file assigned */ unsigned needs_file : 1; - /* don't fail if file grab fails */ - unsigned needs_file_no_error : 1; /* hash wq insertion if file is a regular file */ unsigned hash_reg_file : 1; /* unbound wq insertion if file is a non-regular file */ @@ -757,11 +924,12 @@ struct io_op_def { unsigned pollout : 1; /* op supports buffer selection */ unsigned buffer_select : 1; - /* must always have async data allocated */ - unsigned needs_async_data : 1; + /* do prep async if is going to be punted */ + unsigned needs_async_setup : 1; + /* should block plug */ + unsigned plug : 1; /* size of async data needed, if any */ unsigned short async_size; - unsigned work_flags; }; static const struct io_op_def io_op_defs[] = { @@ -771,39 +939,36 @@ static const struct io_op_def io_op_defs[] = { .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, - .needs_async_data = 1, + .needs_async_setup = 1, + .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG, }, [IORING_OP_WRITEV] = { .needs_file = 1, .hash_reg_file = 1, 
.unbound_nonreg_file = 1, .pollout = 1, - .needs_async_data = 1, + .needs_async_setup = 1, + .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FSIZE, }, [IORING_OP_FSYNC] = { .needs_file = 1, - .work_flags = IO_WQ_WORK_BLKCG, }, [IORING_OP_READ_FIXED] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, + .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM, }, [IORING_OP_WRITE_FIXED] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, .pollout = 1, + .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE | - IO_WQ_WORK_MM, }, [IORING_OP_POLL_ADD] = { .needs_file = 1, @@ -812,123 +977,91 @@ static const struct io_op_def io_op_defs[] = { [IORING_OP_POLL_REMOVE] = {}, [IORING_OP_SYNC_FILE_RANGE] = { .needs_file = 1, - .work_flags = IO_WQ_WORK_BLKCG, }, [IORING_OP_SENDMSG] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout = 1, - .needs_async_data = 1, + .needs_async_setup = 1, .async_size = sizeof(struct io_async_msghdr), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FS, }, [IORING_OP_RECVMSG] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, - .needs_async_data = 1, + .needs_async_setup = 1, .async_size = sizeof(struct io_async_msghdr), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FS, }, [IORING_OP_TIMEOUT] = { - .needs_async_data = 1, .async_size = sizeof(struct io_timeout_data), - .work_flags = IO_WQ_WORK_MM, }, - [IORING_OP_TIMEOUT_REMOVE] = {}, + [IORING_OP_TIMEOUT_REMOVE] = { + /* used by timeout updates' prep() */ + }, [IORING_OP_ACCEPT] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES, }, [IORING_OP_ASYNC_CANCEL] = {}, [IORING_OP_LINK_TIMEOUT] = { - .needs_async_data = 1, .async_size = sizeof(struct io_timeout_data), - .work_flags = IO_WQ_WORK_MM, }, [IORING_OP_CONNECT] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout = 1, - .needs_async_data = 1, + .needs_async_setup = 1, .async_size = sizeof(struct io_async_connect), - .work_flags = IO_WQ_WORK_MM, }, [IORING_OP_FALLOCATE] = { .needs_file = 1, - .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE, - }, - [IORING_OP_OPENAT] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FS, - }, - [IORING_OP_CLOSE] = { - .needs_file = 1, - .needs_file_no_error = 1, - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG, - }, - [IORING_OP_FILES_UPDATE] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM, - }, - [IORING_OP_STATX] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM | - IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG, }, + [IORING_OP_OPENAT] = {}, + [IORING_OP_CLOSE] = {}, + [IORING_OP_FILES_UPDATE] = {}, + [IORING_OP_STATX] = {}, [IORING_OP_READ] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, + .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG, }, [IORING_OP_WRITE] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, .pollout = 1, + .plug = 1, .async_size = sizeof(struct io_async_rw), - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | - IO_WQ_WORK_FSIZE, }, [IORING_OP_FADVISE] = { .needs_file = 1, - .work_flags = IO_WQ_WORK_BLKCG, - }, - [IORING_OP_MADVISE] = { - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG, }, + [IORING_OP_MADVISE] = {}, [IORING_OP_SEND] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout 
= 1, - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG, }, [IORING_OP_RECV] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollin = 1, .buffer_select = 1, - .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG, }, [IORING_OP_OPENAT2] = { - .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_FS | - IO_WQ_WORK_BLKCG, }, [IORING_OP_EPOLL_CTL] = { .unbound_nonreg_file = 1, - .work_flags = IO_WQ_WORK_FILES, }, [IORING_OP_SPLICE] = { .needs_file = 1, .hash_reg_file = 1, .unbound_nonreg_file = 1, - .work_flags = IO_WQ_WORK_BLKCG, }, [IORING_OP_PROVIDE_BUFFERS] = {}, [IORING_OP_REMOVE_BUFFERS] = {}, @@ -937,43 +1070,47 @@ static const struct io_op_def io_op_defs[] = { .hash_reg_file = 1, .unbound_nonreg_file = 1, }, + [IORING_OP_SHUTDOWN] = { + .needs_file = 1, + }, + [IORING_OP_RENAMEAT] = {}, + [IORING_OP_UNLINKAT] = {}, }; -enum io_mem_account { - ACCT_LOCKED, - ACCT_PINNED, -}; +/* requests with any of those set should undergo io_disarm_next() */ +#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL) -static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node); -static struct fixed_file_ref_node *alloc_fixed_file_ref_node( - struct io_ring_ctx *ctx); +static bool io_disarm_next(struct io_kiocb *req); +static void io_uring_del_tctx_node(unsigned long index); +static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, + struct task_struct *task, + bool cancel_all); +static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd); + +static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags); -static void __io_complete_rw(struct io_kiocb *req, long res, long res2, - struct io_comp_state *cs); -static void io_cqring_fill_event(struct io_kiocb *req, long res); static void io_put_req(struct io_kiocb *req); -static void io_put_req_deferred(struct io_kiocb *req, int nr); -static void io_double_put_req(struct io_kiocb *req); -static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req); -static void __io_queue_linked_timeout(struct io_kiocb *req); +static void io_put_req_deferred(struct io_kiocb *req); +static void io_dismantle_req(struct io_kiocb *req); static void io_queue_linked_timeout(struct io_kiocb *req); -static int __io_sqe_files_update(struct io_ring_ctx *ctx, - struct io_uring_files_update *ip, - unsigned nr_args); -static void __io_clean_op(struct io_kiocb *req); -static struct file *io_file_get(struct io_submit_state *state, +static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type, + struct io_uring_rsrc_update2 *up, + unsigned nr_args); +static void io_clean_op(struct io_kiocb *req); +static struct file *io_file_get(struct io_ring_ctx *ctx, struct io_kiocb *req, int fd, bool fixed); -static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs); -static void io_file_put_work(struct work_struct *work); +static void __io_queue_sqe(struct io_kiocb *req); +static void io_rsrc_put_work(struct work_struct *work); -static ssize_t io_import_iovec(int rw, struct io_kiocb *req, - struct iovec **iovec, struct iov_iter *iter, - bool needs_lock); -static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, - const struct iovec *fast_iov, - struct iov_iter *iter, bool force); -static void io_req_drop_files(struct io_kiocb *req); static void io_req_task_queue(struct io_kiocb *req); +static void io_submit_flush_completions(struct io_ring_ctx *ctx); +static int io_req_prep_async(struct io_kiocb *req); + +static int io_install_fixed_file(struct io_kiocb *req, struct file *file, + unsigned 
int issue_flags, u32 slot_index); +static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags); + +static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer); static struct kmem_cache *req_cachep; @@ -992,21 +1129,67 @@ struct sock *io_uring_get_socket(struct file *file) } EXPORT_SYMBOL(io_uring_get_socket); -static inline void io_clean_op(struct io_kiocb *req) +static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked) { - if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED)) - __io_clean_op(req); + if (!*locked) { + mutex_lock(&ctx->uring_lock); + *locked = true; + } } -static inline bool __io_match_files(struct io_kiocb *req, - struct files_struct *files) +#define io_for_each_link(pos, head) \ + for (pos = (head); pos; pos = pos->link) + +/* + * Shamelessly stolen from the mm implementation of page reference checking, + * see commit f958d7b528b1 for details. + */ +#define req_ref_zero_or_close_to_overflow(req) \ + ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u) + +static inline bool req_ref_inc_not_zero(struct io_kiocb *req) { - if (req->file && req->file->f_op == &io_uring_fops) + WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); + return atomic_inc_not_zero(&req->refs); +} + +static inline bool req_ref_put_and_test(struct io_kiocb *req) +{ + if (likely(!(req->flags & REQ_F_REFCOUNT))) return true; - return ((req->flags & REQ_F_WORK_INITIALIZED) && - (req->work.flags & IO_WQ_WORK_FILES)) && - req->work.identity->files == files; + WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); + return atomic_dec_and_test(&req->refs); +} + +static inline void req_ref_get(struct io_kiocb *req) +{ + WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); + WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); + atomic_inc(&req->refs); +} + +static inline void __io_req_set_refcount(struct io_kiocb *req, int nr) +{ + if (!(req->flags & REQ_F_REFCOUNT)) { + req->flags |= REQ_F_REFCOUNT; + atomic_set(&req->refs, nr); + } +} + +static inline void io_req_set_refcount(struct io_kiocb *req) +{ + __io_req_set_refcount(req, 1); +} + +static inline void io_req_set_rsrc_node(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + + if (!req->fixed_rsrc_refs) { + req->fixed_rsrc_refs = &ctx->rsrc_node->refs; + percpu_ref_get(req->fixed_rsrc_refs); + } } static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl) @@ -1021,157 +1204,71 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl) percpu_ref_put(ref); } -static bool io_match_task(struct io_kiocb *head, - struct task_struct *task, - struct files_struct *files) +static bool io_match_task(struct io_kiocb *head, struct task_struct *task, + bool cancel_all) + __must_hold(&req->ctx->timeout_lock) { - struct io_kiocb *link; + struct io_kiocb *req; - if (task && head->task != task) { - /* in terms of cancelation, always match if req task is dead */ - if (head->task->flags & PF_EXITING) - return true; + if (task && head->task != task) return false; - } - if (!files) + if (cancel_all) return true; - if (__io_match_files(head, files)) - return true; - if (head->flags & REQ_F_LINK_HEAD) { - list_for_each_entry(link, &head->link_list, link_list) { - if (__io_match_files(link, files)) - return true; - } + + io_for_each_link(req, head) { + if (req->flags & REQ_F_INFLIGHT) + return true; } return false; } - -static void io_sq_thread_drop_mm(void) +static bool io_match_linked(struct io_kiocb *head) { - struct mm_struct *mm = current->mm; + struct io_kiocb *req; - if (mm) { - 
kthread_unuse_mm(mm); - mmput(mm); - current->mm = NULL; + io_for_each_link(req, head) { + if (req->flags & REQ_F_INFLIGHT) + return true; } -} - -static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx) -{ - struct mm_struct *mm; - - if (current->flags & PF_EXITING) - return -EFAULT; - if (current->mm) - return 0; - - /* Should never happen */ - if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL))) - return -EFAULT; - - task_lock(ctx->sqo_task); - mm = ctx->sqo_task->mm; - if (unlikely(!mm || !mmget_not_zero(mm))) - mm = NULL; - task_unlock(ctx->sqo_task); - - if (mm) { - kthread_use_mm(mm); - return 0; - } - - return -EFAULT; -} - -static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx, - struct io_kiocb *req) -{ - if (!(io_op_defs[req->opcode].work_flags & IO_WQ_WORK_MM)) - return 0; - return __io_sq_thread_acquire_mm(ctx); -} - -static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx, - struct cgroup_subsys_state **cur_css) - -{ -#ifdef CONFIG_BLK_CGROUP - /* puts the old one when swapping */ - if (*cur_css != ctx->sqo_blkcg_css) { - kthread_associate_blkcg(ctx->sqo_blkcg_css); - *cur_css = ctx->sqo_blkcg_css; - } -#endif -} - -static void io_sq_thread_unassociate_blkcg(void) -{ -#ifdef CONFIG_BLK_CGROUP - kthread_associate_blkcg(NULL); -#endif -} - -static inline void req_set_fail_links(struct io_kiocb *req) -{ - if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK) - req->flags |= REQ_F_FAIL_LINK; + return false; } /* - * None of these are dereferenced, they are simply used to check if any of - * them have changed. If we're under current and check they are still the - * same, we're fine to grab references to them for actual out-of-line use. + * As io_match_task() but protected against racing with linked timeouts. + * User must not hold timeout_lock. */ -static void io_init_identity(struct io_identity *id) +static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task, + bool cancel_all) { - id->files = current->files; - id->mm = current->mm; -#ifdef CONFIG_BLK_CGROUP - rcu_read_lock(); - id->blkcg_css = blkcg_css(); - rcu_read_unlock(); -#endif - id->creds = current_cred(); - id->nsproxy = current->nsproxy; - id->fs = current->fs; - id->fsize = rlimit(RLIMIT_FSIZE); -#ifdef CONFIG_AUDIT - id->loginuid = current->loginuid; - id->sessionid = current->sessionid; -#endif - refcount_set(&id->count, 1); + bool matched; + + if (task && head->task != task) + return false; + if (cancel_all) + return true; + + if (head->flags & REQ_F_LINK_TIMEOUT) { + struct io_ring_ctx *ctx = head->ctx; + + /* protect against races with linked timeouts */ + spin_lock_irq(&ctx->timeout_lock); + matched = io_match_linked(head); + spin_unlock_irq(&ctx->timeout_lock); + } else { + matched = io_match_linked(head); + } + return matched; } -static inline void __io_req_init_async(struct io_kiocb *req) +static inline void req_set_fail(struct io_kiocb *req) { - memset(&req->work, 0, sizeof(req->work)); - req->flags |= REQ_F_WORK_INITIALIZED; + req->flags |= REQ_F_FAIL; } -/* - * Note: must call io_req_init_async() for the first time you - * touch any members of io_wq_work. 
- */ -static inline void io_req_init_async(struct io_kiocb *req) +static inline void req_fail_link_node(struct io_kiocb *req, int res) { - struct io_uring_task *tctx = req->task->io_uring; - - if (req->flags & REQ_F_WORK_INITIALIZED) - return; - - __io_req_init_async(req); - - /* Grab a ref if this isn't our static identity */ - req->work.identity = tctx->identity; - if (tctx->identity != &tctx->__identity) - refcount_inc(&req->work.identity->count); -} - -static inline bool io_async_submit(struct io_ring_ctx *ctx) -{ - return ctx->flags & IORING_SETUP_SQPOLL; + req_set_fail(req); + req->result = res; } static void io_ring_ctx_ref_free(struct percpu_ref *ref) @@ -1186,6 +1283,27 @@ static inline bool io_is_timeout_noseq(struct io_kiocb *req) return !req->timeout.off; } +static void io_fallback_req_func(struct work_struct *work) +{ + struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, + fallback_work.work); + struct llist_node *node = llist_del_all(&ctx->fallback_llist); + struct io_kiocb *req, *tmp; + bool locked = false; + + percpu_ref_get(&ctx->refs); + llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node) + req->io_task_work.func(req, &locked); + + if (locked) { + if (ctx->submit_state.compl_nr) + io_submit_flush_completions(ctx); + mutex_unlock(&ctx->uring_lock); + } + percpu_ref_put(&ctx->refs); + +} + static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) { struct io_ring_ctx *ctx; @@ -1195,10 +1313,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) if (!ctx) return NULL; - ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL); - if (!ctx->fallback_req) - goto err; - /* * Use 5 bits less than the max cq entries, that should give us around * 32 entries per hash list if totally full and uniformly spread. 
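As an aside to the hunk above: the cancel-hash sizing that the "5 bits less than the max cq entries" comment describes works out as in the sketch below. This is a standalone illustration, not part of the patch; it assumes cq_entries has already been rounded up to a power of two (as the ring setup path does), and the helper name is hypothetical.

	/*
	 * Illustrative sketch only: size the cancel hash so that a totally
	 * full, uniformly spread CQ puts ~32 entries on each hash list.
	 */
	#include <stdio.h>

	static unsigned int cancel_hash_bits(unsigned int cq_entries)
	{
		unsigned int bits = 0;

		while (cq_entries >>= 1)	/* ilog2() for a power of two */
			bits++;
		return bits > 5 ? bits - 5 : 1;	/* 2^bits buckets */
	}

	int main(void)
	{
		/* 4096 CQEs -> 12 bits -> 7 bits -> 128 buckets, ~32 each */
		printf("buckets: %u\n", 1U << cancel_hash_bits(4096));
		return 0;
	}
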
@@ -1214,6 +1328,12 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) goto err; __hash_init(ctx->cancel_hash, 1U << hash_bits); + ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL); + if (!ctx->dummy_ubuf) + goto err; + /* set invalid range, so io_import_fixed() fails meeting it */ + ctx->dummy_ubuf->ubuf = -1UL; + if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) goto err; @@ -1221,232 +1341,109 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) ctx->flags = p->flags; init_waitqueue_head(&ctx->sqo_sq_wait); INIT_LIST_HEAD(&ctx->sqd_list); - init_waitqueue_head(&ctx->cq_wait); + init_waitqueue_head(&ctx->poll_wait); INIT_LIST_HEAD(&ctx->cq_overflow_list); init_completion(&ctx->ref_comp); - init_completion(&ctx->sq_thread_comp); xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1); xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1); mutex_init(&ctx->uring_lock); - init_waitqueue_head(&ctx->wait); + init_waitqueue_head(&ctx->cq_wait); spin_lock_init(&ctx->completion_lock); + spin_lock_init(&ctx->timeout_lock); INIT_LIST_HEAD(&ctx->iopoll_list); INIT_LIST_HEAD(&ctx->defer_list); INIT_LIST_HEAD(&ctx->timeout_list); - spin_lock_init(&ctx->inflight_lock); - INIT_LIST_HEAD(&ctx->inflight_list); - INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work); - init_llist_head(&ctx->file_put_llist); + INIT_LIST_HEAD(&ctx->ltimeout_list); + spin_lock_init(&ctx->rsrc_ref_lock); + INIT_LIST_HEAD(&ctx->rsrc_ref_list); + INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work); + init_llist_head(&ctx->rsrc_put_llist); + INIT_LIST_HEAD(&ctx->tctx_list); + INIT_LIST_HEAD(&ctx->submit_state.free_list); + INIT_LIST_HEAD(&ctx->locked_free_list); + INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func); return ctx; err: - if (ctx->fallback_req) - kmem_cache_free(req_cachep, ctx->fallback_req); + kfree(ctx->dummy_ubuf); kfree(ctx->cancel_hash); kfree(ctx); return NULL; } +static void io_account_cq_overflow(struct io_ring_ctx *ctx) +{ + struct io_rings *r = ctx->rings; + + WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1); + ctx->cq_extra--; +} + static bool req_need_defer(struct io_kiocb *req, u32 seq) { if (unlikely(req->flags & REQ_F_IO_DRAIN)) { struct io_ring_ctx *ctx = req->ctx; - return seq != ctx->cached_cq_tail - + READ_ONCE(ctx->cached_cq_overflow); + return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail; } return false; } -static void __io_commit_cqring(struct io_ring_ctx *ctx) -{ - struct io_rings *rings = ctx->rings; - - /* order cqe stores with ring update */ - smp_store_release(&rings->cq.tail, ctx->cached_cq_tail); -} - -static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req) -{ - if (req->work.identity == &tctx->__identity) - return; - if (refcount_dec_and_test(&req->work.identity->count)) - kfree(req->work.identity); -} - -static void io_req_clean_work(struct io_kiocb *req) -{ - if (!(req->flags & REQ_F_WORK_INITIALIZED)) - return; - - req->flags &= ~REQ_F_WORK_INITIALIZED; - - if (req->work.flags & IO_WQ_WORK_MM) { - mmdrop(req->work.identity->mm); - req->work.flags &= ~IO_WQ_WORK_MM; - } -#ifdef CONFIG_BLK_CGROUP - if (req->work.flags & IO_WQ_WORK_BLKCG) { - css_put(req->work.identity->blkcg_css); - req->work.flags &= ~IO_WQ_WORK_BLKCG; - } +#define FFS_ASYNC_READ 0x1UL +#define FFS_ASYNC_WRITE 0x2UL +#ifdef CONFIG_64BIT +#define FFS_ISREG 0x4UL +#else +#define FFS_ISREG 0x0UL #endif - if (req->work.flags & IO_WQ_WORK_CREDS) { - 
put_cred(req->work.identity->creds); - req->work.flags &= ~IO_WQ_WORK_CREDS; - } - if (req->work.flags & IO_WQ_WORK_FS) { - struct fs_struct *fs = req->work.identity->fs; +#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG) - spin_lock(&req->work.identity->fs->lock); - if (--fs->users) - fs = NULL; - spin_unlock(&req->work.identity->fs->lock); - if (fs) - free_fs_struct(fs); - req->work.flags &= ~IO_WQ_WORK_FS; - } - if (req->flags & REQ_F_INFLIGHT) - io_req_drop_files(req); - - io_put_identity(req->task->io_uring, req); +static inline bool io_req_ffs_set(struct io_kiocb *req) +{ + return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE); } -/* - * Create a private copy of io_identity, since some fields don't match - * the current context. - */ -static bool io_identity_cow(struct io_kiocb *req) +static void io_req_track_inflight(struct io_kiocb *req) { - struct io_uring_task *tctx = current->io_uring; - const struct cred *creds = NULL; - struct io_identity *id; - - if (req->work.flags & IO_WQ_WORK_CREDS) - creds = req->work.identity->creds; - - id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL); - if (unlikely(!id)) { - req->work.flags |= IO_WQ_WORK_CANCEL; - return false; + if (!(req->flags & REQ_F_INFLIGHT)) { + req->flags |= REQ_F_INFLIGHT; + atomic_inc(&req->task->io_uring->inflight_tracked); } - - /* - * We can safely just re-init the creds we copied Either the field - * matches the current one, or we haven't grabbed it yet. The only - * exception is ->creds, through registered personalities, so handle - * that one separately. - */ - io_init_identity(id); - if (creds) - id->creds = creds; - - /* add one for this request */ - refcount_inc(&id->count); - - /* drop tctx and req identity references, if needed */ - if (tctx->identity != &tctx->__identity && - refcount_dec_and_test(&tctx->identity->count)) - kfree(tctx->identity); - if (req->work.identity != &tctx->__identity && - refcount_dec_and_test(&req->work.identity->count)) - kfree(req->work.identity); - - req->work.identity = id; - tctx->identity = id; - return true; } -static bool io_grab_identity(struct io_kiocb *req) +static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) { - const struct io_op_def *def = &io_op_defs[req->opcode]; - struct io_identity *id = req->work.identity; - struct io_ring_ctx *ctx = req->ctx; + if (WARN_ON_ONCE(!req->link)) + return NULL; - if (def->work_flags & IO_WQ_WORK_FSIZE) { - if (id->fsize != rlimit(RLIMIT_FSIZE)) - return false; - req->work.flags |= IO_WQ_WORK_FSIZE; - } -#ifdef CONFIG_BLK_CGROUP - if (!(req->work.flags & IO_WQ_WORK_BLKCG) && - (def->work_flags & IO_WQ_WORK_BLKCG)) { - rcu_read_lock(); - if (id->blkcg_css != blkcg_css()) { - rcu_read_unlock(); - return false; - } - /* - * This should be rare, either the cgroup is dying or the task - * is moving cgroups. Just punt to root for the handful of ios. 
- */ - if (css_tryget_online(id->blkcg_css)) - req->work.flags |= IO_WQ_WORK_BLKCG; - rcu_read_unlock(); - } -#endif - if (!(req->work.flags & IO_WQ_WORK_CREDS)) { - if (id->creds != current_cred()) - return false; - get_cred(id->creds); - req->work.flags |= IO_WQ_WORK_CREDS; - } -#ifdef CONFIG_AUDIT - if (!uid_eq(current->loginuid, id->loginuid) || - current->sessionid != id->sessionid) - return false; -#endif - if (!(req->work.flags & IO_WQ_WORK_FS) && - (def->work_flags & IO_WQ_WORK_FS)) { - if (current->fs != id->fs) - return false; - spin_lock(&id->fs->lock); - if (!id->fs->in_exec) { - id->fs->users++; - req->work.flags |= IO_WQ_WORK_FS; - } else { - req->work.flags |= IO_WQ_WORK_CANCEL; - } - spin_unlock(&current->fs->lock); - } - if (!(req->work.flags & IO_WQ_WORK_FILES) && - (def->work_flags & IO_WQ_WORK_FILES) && - !(req->flags & REQ_F_NO_FILE_TABLE)) { - if (id->files != current->files || - id->nsproxy != current->nsproxy) - return false; - atomic_inc(&id->files->count); - get_nsproxy(id->nsproxy); + req->flags &= ~REQ_F_ARM_LTIMEOUT; + req->flags |= REQ_F_LINK_TIMEOUT; - if (!(req->flags & REQ_F_INFLIGHT)) { - req->flags |= REQ_F_INFLIGHT; + /* linked timeouts should have two refs once prep'ed */ + io_req_set_refcount(req); + __io_req_set_refcount(req->link, 2); + return req->link; +} - spin_lock_irq(&ctx->inflight_lock); - list_add(&req->inflight_entry, &ctx->inflight_list); - spin_unlock_irq(&ctx->inflight_lock); - } - req->work.flags |= IO_WQ_WORK_FILES; - } - if (!(req->work.flags & IO_WQ_WORK_MM) && - (def->work_flags & IO_WQ_WORK_MM)) { - if (id->mm != current->mm) - return false; - mmgrab(id->mm); - req->work.flags |= IO_WQ_WORK_MM; - } - - return true; +static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) +{ + if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT))) + return NULL; + return __io_prep_linked_timeout(req); } static void io_prep_async_work(struct io_kiocb *req) { const struct io_op_def *def = &io_op_defs[req->opcode]; struct io_ring_ctx *ctx = req->ctx; - struct io_identity *id; - io_req_init_async(req); - id = req->work.identity; + if (!(req->flags & REQ_F_CREDS)) { + req->flags |= REQ_F_CREDS; + req->creds = get_current_cred(); + } + req->work.list.next = NULL; + req->work.flags = 0; if (req->flags & REQ_F_FORCE_ASYNC) req->work.flags |= IO_WQ_WORK_CONCURRENT; @@ -1457,92 +1454,77 @@ static void io_prep_async_work(struct io_kiocb *req) if (def->unbound_nonreg_file) req->work.flags |= IO_WQ_WORK_UNBOUND; } - - /* if we fail grabbing identity, we must COW, regrab, and retry */ - if (io_grab_identity(req)) - return; - - if (!io_identity_cow(req)) - return; - - /* can't fail at this point */ - if (!io_grab_identity(req)) - WARN_ON(1); } static void io_prep_async_link(struct io_kiocb *req) { struct io_kiocb *cur; - io_prep_async_work(req); - if (req->flags & REQ_F_LINK_HEAD) - list_for_each_entry(cur, &req->link_list, link_list) + if (req->flags & REQ_F_LINK_TIMEOUT) { + struct io_ring_ctx *ctx = req->ctx; + + spin_lock_irq(&ctx->timeout_lock); + io_for_each_link(cur, req) io_prep_async_work(cur); + spin_unlock_irq(&ctx->timeout_lock); + } else { + io_for_each_link(cur, req) + io_prep_async_work(cur); + } } -static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req) +static void io_queue_async_work(struct io_kiocb *req, bool *locked) { struct io_ring_ctx *ctx = req->ctx; struct io_kiocb *link = io_prep_linked_timeout(req); + struct io_uring_task *tctx = req->task->io_uring; - trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work),
req, - &req->work, req->flags); - io_wq_enqueue(ctx->io_wq, &req->work); - return link; -} + /* must not take the lock, NULL it as a precaution */ + locked = NULL; -static void io_queue_async_work(struct io_kiocb *req) -{ - struct io_kiocb *link; + BUG_ON(!tctx); + BUG_ON(!tctx->io_wq); /* init ->work of the whole link before punting */ io_prep_async_link(req); - link = __io_queue_async_work(req); + /* + * Not expected to happen, but if we do have a bug where this _can_ + * happen, catch it here and ensure the request is marked as + * canceled. That will make io-wq go through the usual work cancel + * procedure rather than attempt to run this request (or create a new + * worker for it). + */ + if (WARN_ON_ONCE(!same_thread_group(req->task, current))) + req->work.flags |= IO_WQ_WORK_CANCEL; + + trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, + &req->work, req->flags); + io_wq_enqueue(tctx->io_wq, &req->work); if (link) io_queue_linked_timeout(link); } static void io_kill_timeout(struct io_kiocb *req, int status) + __must_hold(&req->ctx->completion_lock) + __must_hold(&req->ctx->timeout_lock) { struct io_timeout_data *io = req->async_data; - int ret; - ret = hrtimer_try_to_cancel(&io->timer); - if (ret != -1) { + if (hrtimer_try_to_cancel(&io->timer) != -1) { if (status) - req_set_fail_links(req); + req_set_fail(req); atomic_set(&req->ctx->cq_timeouts, atomic_read(&req->ctx->cq_timeouts) + 1); list_del_init(&req->timeout.list); - io_cqring_fill_event(req, status); - io_put_req_deferred(req, 1); + io_fill_cqe_req(req, status, 0); + io_put_req_deferred(req); } } -/* - * Returns true if we found and killed one or more timeouts - */ -static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, - struct files_struct *files) +static void io_queue_deferred(struct io_ring_ctx *ctx) { - struct io_kiocb *req, *tmp; - int canceled = 0; - - spin_lock_irq(&ctx->completion_lock); - list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { - if (io_match_task(req, tsk, files)) { - io_kill_timeout(req, -ECANCELED); - canceled++; - } - } - spin_unlock_irq(&ctx->completion_lock); - return canceled != 0; -} - -static void __io_queue_deferred(struct io_ring_ctx *ctx) -{ - do { + while (!list_empty(&ctx->defer_list)) { struct io_defer_entry *de = list_first_entry(&ctx->defer_list, struct io_defer_entry, list); @@ -1551,19 +1533,16 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx) list_del_init(&de->list); io_req_task_queue(de->req); kfree(de); - } while (!list_empty(&ctx->defer_list)); + } } static void io_flush_timeouts(struct io_ring_ctx *ctx) + __must_hold(&ctx->completion_lock) { + u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts); struct io_kiocb *req, *tmp; - u32 seq; - - if (list_empty(&ctx->timeout_list)) - return; - - seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts); + spin_lock_irq(&ctx->timeout_lock); list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { u32 events_needed, events_got; @@ -1584,441 +1563,568 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx) io_kill_timeout(req, 0); } - ctx->cq_last_tm_flush = seq; + spin_unlock_irq(&ctx->timeout_lock); } -static void io_commit_cqring(struct io_ring_ctx *ctx) +static void __io_commit_cqring_flush(struct io_ring_ctx *ctx) { - io_flush_timeouts(ctx); - __io_commit_cqring(ctx); + if (ctx->off_timeout_used) + io_flush_timeouts(ctx); + if (ctx->drain_active) + io_queue_deferred(ctx); +} - if (unlikely(!list_empty(&ctx->defer_list))) - 
__io_queue_deferred(ctx); +static inline void io_commit_cqring(struct io_ring_ctx *ctx) +{ + if (unlikely(ctx->off_timeout_used || ctx->drain_active)) + __io_commit_cqring_flush(ctx); + /* order cqe stores with ring update */ + smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail); } static inline bool io_sqring_full(struct io_ring_ctx *ctx) { struct io_rings *r = ctx->rings; - return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries; + return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries; } -static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx) +static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx) +{ + return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head); +} + +static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx) { struct io_rings *rings = ctx->rings; - unsigned tail; + unsigned tail, mask = ctx->cq_entries - 1; - tail = ctx->cached_cq_tail; /* * writes to the cq entry need to come after reading head; the * control dependency is enough as we're using WRITE_ONCE to * fill the cq entry */ - if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries) + if (__io_cqring_events(ctx) == ctx->cq_entries) return NULL; - ctx->cached_cq_tail++; - return &rings->cqes[tail & ctx->cq_mask]; + tail = ctx->cached_cq_tail++; + return &rings->cqes[tail & mask]; } static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx) { - if (!ctx->cq_ev_fd) + if (likely(!ctx->cq_ev_fd)) return false; if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) return false; - if (!ctx->eventfd_async) - return true; - return io_wq_current_is_worker(); + return !ctx->eventfd_async || io_wq_current_is_worker(); } +/* + * This should only get called when at least one event has been posted. + * Some applications rely on the eventfd notification count only changing + * IFF a new CQE has been added to the CQ ring. There's no dependency on + * a 1:1 relationship between how many times this function is called (and + * hence the eventfd count) and number of CQEs posted to the CQ ring. + */ static void io_cqring_ev_posted(struct io_ring_ctx *ctx) { - if (wq_has_sleeper(&ctx->cq_wait)) { - wake_up_interruptible(&ctx->cq_wait); - kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN); - } - if (waitqueue_active(&ctx->wait)) - wake_up(&ctx->wait); + /* + * wake_up_all() may seem excessive, but io_wake_function() and + * io_should_wake() handle the termination of the loop and only + * wake as many waiters as we need to.
+ */ + if (wq_has_sleeper(&ctx->cq_wait)) + __wake_up(&ctx->cq_wait, TASK_NORMAL, 0, + poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait)) wake_up(&ctx->sq_data->wait); if (io_should_trigger_evfd(ctx)) - eventfd_signal(ctx->cq_ev_fd, 1); + eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE); + if (waitqueue_active(&ctx->poll_wait)) + __wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0, + poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); } -static void io_cqring_mark_overflow(struct io_ring_ctx *ctx) +static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx) { - if (list_empty(&ctx->cq_overflow_list)) { - clear_bit(0, &ctx->sq_check_overflow); - clear_bit(0, &ctx->cq_check_overflow); - ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW; + /* see waitqueue_active() comment */ + smp_mb(); + + if (ctx->flags & IORING_SETUP_SQPOLL) { + if (waitqueue_active(&ctx->cq_wait)) + __wake_up(&ctx->cq_wait, TASK_NORMAL, 0, + poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); } + if (io_should_trigger_evfd(ctx)) + eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE); + if (waitqueue_active(&ctx->poll_wait)) + __wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0, + poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); } /* Returns true if there are no backlogged entries after the flush */ -static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, - struct task_struct *tsk, - struct files_struct *files) +static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) { - struct io_rings *rings = ctx->rings; - struct io_kiocb *req, *tmp; - struct io_uring_cqe *cqe; - unsigned long flags; - LIST_HEAD(list); + bool all_flushed, posted; - if (!force) { - if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) == - rings->cq_ring_entries)) - return false; - } + if (!force && __io_cqring_events(ctx) == ctx->cq_entries) + return false; - spin_lock_irqsave(&ctx->completion_lock, flags); + posted = false; + spin_lock(&ctx->completion_lock); + while (!list_empty(&ctx->cq_overflow_list)) { + struct io_uring_cqe *cqe = io_get_cqe(ctx); + struct io_overflow_cqe *ocqe; - cqe = NULL; - list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) { - if (!io_match_task(req, tsk, files)) - continue; - - cqe = io_get_cqring(ctx); if (!cqe && !force) break; + ocqe = list_first_entry(&ctx->cq_overflow_list, + struct io_overflow_cqe, list); + if (cqe) + memcpy(cqe, &ocqe->cqe, sizeof(*cqe)); + else + io_account_cq_overflow(ctx); - list_move(&req->compl.list, &list); - if (cqe) { - WRITE_ONCE(cqe->user_data, req->user_data); - WRITE_ONCE(cqe->res, req->result); - WRITE_ONCE(cqe->flags, req->compl.cflags); - } else { - ctx->cached_cq_overflow++; - WRITE_ONCE(ctx->rings->cq_overflow, - ctx->cached_cq_overflow); - } + posted = true; + list_del(&ocqe->list); + kfree(ocqe); } - io_commit_cqring(ctx); - io_cqring_mark_overflow(ctx); - - spin_unlock_irqrestore(&ctx->completion_lock, flags); - io_cqring_ev_posted(ctx); - - while (!list_empty(&list)) { - req = list_first_entry(&list, struct io_kiocb, compl.list); - list_del(&req->compl.list); - io_put_req(req); + all_flushed = list_empty(&ctx->cq_overflow_list); + if (all_flushed) { + clear_bit(0, &ctx->check_cq_overflow); + WRITE_ONCE(ctx->rings->sq_flags, + ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW); } - return cqe != NULL; + if (posted) + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + if (posted) + io_cqring_ev_posted(ctx); + return all_flushed; } -static void io_cqring_overflow_flush(struct io_ring_ctx 
*ctx, bool force, - struct task_struct *tsk, - struct files_struct *files) +static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx) { - if (test_bit(0, &ctx->cq_check_overflow)) { + bool ret = true; + + if (test_bit(0, &ctx->check_cq_overflow)) { /* iopoll syncs against uring_lock, not completion_lock */ if (ctx->flags & IORING_SETUP_IOPOLL) mutex_lock(&ctx->uring_lock); - __io_cqring_overflow_flush(ctx, force, tsk, files); + ret = __io_cqring_overflow_flush(ctx, false); if (ctx->flags & IORING_SETUP_IOPOLL) mutex_unlock(&ctx->uring_lock); } + + return ret; } -static void __io_cqring_fill_event(struct io_kiocb *req, long res, - unsigned int cflags) +/* must be called somewhat shortly after putting a request */ +static inline void io_put_task(struct task_struct *task, int nr) +{ + struct io_uring_task *tctx = task->io_uring; + + if (likely(task == current)) { + tctx->cached_refs += nr; + } else { + percpu_counter_sub(&tctx->inflight, nr); + if (unlikely(atomic_read(&tctx->in_idle))) + wake_up(&tctx->wait); + put_task_struct_many(task, nr); + } +} + +static void io_task_refs_refill(struct io_uring_task *tctx) +{ + unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR; + + percpu_counter_add(&tctx->inflight, refill); + refcount_add(refill, &current->usage); + tctx->cached_refs += refill; +} + +static inline void io_get_task_refs(int nr) +{ + struct io_uring_task *tctx = current->io_uring; + + tctx->cached_refs -= nr; + if (unlikely(tctx->cached_refs < 0)) + io_task_refs_refill(tctx); +} + +static __cold void io_uring_drop_tctx_refs(struct task_struct *task) +{ + struct io_uring_task *tctx = task->io_uring; + unsigned int refs = tctx->cached_refs; + + if (refs) { + tctx->cached_refs = 0; + percpu_counter_sub(&tctx->inflight, refs); + put_task_struct_many(task, refs); + } +} + +static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, + s32 res, u32 cflags) +{ + struct io_overflow_cqe *ocqe; + + ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT); + if (!ocqe) { + /* + * If we're in ring overflow flush mode, or in task cancel mode, + * or cannot allocate an overflow entry, then we need to drop it + * on the floor. + */ + io_account_cq_overflow(ctx); + return false; + } + if (list_empty(&ctx->cq_overflow_list)) { + set_bit(0, &ctx->check_cq_overflow); + WRITE_ONCE(ctx->rings->sq_flags, + ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW); + + } + ocqe->cqe.user_data = user_data; + ocqe->cqe.res = res; + ocqe->cqe.flags = cflags; + list_add_tail(&ocqe->list, &ctx->cq_overflow_list); + return true; +} + +static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data, + s32 res, u32 cflags) { - struct io_ring_ctx *ctx = req->ctx; struct io_uring_cqe *cqe; - trace_io_uring_complete(ctx, req->user_data, res); + trace_io_uring_complete(ctx, user_data, res, cflags); /* * If we can't get a cq entry, userspace overflowed the * submission (by quite a lot). Increment the overflow count in * the ring. */ - cqe = io_get_cqring(ctx); + cqe = io_get_cqe(ctx); if (likely(cqe)) { - WRITE_ONCE(cqe->user_data, req->user_data); + WRITE_ONCE(cqe->user_data, user_data); WRITE_ONCE(cqe->res, res); WRITE_ONCE(cqe->flags, cflags); - } else if (ctx->cq_overflow_flushed || - atomic_read(&req->task->io_uring->in_idle)) { - /* - * If we're in ring overflow flush mode, or in task cancel mode, - * then we cannot store the request for later flushing, we need - * to drop it on the floor.
- */ - ctx->cached_cq_overflow++; - WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow); - } else { - if (list_empty(&ctx->cq_overflow_list)) { - set_bit(0, &ctx->sq_check_overflow); - set_bit(0, &ctx->cq_check_overflow); - ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW; - } - io_clean_op(req); - req->result = res; - req->compl.cflags = cflags; - refcount_inc(&req->refs); - list_add_tail(&req->compl.list, &ctx->cq_overflow_list); + return true; } + return io_cqring_event_overflow(ctx, user_data, res, cflags); } -static void io_cqring_fill_event(struct io_kiocb *req, long res) +static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags) { - __io_cqring_fill_event(req, res, 0); + __io_fill_cqe(req->ctx, req->user_data, res, cflags); } -static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags) +static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, + s32 res, u32 cflags) +{ + ctx->cq_extra++; + return __io_fill_cqe(ctx, user_data, res, cflags); +} + +static void io_req_complete_post(struct io_kiocb *req, s32 res, + u32 cflags) { struct io_ring_ctx *ctx = req->ctx; - unsigned long flags; - spin_lock_irqsave(&ctx->completion_lock, flags); - __io_cqring_fill_event(req, res, cflags); - io_commit_cqring(ctx); - spin_unlock_irqrestore(&ctx->completion_lock, flags); - - io_cqring_ev_posted(ctx); -} - -static void io_submit_flush_completions(struct io_comp_state *cs) -{ - struct io_ring_ctx *ctx = cs->ctx; - - spin_lock_irq(&ctx->completion_lock); - while (!list_empty(&cs->list)) { - struct io_kiocb *req; - - req = list_first_entry(&cs->list, struct io_kiocb, compl.list); - list_del(&req->compl.list); - __io_cqring_fill_event(req, req->result, req->compl.cflags); - - /* - * io_free_req() doesn't care about completion_lock unless one - * of these flags is set. REQ_F_WORK_INITIALIZED is in the list - * because of a potential deadlock with req->work.fs->lock - */ - if (req->flags & (REQ_F_FAIL_LINK|REQ_F_LINK_TIMEOUT - |REQ_F_WORK_INITIALIZED)) { - spin_unlock_irq(&ctx->completion_lock); - io_put_req(req); - spin_lock_irq(&ctx->completion_lock); - } else { - io_put_req(req); + spin_lock(&ctx->completion_lock); + __io_fill_cqe(ctx, req->user_data, res, cflags); + /* + * If we're the last reference to this request, add to our locked + * free_list cache. 
+ */ + if (req_ref_put_and_test(req)) { + if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { + if (req->flags & IO_DISARM_MASK) + io_disarm_next(req); + if (req->link) { + io_req_task_queue(req->link); + req->link = NULL; + } } - } - io_commit_cqring(ctx); - spin_unlock_irq(&ctx->completion_lock); - - io_cqring_ev_posted(ctx); - cs->nr = 0; -} - -static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags, - struct io_comp_state *cs) -{ - if (!cs) { - io_cqring_add_event(req, res, cflags); - io_put_req(req); + io_dismantle_req(req); + io_put_task(req->task, 1); + list_add(&req->inflight_entry, &ctx->locked_free_list); + ctx->locked_free_nr++; } else { + if (!percpu_ref_tryget(&ctx->refs)) + req = NULL; + } + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + + if (req) { + io_cqring_ev_posted(ctx); + percpu_ref_put(&ctx->refs); + } +} + +static inline bool io_req_needs_clean(struct io_kiocb *req) +{ + return req->flags & IO_REQ_CLEAN_FLAGS; +} + +static inline void io_req_complete_state(struct io_kiocb *req, s32 res, + u32 cflags) +{ + if (io_req_needs_clean(req)) io_clean_op(req); - req->result = res; - req->compl.cflags = cflags; - list_add_tail(&req->compl.list, &cs->list); - if (++cs->nr >= 32) - io_submit_flush_completions(cs); - } + req->result = res; + req->compl.cflags = cflags; + req->flags |= REQ_F_COMPLETE_INLINE; } -static void io_req_complete(struct io_kiocb *req, long res) +static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, + s32 res, u32 cflags) { - __io_req_complete(req, res, 0, NULL); + if (issue_flags & IO_URING_F_COMPLETE_DEFER) + io_req_complete_state(req, res, cflags); + else + io_req_complete_post(req, res, cflags); } -static inline bool io_is_fallback_req(struct io_kiocb *req) +static inline void io_req_complete(struct io_kiocb *req, s32 res) { - return req == (struct io_kiocb *) - ((unsigned long) req->ctx->fallback_req & ~1UL); + __io_req_complete(req, 0, res, 0); } -static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx) +static void io_req_complete_failed(struct io_kiocb *req, s32 res) { - struct io_kiocb *req; - - req = ctx->fallback_req; - if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req)) - return req; - - return NULL; + req_set_fail(req); + io_req_complete_post(req, res, 0); } -static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx, - struct io_submit_state *state) +static void io_req_complete_fail_submit(struct io_kiocb *req) { - if (!state->free_reqs) { - gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; - size_t sz; - int ret; + /* + * We don't submit, fail them all, for that replace hardlinks with + * normal links. Extra REQ_F_LINK is tolerated. + */ + req->flags &= ~REQ_F_HARDLINK; + req->flags |= REQ_F_LINK; + io_req_complete_failed(req, req->result); +} - sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs)); - ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs); +/* + * Don't initialise the fields below on every allocation, but do that in + * advance and keep them valid across allocations. + */ +static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx) +{ + req->ctx = ctx; + req->link = NULL; + req->async_data = NULL; + /* not necessary, but safer to zero */ + req->result = 0; +} - /* - * Bulk alloc is all-or-nothing. If we fail to get a batch, - * retry single alloc to be on the safe side. 
- */ - if (unlikely(ret <= 0)) { - state->reqs[0] = kmem_cache_alloc(req_cachep, gfp); - if (!state->reqs[0]) - goto fallback; - ret = 1; - } - state->free_reqs = ret; +static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx, + struct io_submit_state *state) +{ + spin_lock(&ctx->completion_lock); + list_splice_init(&ctx->locked_free_list, &state->free_list); + ctx->locked_free_nr = 0; + spin_unlock(&ctx->completion_lock); +} + +/* Returns true IFF there are requests in the cache */ +static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) +{ + struct io_submit_state *state = &ctx->submit_state; + int nr; + + /* + * If we have more than a batch's worth of requests in our IRQ side + * locked cache, grab the lock and move them over to our submission + * side cache. + */ + if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH) + io_flush_cached_locked_reqs(ctx, state); + + nr = state->free_reqs; + while (!list_empty(&state->free_list)) { + struct io_kiocb *req = list_first_entry(&state->free_list, + struct io_kiocb, inflight_entry); + + list_del(&req->inflight_entry); + state->reqs[nr++] = req; + if (nr == ARRAY_SIZE(state->reqs)) + break; } + state->free_reqs = nr; + return nr != 0; +} + +/* + * A request might get retired back into the request caches even before opcode + * handlers and io_issue_sqe() are done with it, e.g. inline completion path. + * Because of that, io_alloc_req() should be called only under ->uring_lock + * and with extra caution to not get a request that is still worked on. + */ +static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) + __must_hold(&ctx->uring_lock) +{ + struct io_submit_state *state = &ctx->submit_state; + gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; + int ret, i; + + BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH); + + if (likely(state->free_reqs || io_flush_cached_reqs(ctx))) + goto got_req; + + ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH, + state->reqs); + + /* + * Bulk alloc is all-or-nothing. If we fail to get a batch, + * retry single alloc to be on the safe side. 
+ */ + if (unlikely(ret <= 0)) { + state->reqs[0] = kmem_cache_alloc(req_cachep, gfp); + if (!state->reqs[0]) + return NULL; + ret = 1; + } + + for (i = 0; i < ret; i++) + io_preinit_req(state->reqs[i], ctx); + state->free_reqs = ret; +got_req: state->free_reqs--; return state->reqs[state->free_reqs]; -fallback: - return io_get_fallback_req(ctx); } -static inline void io_put_file(struct io_kiocb *req, struct file *file, - bool fixed) +static inline void io_put_file(struct file *file) { - if (fixed) - percpu_ref_put(req->fixed_file_refs); - else + if (file) fput(file); } static void io_dismantle_req(struct io_kiocb *req) { - io_clean_op(req); + unsigned int flags = req->flags; - if (req->async_data) + if (io_req_needs_clean(req)) + io_clean_op(req); + if (!(flags & REQ_F_FIXED_FILE)) + io_put_file(req->file); + if (req->fixed_rsrc_refs) + percpu_ref_put(req->fixed_rsrc_refs); + if (req->async_data) { kfree(req->async_data); - if (req->file) - io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE)); - - io_req_clean_work(req); + req->async_data = NULL; + } } static void __io_free_req(struct io_kiocb *req) { - struct io_uring_task *tctx = req->task->io_uring; struct io_ring_ctx *ctx = req->ctx; io_dismantle_req(req); + io_put_task(req->task, 1); - percpu_counter_dec(&tctx->inflight); - if (atomic_read(&tctx->in_idle)) - wake_up(&tctx->wait); - put_task_struct(req->task); + spin_lock(&ctx->completion_lock); + list_add(&req->inflight_entry, &ctx->locked_free_list); + ctx->locked_free_nr++; + spin_unlock(&ctx->completion_lock); - if (likely(!io_is_fallback_req(req))) - kmem_cache_free(req_cachep, req); - else - clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req); percpu_ref_put(&ctx->refs); } -static void io_kill_linked_timeout(struct io_kiocb *req) +static inline void io_remove_next_linked(struct io_kiocb *req) { - struct io_ring_ctx *ctx = req->ctx; - struct io_kiocb *link; - bool cancelled = false; - unsigned long flags; + struct io_kiocb *nxt = req->link; - spin_lock_irqsave(&ctx->completion_lock, flags); - link = list_first_entry_or_null(&req->link_list, struct io_kiocb, - link_list); - /* - * Can happen if a linked timeout fired and link had been like - * req -> link t-out -> link t-out [-> ...] - */ - if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) { + req->link = nxt->link; + nxt->link = NULL; +} + +static bool io_kill_linked_timeout(struct io_kiocb *req) + __must_hold(&req->ctx->completion_lock) + __must_hold(&req->ctx->timeout_lock) +{ + struct io_kiocb *link = req->link; + + if (link && link->opcode == IORING_OP_LINK_TIMEOUT) { struct io_timeout_data *io = link->async_data; - int ret; - list_del_init(&link->link_list); - ret = hrtimer_try_to_cancel(&io->timer); - if (ret != -1) { - io_cqring_fill_event(link, -ECANCELED); - io_commit_cqring(ctx); - cancelled = true; + io_remove_next_linked(req); + link->timeout.head = NULL; + if (hrtimer_try_to_cancel(&io->timer) != -1) { + list_del(&link->timeout.list); + io_fill_cqe_req(link, -ECANCELED, 0); + io_put_req_deferred(link); + return true; } } - req->flags &= ~REQ_F_LINK_TIMEOUT; - spin_unlock_irqrestore(&ctx->completion_lock, flags); - - if (cancelled) { - io_cqring_ev_posted(ctx); - io_put_req(link); - } + return false; } -static struct io_kiocb *io_req_link_next(struct io_kiocb *req) -{ - struct io_kiocb *nxt; - - /* - * The list should never be empty when we are called here. But could - * potentially happen if the chain is messed up, check to be on the - * safe side. 
- */ - if (unlikely(list_empty(&req->link_list))) - return NULL; - - nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list); - list_del_init(&req->link_list); - if (!list_empty(&nxt->link_list)) - nxt->flags |= REQ_F_LINK_HEAD; - return nxt; -} - -/* - * Called if REQ_F_LINK_HEAD is set, and we fail the head request - */ static void io_fail_links(struct io_kiocb *req) + __must_hold(&req->ctx->completion_lock) { - struct io_ring_ctx *ctx = req->ctx; - unsigned long flags; + struct io_kiocb *nxt, *link = req->link; - spin_lock_irqsave(&ctx->completion_lock, flags); - while (!list_empty(&req->link_list)) { - struct io_kiocb *link = list_first_entry(&req->link_list, - struct io_kiocb, link_list); + req->link = NULL; + while (link) { + long res = -ECANCELED; + + if (link->flags & REQ_F_FAIL) + res = link->result; + + nxt = link->link; + link->link = NULL; - list_del_init(&link->link_list); trace_io_uring_fail_link(req, link); - - io_cqring_fill_event(link, -ECANCELED); - - /* - * It's ok to free under spinlock as they're not linked anymore, - * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on - * work.fs->lock. - */ - if (link->flags & REQ_F_WORK_INITIALIZED) - io_put_req_deferred(link, 2); - else - io_double_put_req(link); + io_fill_cqe_req(link, res, 0); + io_put_req_deferred(link); + link = nxt; } +} - io_commit_cqring(ctx); - spin_unlock_irqrestore(&ctx->completion_lock, flags); +static bool io_disarm_next(struct io_kiocb *req) + __must_hold(&req->ctx->completion_lock) +{ + bool posted = false; - io_cqring_ev_posted(ctx); + if (req->flags & REQ_F_ARM_LTIMEOUT) { + struct io_kiocb *link = req->link; + + req->flags &= ~REQ_F_ARM_LTIMEOUT; + if (link && link->opcode == IORING_OP_LINK_TIMEOUT) { + io_remove_next_linked(req); + io_fill_cqe_req(link, -ECANCELED, 0); + io_put_req_deferred(link); + posted = true; + } + } else if (req->flags & REQ_F_LINK_TIMEOUT) { + struct io_ring_ctx *ctx = req->ctx; + + spin_lock_irq(&ctx->timeout_lock); + posted = io_kill_linked_timeout(req); + spin_unlock_irq(&ctx->timeout_lock); + } + if (unlikely((req->flags & REQ_F_FAIL) && + !(req->flags & REQ_F_HARDLINK))) { + posted |= (req->link != NULL); + io_fail_links(req); + } + return posted; } static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) { - req->flags &= ~REQ_F_LINK_HEAD; - if (req->flags & REQ_F_LINK_TIMEOUT) - io_kill_linked_timeout(req); + struct io_kiocb *nxt; /* * If LINK is set, we have dependent requests in this chain. If we @@ -2026,28 +2132,112 @@ static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) * dependencies to the next request. In case of failure, fail the rest * of the chain. 
*/ - if (likely(!(req->flags & REQ_F_FAIL_LINK))) - return io_req_link_next(req); - io_fail_links(req); - return NULL; + if (req->flags & IO_DISARM_MASK) { + struct io_ring_ctx *ctx = req->ctx; + bool posted; + + spin_lock(&ctx->completion_lock); + posted = io_disarm_next(req); + if (posted) + io_commit_cqring(req->ctx); + spin_unlock(&ctx->completion_lock); + if (posted) + io_cqring_ev_posted(ctx); + } + nxt = req->link; + req->link = NULL; + return nxt; } -static struct io_kiocb *io_req_find_next(struct io_kiocb *req) +static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) { - if (likely(!(req->flags & REQ_F_LINK_HEAD))) + if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))) return NULL; return __io_req_find_next(req); } -static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok) +static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked) +{ + if (!ctx) + return; + if (*locked) { + if (ctx->submit_state.compl_nr) + io_submit_flush_completions(ctx); + mutex_unlock(&ctx->uring_lock); + *locked = false; + } + percpu_ref_put(&ctx->refs); +} + +static void tctx_task_work(struct callback_head *cb) +{ + bool locked = false; + struct io_ring_ctx *ctx = NULL; + struct io_uring_task *tctx = container_of(cb, struct io_uring_task, + task_work); + + while (1) { + struct io_wq_work_node *node; + + if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr) + io_submit_flush_completions(ctx); + + spin_lock_irq(&tctx->task_lock); + node = tctx->task_list.first; + INIT_WQ_LIST(&tctx->task_list); + if (!node) + tctx->task_running = false; + spin_unlock_irq(&tctx->task_lock); + if (!node) + break; + + do { + struct io_wq_work_node *next = node->next; + struct io_kiocb *req = container_of(node, struct io_kiocb, + io_task_work.node); + + if (req->ctx != ctx) { + ctx_flush_and_put(ctx, &locked); + ctx = req->ctx; + /* if not contended, grab and improve batching */ + locked = mutex_trylock(&ctx->uring_lock); + percpu_ref_get(&ctx->refs); + } + req->io_task_work.func(req, &locked); + node = next; + } while (node); + + cond_resched(); + } + + ctx_flush_and_put(ctx, &locked); + + /* relaxed read is enough as only the task itself sets ->in_idle */ + if (unlikely(atomic_read(&tctx->in_idle))) + io_uring_drop_tctx_refs(current); +} + +static void io_req_task_work_add(struct io_kiocb *req) { struct task_struct *tsk = req->task; - struct io_ring_ctx *ctx = req->ctx; + struct io_uring_task *tctx = tsk->io_uring; enum task_work_notify_mode notify; - int ret; + struct io_wq_work_node *node; + unsigned long flags; + bool running; - if (tsk->flags & PF_EXITING) - return -ESRCH; + WARN_ON_ONCE(!tctx); + + spin_lock_irqsave(&tctx->task_lock, flags); + wq_list_add_tail(&req->io_task_work.node, &tctx->task_list); + running = tctx->task_running; + if (!running) + tctx->task_running = true; + spin_unlock_irqrestore(&tctx->task_lock, flags); + + /* task_work already pending, we're done */ + if (running) + return; /* * SQPOLL kernel thread doesn't need notification, just a wakeup. For @@ -2055,85 +2245,68 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok) * processing task_work. There's no reliable way to tell if TWA_RESUME * will do the job. */ - notify = TWA_NONE; - if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok) - notify = TWA_SIGNAL; - - ret = task_work_add(tsk, &req->task_work, notify); - if (!ret) + notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? 
TWA_NONE : TWA_SIGNAL; + if (!task_work_add(tsk, &tctx->task_work, notify)) { wake_up_process(tsk); + return; + } - return ret; + spin_lock_irqsave(&tctx->task_lock, flags); + tctx->task_running = false; + node = tctx->task_list.first; + INIT_WQ_LIST(&tctx->task_list); + spin_unlock_irqrestore(&tctx->task_lock, flags); + + while (node) { + req = container_of(node, struct io_kiocb, io_task_work.node); + node = node->next; + if (llist_add(&req->io_task_work.fallback_node, + &req->ctx->fallback_llist)) + schedule_delayed_work(&req->ctx->fallback_work, 1); + } } -static void __io_req_task_cancel(struct io_kiocb *req, int error) +static void io_req_task_cancel(struct io_kiocb *req, bool *locked) { struct io_ring_ctx *ctx = req->ctx; - spin_lock_irq(&ctx->completion_lock); - io_cqring_fill_event(req, error); - io_commit_cqring(ctx); - spin_unlock_irq(&ctx->completion_lock); - - io_cqring_ev_posted(ctx); - req_set_fail_links(req); - io_double_put_req(req); + /* not needed for normal modes, but SQPOLL depends on it */ + io_tw_lock(ctx, locked); + io_req_complete_failed(req, req->result); } -static void io_req_task_cancel(struct callback_head *cb) -{ - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - struct io_ring_ctx *ctx = req->ctx; - - mutex_lock(&ctx->uring_lock); - __io_req_task_cancel(req, -ECANCELED); - mutex_unlock(&ctx->uring_lock); - percpu_ref_put(&ctx->refs); -} - -static void __io_req_task_submit(struct io_kiocb *req) +static void io_req_task_submit(struct io_kiocb *req, bool *locked) { struct io_ring_ctx *ctx = req->ctx; - mutex_lock(&ctx->uring_lock); - if (!ctx->sqo_dead && !__io_sq_thread_acquire_mm(ctx)) - __io_queue_sqe(req, NULL); + io_tw_lock(ctx, locked); + /* req->task == current here, checking PF_EXITING is safe */ + if (likely(!(req->task->flags & PF_EXITING))) + __io_queue_sqe(req); else - __io_req_task_cancel(req, -EFAULT); - mutex_unlock(&ctx->uring_lock); - - if (ctx->flags & IORING_SETUP_SQPOLL) - io_sq_thread_drop_mm(); + io_req_complete_failed(req, -EFAULT); } -static void io_req_task_submit(struct callback_head *cb) +static void io_req_task_queue_fail(struct io_kiocb *req, int ret) { - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - struct io_ring_ctx *ctx = req->ctx; - - __io_req_task_submit(req); - percpu_ref_put(&ctx->refs); + req->result = ret; + req->io_task_work.func = io_req_task_cancel; + io_req_task_work_add(req); } static void io_req_task_queue(struct io_kiocb *req) { - int ret; - - init_task_work(&req->task_work, io_req_task_submit); - percpu_ref_get(&req->ctx->refs); - - ret = io_req_task_work_add(req, true); - if (unlikely(ret)) { - struct task_struct *tsk; - - init_task_work(&req->task_work, io_req_task_cancel); - tsk = io_wq_get_task(req->ctx->io_wq); - task_work_add(tsk, &req->task_work, TWA_NONE); - wake_up_process(tsk); - } + req->io_task_work.func = io_req_task_submit; + io_req_task_work_add(req); } -static void io_queue_next(struct io_kiocb *req) +static void io_req_task_queue_reissue(struct io_kiocb *req) +{ + req->io_task_work.func = io_queue_async_work; + io_req_task_work_add(req); +} + +static inline void io_queue_next(struct io_kiocb *req) { struct io_kiocb *nxt = io_req_find_next(req); @@ -2147,153 +2320,118 @@ static void io_free_req(struct io_kiocb *req) __io_free_req(req); } -struct req_batch { - void *reqs[IO_IOPOLL_BATCH]; - int to_free; +static void io_free_req_work(struct io_kiocb *req, bool *locked) +{ + io_free_req(req); +} +struct req_batch { struct task_struct *task; int task_refs; + 
int ctx_refs; }; static inline void io_init_req_batch(struct req_batch *rb) { - rb->to_free = 0; rb->task_refs = 0; + rb->ctx_refs = 0; rb->task = NULL; } -static void __io_req_free_batch_flush(struct io_ring_ctx *ctx, - struct req_batch *rb) -{ - kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs); - percpu_ref_put_many(&ctx->refs, rb->to_free); - rb->to_free = 0; -} - static void io_req_free_batch_finish(struct io_ring_ctx *ctx, struct req_batch *rb) { - if (rb->to_free) - __io_req_free_batch_flush(ctx, rb); - if (rb->task) { - struct io_uring_task *tctx = rb->task->io_uring; - - percpu_counter_sub(&tctx->inflight, rb->task_refs); - if (atomic_read(&tctx->in_idle)) - wake_up(&tctx->wait); - put_task_struct_many(rb->task, rb->task_refs); - rb->task = NULL; - } + if (rb->ctx_refs) + percpu_ref_put_many(&ctx->refs, rb->ctx_refs); + if (rb->task) + io_put_task(rb->task, rb->task_refs); } -static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req) +static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, + struct io_submit_state *state) { - if (unlikely(io_is_fallback_req(req))) { - io_free_req(req); - return; - } - if (req->flags & REQ_F_LINK_HEAD) - io_queue_next(req); + io_queue_next(req); + io_dismantle_req(req); if (req->task != rb->task) { - if (rb->task) { - struct io_uring_task *tctx = rb->task->io_uring; - - percpu_counter_sub(&tctx->inflight, rb->task_refs); - if (atomic_read(&tctx->in_idle)) - wake_up(&tctx->wait); - put_task_struct_many(rb->task, rb->task_refs); - } + if (rb->task) + io_put_task(rb->task, rb->task_refs); rb->task = req->task; rb->task_refs = 0; } rb->task_refs++; + rb->ctx_refs++; - io_dismantle_req(req); - rb->reqs[rb->to_free++] = req; - if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs))) - __io_req_free_batch_flush(req->ctx, rb); + if (state->free_reqs != ARRAY_SIZE(state->reqs)) + state->reqs[state->free_reqs++] = req; + else + list_add(&req->inflight_entry, &state->free_list); +} + +static void io_submit_flush_completions(struct io_ring_ctx *ctx) + __must_hold(&ctx->uring_lock) +{ + struct io_submit_state *state = &ctx->submit_state; + int i, nr = state->compl_nr; + struct req_batch rb; + + spin_lock(&ctx->completion_lock); + for (i = 0; i < nr; i++) { + struct io_kiocb *req = state->compl_reqs[i]; + + __io_fill_cqe(ctx, req->user_data, req->result, + req->compl.cflags); + } + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + io_cqring_ev_posted(ctx); + + io_init_req_batch(&rb); + for (i = 0; i < nr; i++) { + struct io_kiocb *req = state->compl_reqs[i]; + + if (req_ref_put_and_test(req)) + io_req_free_batch(&rb, req, &ctx->submit_state); + } + + io_req_free_batch_finish(ctx, &rb); + state->compl_nr = 0; } /* * Drop reference to request, return next in chain (if there is one) if this * was the last reference to this request. 
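/*
 * An illustrative userspace sketch, not from this patch: the req_batch
 * rework in this hunk batches reference drops, so io_req_free_batch_finish()
 * does one io_put_task()/percpu_ref_put_many() per batch instead of one
 * atomic put per request. The same batching idea in plain C11 atomics:
 */
#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int refs;
};

/* drop 'nr' references with a single atomic op instead of 'nr' of them */
static void put_many(struct obj *o, int nr)
{
	if (atomic_fetch_sub(&o->refs, nr) == nr)
		printf("last reference dropped, object can be freed\n");
}

int main(void)
{
	struct obj o = { .refs = 8 };

	put_many(&o, 7);	/* a batch of 7 puts for the same task */
	put_many(&o, 1);	/* the final put frees the object */
	return 0;
}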
*/ -static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) +static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) { struct io_kiocb *nxt = NULL; - if (refcount_dec_and_test(&req->refs)) { + if (req_ref_put_and_test(req)) { nxt = io_req_find_next(req); __io_free_req(req); } return nxt; } -static void io_put_req(struct io_kiocb *req) +static inline void io_put_req(struct io_kiocb *req) { - if (refcount_dec_and_test(&req->refs)) + if (req_ref_put_and_test(req)) io_free_req(req); } -static void io_put_req_deferred_cb(struct callback_head *cb) +static inline void io_put_req_deferred(struct io_kiocb *req) { - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - - io_free_req(req); -} - -static void io_free_req_deferred(struct io_kiocb *req) -{ - int ret; - - init_task_work(&req->task_work, io_put_req_deferred_cb); - ret = io_req_task_work_add(req, true); - if (unlikely(ret)) { - struct task_struct *tsk; - - tsk = io_wq_get_task(req->ctx->io_wq); - task_work_add(tsk, &req->task_work, TWA_NONE); - wake_up_process(tsk); + if (req_ref_put_and_test(req)) { + req->io_task_work.func = io_free_req_work; + io_req_task_work_add(req); } } -static inline void io_put_req_deferred(struct io_kiocb *req, int refs) -{ - if (refcount_sub_and_test(refs, &req->refs)) - io_free_req_deferred(req); -} - -static struct io_wq_work *io_steal_work(struct io_kiocb *req) -{ - struct io_kiocb *nxt; - - /* - * A ref is owned by io-wq in which context we're. So, if that's the - * last one, it's safe to steal next work. False negatives are Ok, - * it just will be re-punted async in io_put_work() - */ - if (refcount_read(&req->refs) != 1) - return NULL; - - nxt = io_req_find_next(req); - return nxt ? &nxt->work : NULL; -} - -static void io_double_put_req(struct io_kiocb *req) -{ - /* drop both submit and complete references */ - if (refcount_sub_and_test(2, &req->refs)) - io_free_req(req); -} - static unsigned io_cqring_events(struct io_ring_ctx *ctx) { - struct io_rings *rings = ctx->rings; - /* See comment at the top of this file */ smp_rmb(); - return ctx->cached_cq_tail - READ_ONCE(rings->cq.head); + return __io_cqring_events(ctx); } static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx) @@ -2319,38 +2457,23 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) { struct io_buffer *kbuf; + if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) + return 0; kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; return io_put_kbuf(req, kbuf); } static inline bool io_run_task_work(void) { - /* - * Not safe to run on exiting task, and the task_work handling will - * not add work to such a task. 
- */ - if (unlikely(current->flags & PF_EXITING)) - return false; - if (current->task_works) { + if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) { __set_current_state(TASK_RUNNING); - task_work_run(); + tracehook_notify_signal(); return true; } return false; } -static void io_iopoll_queue(struct list_head *again) -{ - struct io_kiocb *req; - - do { - req = list_first_entry(again, struct io_kiocb, inflight_entry); - list_del(&req->inflight_entry); - __io_complete_rw(req, -EAGAIN, 0, NULL); - } while (!list_empty(again)); -} - /* * Find and free completed poll iocbs */ @@ -2359,41 +2482,25 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, { struct req_batch rb; struct io_kiocb *req; - LIST_HEAD(again); /* order with ->result store in io_complete_rw_iopoll() */ smp_rmb(); io_init_req_batch(&rb); while (!list_empty(done)) { - int cflags = 0; - req = list_first_entry(done, struct io_kiocb, inflight_entry); - if (READ_ONCE(req->result) == -EAGAIN) { - req->result = 0; - req->iopoll_completed = 0; - list_move_tail(&req->inflight_entry, &again); - continue; - } list_del(&req->inflight_entry); - if (req->flags & REQ_F_BUFFER_SELECTED) - cflags = io_put_rw_kbuf(req); - - __io_cqring_fill_event(req, req->result, cflags); + io_fill_cqe_req(req, req->result, io_put_rw_kbuf(req)); (*nr_events)++; - if (refcount_dec_and_test(&req->refs)) - io_req_free_batch(&rb, req); + if (req_ref_put_and_test(req)) + io_req_free_batch(&rb, req, &ctx->submit_state); } io_commit_cqring(ctx); - if (ctx->flags & IORING_SETUP_SQPOLL) - io_cqring_ev_posted(ctx); + io_cqring_ev_posted_iopoll(ctx); io_req_free_batch_finish(ctx, &rb); - - if (!list_empty(&again)) - io_iopoll_queue(&again); } static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, @@ -2402,17 +2509,16 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, struct io_kiocb *req, *tmp; LIST_HEAD(done); bool spin; - int ret; /* * Only spin for completions if we don't have multiple devices hanging * off our complete list, and we're under the requested amount. */ - spin = !ctx->poll_multi_file && *nr_events < min; + spin = !ctx->poll_multi_queue && *nr_events < min; - ret = 0; list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { struct kiocb *kiocb = &req->rw.kiocb; + int ret; /* * Move completed and retryable entries to our local lists. @@ -2427,43 +2533,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, break; ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin); - if (ret < 0) - break; + if (unlikely(ret < 0)) + return ret; + else if (ret) + spin = false; /* iopoll may have completed current req */ if (READ_ONCE(req->iopoll_completed)) list_move_tail(&req->inflight_entry, &done); - - if (ret && spin) - spin = false; - ret = 0; } if (!list_empty(&done)) io_iopoll_complete(ctx, nr_events, &done); - return ret; -} - -/* - * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a - * non-spinning poll check - we'll still enter the driver poll loop, but only - * as a non-spinning completion check. 
- */ -static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events, - long min) -{ - while (!list_empty(&ctx->iopoll_list) && !need_resched()) { - int ret; - - ret = io_do_iopoll(ctx, nr_events, min); - if (ret < 0) - return ret; - if (*nr_events >= min) - return 0; - } - - return 1; + return 0; } /* @@ -2501,7 +2584,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) static int io_iopoll_check(struct io_ring_ctx *ctx, long min) { unsigned int nr_events = 0; - int iters = 0, ret = 0; + int ret = 0; /* * We disallow the app entering submit/complete with polling, but we @@ -2509,17 +2592,16 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) * that got punted to a workqueue. */ mutex_lock(&ctx->uring_lock); + /* + * Don't enter poll loop if we already have events pending. + * If we do, we can potentially be spinning for commands that + * already triggered a CQE (eg in error). + */ + if (test_bit(0, &ctx->check_cq_overflow)) + __io_cqring_overflow_flush(ctx, false); + if (io_cqring_events(ctx)) + goto out; do { - /* - * Don't enter poll loop if we already have events pending. - * If we do, we can potentially be spinning for commands that - * already triggered a CQE (eg in error). - */ - if (test_bit(0, &ctx->cq_check_overflow)) - __io_cqring_overflow_flush(ctx, false, NULL, NULL); - if (io_cqring_events(ctx)) - break; - /* * If a submit got punted to a workqueue, we can have the * application entering polling for a command before it gets @@ -2530,18 +2612,21 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) * forever, while the workqueue is stuck trying to acquire the * very same mutex. */ - if (!(++iters & 7)) { + if (list_empty(&ctx->iopoll_list)) { + u32 tail = ctx->cached_cq_tail; + mutex_unlock(&ctx->uring_lock); io_run_task_work(); mutex_lock(&ctx->uring_lock); + + /* some requests don't go through iopoll_list */ + if (tail != ctx->cached_cq_tail || + list_empty(&ctx->iopoll_list)) + break; } - - ret = io_iopoll_getevents(ctx, &nr_events, min); - if (ret <= 0) - break; - ret = 0; - } while (min && !nr_events && !need_resched()); - + ret = io_do_iopoll(ctx, &nr_events, min); + } while (!ret && nr_events < min && !need_resched()); +out: mutex_unlock(&ctx->uring_lock); return ret; } @@ -2553,118 +2638,129 @@ static void kiocb_end_write(struct io_kiocb *req) * thread. 
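/*
 * An illustrative userspace sketch, not from this patch: the reworked
 * io_iopoll_check() above services rings created with IORING_SETUP_IOPOLL,
 * where completions are reaped by polling the device rather than waiting
 * for an interrupt. Minimal example assuming liburing is installed; the
 * device path is a placeholder and its queue must have polling enabled:
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	/* IOPOLL rings reap completions by polling, not interrupts */
	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
		return 1;

	/* IOPOLL requires O_DIRECT file access */
	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* waiting for the CQE ends up in io_iopoll_check() in the kernel */
	if (!io_uring_wait_cqe(&ring, &cqe)) {
		printf("polled read returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}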
*/ if (req->flags & REQ_F_ISREG) { - struct inode *inode = file_inode(req->file); + struct super_block *sb = file_inode(req->file)->i_sb; - __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); + __sb_writers_acquired(sb, SB_FREEZE_WRITE); + sb_end_write(sb); } - file_end_write(req->file); -} - -static void io_complete_rw_common(struct kiocb *kiocb, long res, - struct io_comp_state *cs) -{ - struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); - int cflags = 0; - - if (kiocb->ki_flags & IOCB_WRITE) - kiocb_end_write(req); - - if (res != req->result) - req_set_fail_links(req); - if (req->flags & REQ_F_BUFFER_SELECTED) - cflags = io_put_rw_kbuf(req); - __io_req_complete(req, res, cflags, cs); } #ifdef CONFIG_BLOCK -static bool io_resubmit_prep(struct io_kiocb *req, int error) +static bool io_resubmit_prep(struct io_kiocb *req) { - struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; - ssize_t ret = -ECANCELED; - struct iov_iter iter; - int rw; + struct io_async_rw *rw = req->async_data; - if (error) { - ret = error; - goto end_req; - } - - switch (req->opcode) { - case IORING_OP_READV: - case IORING_OP_READ_FIXED: - case IORING_OP_READ: - rw = READ; - break; - case IORING_OP_WRITEV: - case IORING_OP_WRITE_FIXED: - case IORING_OP_WRITE: - rw = WRITE; - break; - default: - printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n", - req->opcode); - goto end_req; - } - - if (!req->async_data) { - ret = io_import_iovec(rw, req, &iovec, &iter, false); - if (ret < 0) - goto end_req; - ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false); - if (!ret) - return true; - kfree(iovec); - } else { - return true; - } -end_req: - req_set_fail_links(req); - return false; + if (!rw) + return !io_req_prep_async(req); + iov_iter_restore(&rw->iter, &rw->iter_state); + return true; } -#endif -static bool io_rw_reissue(struct io_kiocb *req, long res) +static bool io_rw_should_reissue(struct io_kiocb *req) { -#ifdef CONFIG_BLOCK umode_t mode = file_inode(req->file)->i_mode; - int ret; + struct io_ring_ctx *ctx = req->ctx; if (!S_ISBLK(mode) && !S_ISREG(mode)) return false; - if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker()) + if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && + !(ctx->flags & IORING_SETUP_IOPOLL))) return false; /* * If ref is dying, we might be running poll reap from the exit work. * Don't attempt to reissue from that path, just let it fail with * -EAGAIN. */ - if (percpu_ref_is_dying(&req->ctx->refs)) + if (percpu_ref_is_dying(&ctx->refs)) return false; - - ret = io_sq_thread_acquire_mm(req->ctx, req); - - if (io_resubmit_prep(req, ret)) { - refcount_inc(&req->refs); - io_queue_async_work(req); - return true; - } - + /* + * Play it safe and assume not safe to re-import and reissue if we're + * not in the original thread group (or in task context). 
+ */ + if (!same_thread_group(req->task, current) || !in_task()) + return false; + return true; +} +#else +static bool io_resubmit_prep(struct io_kiocb *req) +{ + return false; +} +static bool io_rw_should_reissue(struct io_kiocb *req) +{ + return false; +} #endif + +static bool __io_complete_rw_common(struct io_kiocb *req, long res) +{ + if (req->rw.kiocb.ki_flags & IOCB_WRITE) { + kiocb_end_write(req); + fsnotify_modify(req->file); + } else { + fsnotify_access(req->file); + } + if (res != req->result) { + if ((res == -EAGAIN || res == -EOPNOTSUPP) && + io_rw_should_reissue(req)) { + req->flags |= REQ_F_REISSUE; + return true; + } + req_set_fail(req); + req->result = res; + } return false; } -static void __io_complete_rw(struct io_kiocb *req, long res, long res2, - struct io_comp_state *cs) +static inline int io_fixup_rw_res(struct io_kiocb *req, unsigned res) { - if (!io_rw_reissue(req, res)) - io_complete_rw_common(&req->rw.kiocb, res, cs); + struct io_async_rw *io = req->async_data; + + /* add previously done IO, if any */ + if (io && io->bytes_done > 0) { + if (res < 0) + res = io->bytes_done; + else + res += io->bytes_done; + } + return res; +} + +static void io_req_task_complete(struct io_kiocb *req, bool *locked) +{ + unsigned int cflags = io_put_rw_kbuf(req); + int res = req->result; + + if (*locked) { + struct io_ring_ctx *ctx = req->ctx; + struct io_submit_state *state = &ctx->submit_state; + + io_req_complete_state(req, res, cflags); + state->compl_reqs[state->compl_nr++] = req; + if (state->compl_nr == ARRAY_SIZE(state->compl_reqs)) + io_submit_flush_completions(ctx); + } else { + io_req_complete_post(req, res, cflags); + } +} + +static void __io_complete_rw(struct io_kiocb *req, long res, long res2, + unsigned int issue_flags) +{ + if (__io_complete_rw_common(req, res)) + return; + __io_req_complete(req, issue_flags, io_fixup_rw_res(req, res), io_put_rw_kbuf(req)); } static void io_complete_rw(struct kiocb *kiocb, long res, long res2) { struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); - __io_complete_rw(req, res, res2, NULL); + if (__io_complete_rw_common(req, res)) + return; + req->result = io_fixup_rw_res(req, res); + req->io_task_work.func = io_req_task_complete; + io_req_task_work_add(req); } static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) @@ -2673,12 +2769,15 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) if (kiocb->ki_flags & IOCB_WRITE) kiocb_end_write(req); - - if (res != -EAGAIN && res != req->result) - req_set_fail_links(req); + if (unlikely(res != req->result)) { + if (res == -EAGAIN && io_rw_should_reissue(req)) { + req->flags |= REQ_F_REISSUE; + return; + } + } WRITE_ONCE(req->result, res); - /* order with io_poll_complete() checking ->result */ + /* order with io_iopoll_complete() checking ->result */ smp_wmb(); WRITE_ONCE(req->iopoll_completed, 1); } @@ -2686,12 +2785,17 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) /* * After the iocb has been issued, it's safe to be found on the poll list. * Adding the kiocb to the list AFTER submission ensures that we don't - * find it from a io_iopoll_getevents() thread before the issuer is done + * find it from a io_do_iopoll() thread before the issuer is done * accessing the kiocb cookie. 
*/ static void io_iopoll_req_issued(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; + const bool in_async = io_wq_current_is_worker(); + + /* workqueue context doesn't hold uring_lock, grab it now */ + if (unlikely(in_async)) + mutex_lock(&ctx->uring_lock); /* * Track whether we have multiple files in our lists. This will impact @@ -2699,14 +2803,22 @@ static void io_iopoll_req_issued(struct io_kiocb *req) * different devices. */ if (list_empty(&ctx->iopoll_list)) { - ctx->poll_multi_file = false; - } else if (!ctx->poll_multi_file) { + ctx->poll_multi_queue = false; + } else if (!ctx->poll_multi_queue) { struct io_kiocb *list_req; + unsigned int queue_num0, queue_num1; list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb, inflight_entry); - if (list_req->file != req->file) - ctx->poll_multi_file = true; + + if (list_req->file != req->file) { + ctx->poll_multi_queue = true; + } else { + queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie); + queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie); + if (queue_num0 != queue_num1) + ctx->poll_multi_queue = true; + } } /* @@ -2718,57 +2830,24 @@ static void io_iopoll_req_issued(struct io_kiocb *req) else list_add_tail(&req->inflight_entry, &ctx->iopoll_list); - if ((ctx->flags & IORING_SETUP_SQPOLL) && - wq_has_sleeper(&ctx->sq_data->wait)) - wake_up(&ctx->sq_data->wait); -} + if (unlikely(in_async)) { + /* + * If IORING_SETUP_SQPOLL is enabled, sqes are either handled + * in sq thread task context or in io worker task context. If + * current task context is sq thread, we don't need to check + * whether we should wake up the sq thread. + */ + if ((ctx->flags & IORING_SETUP_SQPOLL) && + wq_has_sleeper(&ctx->sq_data->wait)) + wake_up(&ctx->sq_data->wait); -static void __io_state_file_put(struct io_submit_state *state) -{ - if (state->has_refs) - fput_many(state->file, state->has_refs); - state->file = NULL; -} - -static inline void io_state_file_put(struct io_submit_state *state) -{ - if (state->file) - __io_state_file_put(state); -} - -/* - * Get as many references to a file as we have IOs left in this submission, - * assuming most submissions are for one file, or at least that each file - * has more than one submission. - */ -static struct file *__io_file_get(struct io_submit_state *state, int fd) -{ - if (!state) - return fget(fd); - - if (state->file) { - if (state->fd == fd) { - state->has_refs--; - return state->file; - } - __io_state_file_put(state); + mutex_unlock(&ctx->uring_lock); } - state->file = fget_many(fd, state->ios_left); - if (!state->file) - return NULL; - - state->fd = fd; - state->has_refs = state->ios_left - 1; - return state->file; } static bool io_bdev_nowait(struct block_device *bdev) { -#ifdef CONFIG_BLOCK return !bdev || blk_queue_nowait(bdev_get_queue(bdev)); -#else - return true; -#endif } /* @@ -2776,19 +2855,21 @@ static bool io_bdev_nowait(struct block_device *bdev) * any file. For now, just ensure that anything potentially problematic is done * inline.
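/*
 * An illustrative userspace sketch, not from this patch: the
 * wq_has_sleeper() + wake_up() pair above is the kernel half of SQPOLL.
 * The userspace half just sets the ring up; liburing itself watches the
 * IORING_SQ_NEED_WAKEUP flag and issues the wakeup io_uring_enter() call
 * once the submission thread has gone to sleep. The idle value below is an
 * arbitrary example:
 */
#include <liburing.h>
#include <string.h>

int setup_sqpoll_ring(struct io_uring *ring)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL;	/* kernel thread polls the SQ ring */
	p.sq_thread_idle = 2000;	/* thread sleeps after 2000ms of idle */

	/* may need elevated privileges on kernels before 5.11 */
	return io_uring_queue_init_params(8, ring, &p);
}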
*/ -static bool io_file_supports_async(struct file *file, int rw) +static bool __io_file_supports_nowait(struct file *file, int rw) { umode_t mode = file_inode(file)->i_mode; if (S_ISBLK(mode)) { - if (io_bdev_nowait(file->f_inode->i_bdev)) + if (IS_ENABLED(CONFIG_BLOCK) && + io_bdev_nowait(I_BDEV(file->f_mapping->host))) return true; return false; } if (S_ISSOCK(mode)) return true; if (S_ISREG(mode)) { - if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) && + if (IS_ENABLED(CONFIG_BLOCK) && + io_bdev_nowait(file->f_inode->i_sb->s_bdev) && file->f_op != &io_uring_fops) return true; return false; @@ -2807,20 +2888,36 @@ static bool io_bdev_nowait(struct block_device *bdev) return file->f_op->write_iter != NULL; } -static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static bool io_file_supports_nowait(struct io_kiocb *req, int rw) +{ + if (rw == READ && (req->flags & REQ_F_NOWAIT_READ)) + return true; + else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE)) + return true; + + return __io_file_supports_nowait(req->file, rw); +} + +static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, + int rw) { struct io_ring_ctx *ctx = req->ctx; struct kiocb *kiocb = &req->rw.kiocb; + struct file *file = req->file; unsigned ioprio; int ret; - if (S_ISREG(file_inode(req->file)->i_mode)) + if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode)) req->flags |= REQ_F_ISREG; kiocb->ki_pos = READ_ONCE(sqe->off); - if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) { - req->flags |= REQ_F_CUR_POS; - kiocb->ki_pos = req->file->f_pos; + if (kiocb->ki_pos == -1) { + if (!(file->f_mode & FMODE_STREAM)) { + req->flags |= REQ_F_CUR_POS; + kiocb->ki_pos = file->f_pos; + } else { + kiocb->ki_pos = 0; + } } kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp)); kiocb->ki_flags = iocb_flags(kiocb->ki_filp); @@ -2828,6 +2925,15 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (unlikely(ret)) return ret; + /* + * If the file is marked O_NONBLOCK, still allow retry for it if it + * supports async. Otherwise it's impossible to use O_NONBLOCK files + * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
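/*
 * An illustrative userspace sketch, not from this patch: the io_prep_rw()
 * hunk that follows resolves the registered buffer (req->imu) at prep time
 * for IORING_OP_READ_FIXED/WRITE_FIXED. This is the userspace side of that
 * feature, assuming liburing; the file path is a placeholder:
 */
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int fd;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);
	/* pins the memory; the kernel builds an io_mapped_ubuf for index 0 */
	if (io_uring_register_buffers(&ring, &iov, 1))
		return 1;

	fd = open("/etc/hostname", O_RDONLY);
	sqe = io_uring_get_sqe(&ring);
	/* the last argument becomes sqe->buf_index, checked in io_prep_rw() */
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	io_uring_submit(&ring);

	if (!io_uring_wait_cqe(&ring, &cqe)) {
		printf("read_fixed returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}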
+ */ + if ((kiocb->ki_flags & IOCB_NOWAIT) || + ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw))) + req->flags |= REQ_F_NOWAIT; + ioprio = READ_ONCE(sqe->ioprio); if (ioprio) { ret = ioprio_check_cap(ioprio); @@ -2838,10 +2944,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) } else kiocb->ki_ioprio = get_current_ioprio(); - /* don't allow async punt if RWF_NOWAIT was requested */ - if (kiocb->ki_flags & IOCB_NOWAIT) - req->flags |= REQ_F_NOWAIT; - if (ctx->flags & IORING_SETUP_IOPOLL) { if (!(kiocb->ki_flags & IOCB_DIRECT) || !kiocb->ki_filp->f_op->iopoll) @@ -2856,9 +2958,24 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) kiocb->ki_complete = io_complete_rw; } + /* used for fixed read/write too - just read unconditionally */ + req->buf_index = READ_ONCE(sqe->buf_index); + req->imu = NULL; + + if (req->opcode == IORING_OP_READ_FIXED || + req->opcode == IORING_OP_WRITE_FIXED) { + struct io_ring_ctx *ctx = req->ctx; + u16 index; + + if (unlikely(req->buf_index >= ctx->nr_user_bufs)) + return -EFAULT; + index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); + req->imu = ctx->user_bufs[index]; + io_req_set_rsrc_node(req); + } + req->rw.addr = READ_ONCE(sqe->addr); req->rw.len = READ_ONCE(sqe->len); - req->buf_index = READ_ONCE(sqe->buf_index); return 0; } @@ -2884,48 +3001,49 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) } static void kiocb_done(struct kiocb *kiocb, ssize_t ret, - struct io_comp_state *cs) + unsigned int issue_flags) { struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); - struct io_async_rw *io = req->async_data; - - /* add previously done IO, if any */ - if (io && io->bytes_done > 0) { - if (ret < 0) - ret = io->bytes_done; - else - ret += io->bytes_done; - } if (req->flags & REQ_F_CUR_POS) req->file->f_pos = kiocb->ki_pos; - if (ret >= 0 && kiocb->ki_complete == io_complete_rw) - __io_complete_rw(req, ret, 0, cs); + if (ret >= 0 && (kiocb->ki_complete == io_complete_rw)) + __io_complete_rw(req, ret, 0, issue_flags); else io_rw_done(kiocb, ret); + + if (req->flags & REQ_F_REISSUE) { + req->flags &= ~REQ_F_REISSUE; + if (io_resubmit_prep(req)) { + io_req_task_queue_reissue(req); + } else { + unsigned int cflags = io_put_rw_kbuf(req); + struct io_ring_ctx *ctx = req->ctx; + + ret = io_fixup_rw_res(req, ret); + req_set_fail(req); + if (!(issue_flags & IO_URING_F_NONBLOCK)) { + mutex_lock(&ctx->uring_lock); + __io_req_complete(req, issue_flags, ret, cflags); + mutex_unlock(&ctx->uring_lock); + } else { + __io_req_complete(req, issue_flags, ret, cflags); + } + } + } } -static ssize_t io_import_fixed(struct io_kiocb *req, int rw, - struct iov_iter *iter) +static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter, + struct io_mapped_ubuf *imu) { - struct io_ring_ctx *ctx = req->ctx; size_t len = req->rw.len; - struct io_mapped_ubuf *imu; - u16 index, buf_index = req->buf_index; + u64 buf_end, buf_addr = req->rw.addr; size_t offset; - u64 buf_addr; - if (unlikely(buf_index >= ctx->nr_user_bufs)) - return -EFAULT; - index = array_index_nospec(buf_index, ctx->nr_user_bufs); - imu = &ctx->user_bufs[index]; - buf_addr = req->rw.addr; - - /* overflow */ - if (buf_addr + len < buf_addr) + if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end))) return -EFAULT; /* not inside the mapped region */ - if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len) + if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end)) return 
-EFAULT; /* @@ -2970,7 +3088,14 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw, } } - return len; + return 0; +} + +static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter) +{ + if (WARN_ON_ONCE(!req->imu)) + return -EFAULT; + return __io_import_fixed(req, rw, iter, req->imu); } static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock) @@ -3111,16 +3236,14 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, return __io_iov_buffer_select(req, iov, needs_lock); } -static ssize_t __io_import_iovec(int rw, struct io_kiocb *req, - struct iovec **iovec, struct iov_iter *iter, - bool needs_lock) +static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, + struct iov_iter *iter, bool needs_lock) { void __user *buf = u64_to_user_ptr(req->rw.addr); size_t sqe_len = req->rw.len; + u8 opcode = req->opcode; ssize_t ret; - u8 opcode; - opcode = req->opcode; if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { *iovec = NULL; return io_import_fixed(req, rw, iter); @@ -3145,10 +3268,8 @@ static ssize_t __io_import_iovec(int rw, struct io_kiocb *req, if (req->flags & REQ_F_BUFFER_SELECT) { ret = io_iov_buffer_select(req, *iovec, needs_lock); - if (!ret) { - ret = (*iovec)->iov_len; - iov_iter_init(iter, rw, *iovec, 1, ret); - } + if (!ret) + iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len); *iovec = NULL; return ret; } @@ -3157,18 +3278,6 @@ static ssize_t __io_import_iovec(int rw, struct io_kiocb *req, req->ctx->compat); } -static ssize_t io_import_iovec(int rw, struct io_kiocb *req, - struct iovec **iovec, struct iov_iter *iter, - bool needs_lock) -{ - struct io_async_rw *iorw = req->async_data; - - if (!iorw) - return __io_import_iovec(rw, req, iovec, iter, needs_lock); - *iovec = NULL; - return 0; -} - static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) { return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? 
NULL : &kiocb->ki_pos; @@ -3261,32 +3370,31 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, } } -static inline int __io_alloc_async_data(struct io_kiocb *req) +static inline int io_alloc_async_data(struct io_kiocb *req) { WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); return req->async_data == NULL; } -static int io_alloc_async_data(struct io_kiocb *req) -{ - if (!io_op_defs[req->opcode].needs_async_data) - return 0; - - return __io_alloc_async_data(req); -} - static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, const struct iovec *fast_iov, struct iov_iter *iter, bool force) { - if (!force && !io_op_defs[req->opcode].needs_async_data) + if (!force && !io_op_defs[req->opcode].needs_async_setup) return 0; if (!req->async_data) { - if (__io_alloc_async_data(req)) + struct io_async_rw *iorw; + + if (io_alloc_async_data(req)) { + kfree(iovec); return -ENOMEM; + } io_req_map_rw(req, iovec, fast_iov, iter); + iorw = req->async_data; + /* we've copied and mapped the iter, ensure state is saved */ + iov_iter_save_state(&iorw->iter, &iorw->iter_state); } return 0; } @@ -3295,9 +3403,9 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) { struct io_async_rw *iorw = req->async_data; struct iovec *iov = iorw->fast_iov; - ssize_t ret; + int ret; - ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false); + ret = io_import_iovec(rw, req, &iov, &iorw->iter, false); if (unlikely(ret < 0)) return ret; @@ -3305,24 +3413,15 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) iorw->free_iovec = iov; if (iov) req->flags |= REQ_F_NEED_CLEANUP; + iov_iter_save_state(&iorw->iter, &iorw->iter_state); return 0; } static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - ssize_t ret; - - ret = io_prep_rw(req, sqe); - if (ret) - return ret; - if (unlikely(!(req->file->f_mode & FMODE_READ))) return -EBADF; - - /* either don't need iovec imported or already have it */ - if (!req->async_data) - return 0; - return io_rw_prep_async(req, READ); + return io_prep_rw(req, sqe, READ); } /* @@ -3341,7 +3440,6 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, struct wait_page_queue *wpq; struct io_kiocb *req = wait->private; struct wait_page_key *key = arg; - int ret; wpq = container_of(wait, struct wait_page_queue, wait); @@ -3350,22 +3448,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, req->rw.kiocb.ki_flags &= ~IOCB_WAITQ; list_del_init(&wait->entry); - - init_task_work(&req->task_work, io_req_task_submit); - percpu_ref_get(&req->ctx->refs); - - /* submit ref gets dropped, acquire a new one */ - refcount_inc(&req->refs); - ret = io_req_task_work_add(req, true); - if (unlikely(ret)) { - struct task_struct *tsk; - - /* queue just for cancelation */ - init_task_work(&req->task_work, io_req_task_cancel); - tsk = io_wq_get_task(req->ctx->io_wq); - task_work_add(tsk, &req->task_work, TWA_NONE); - wake_up_process(tsk); - } + io_req_task_queue(req); return 1; } @@ -3412,7 +3495,7 @@ static bool io_rw_should_retry(struct io_kiocb *req) return true; } -static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) +static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) { if (req->file->f_op->read_iter) return call_read_iter(req->file, &req->rw.kiocb, iter); @@ -3422,25 +3505,40 @@ static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) 
return -EINVAL; } -static int io_read(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static bool need_read_all(struct io_kiocb *req) +{ + return req->flags & REQ_F_ISREG || + S_ISBLK(file_inode(req->file)->i_mode); +} + +static int io_read(struct io_kiocb *req, unsigned int issue_flags) { struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; struct io_async_rw *rw = req->async_data; - ssize_t io_size, ret, ret2; - bool no_async; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + struct iov_iter_state __state, *state; + ssize_t ret, ret2; - if (rw) + if (rw) { iter = &rw->iter; - - ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); - if (ret < 0) - return ret; - io_size = iov_iter_count(iter); - req->result = io_size; - ret = 0; + state = &rw->iter_state; + /* + * We come here from an earlier attempt, restore our state to + * match in case it doesn't. It's cheap enough that we don't + * need to make this conditional. + */ + iov_iter_restore(iter, state); + iovec = NULL; + } else { + ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); + if (ret < 0) + return ret; + state = &__state; + iov_iter_save_state(iter, state); + } + req->result = iov_iter_count(iter); /* Ensure we clear previously set non-block flag */ if (!force_nonblock) @@ -3448,125 +3546,130 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, else kiocb->ki_flags |= IOCB_NOWAIT; - /* If the file doesn't support async, just async punt */ - no_async = force_nonblock && !io_file_supports_async(req->file, READ); - if (no_async) - goto copy_iov; + if (force_nonblock && !io_file_supports_nowait(req, READ)) { + ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true); + return ret ?: -EAGAIN; + } - ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size); - if (unlikely(ret)) - goto out_free; + ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result); + if (unlikely(ret)) { + kfree(iovec); + return ret; + } ret = io_iter_do_read(req, iter); - if (!ret) { - goto done; - } else if (ret == -EIOCBQUEUED) { - ret = 0; - goto out_free; - } else if (ret == -EAGAIN) { + if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { + req->flags &= ~REQ_F_REISSUE; /* IOPOLL retry should happen for io-wq threads */ if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) goto done; - /* no retry on NONBLOCK marked file */ - if (req->file->f_flags & O_NONBLOCK) + /* no retry on NONBLOCK nor RWF_NOWAIT */ + if (req->flags & REQ_F_NOWAIT) goto done; - /* some cases will consume bytes even on error returns */ - iov_iter_revert(iter, io_size - iov_iter_count(iter)); ret = 0; - goto copy_iov; - } else if (ret < 0) { - /* make sure -ERESTARTSYS -> -EINTR is done */ - goto done; - } - - /* read it all, or we did blocking attempt. no retry. 
*/ - if (!iov_iter_count(iter) || !force_nonblock || - (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG)) - goto done; - - io_size -= ret; -copy_iov: - ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); - if (ret2) { - ret = ret2; + } else if (ret == -EIOCBQUEUED) { goto out_free; - } - if (no_async) - return -EAGAIN; - rw = req->async_data; - /* it's copied and will be cleaned with ->io */ - iovec = NULL; - /* now use our persistent iterator, if we aren't already */ - iter = &rw->iter; -retry: - rw->bytes_done += ret; - /* if we can retry, do so with the callbacks armed */ - if (!io_rw_should_retry(req)) { - kiocb->ki_flags &= ~IOCB_WAITQ; - return -EAGAIN; + } else if (ret <= 0 || ret == req->result || !force_nonblock || + (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) { + /* read all, failed, already did sync or don't want to retry */ + goto done; } /* - * Now retry read with the IOCB_WAITQ parts set in the iocb. If we - * get -EIOCBQUEUED, then we'll get a notification when the desired - * page gets unlocked. We can also get a partial read here, and if we - * do, then just retry at the new offset. + * Don't depend on the iter state matching what was consumed, or being + * untouched in case of error. Restore it and we'll advance it + * manually if we need to. */ - ret = io_iter_do_read(req, iter); - if (ret == -EIOCBQUEUED) { - ret = 0; - goto out_free; - } else if (ret > 0 && ret < io_size) { + iov_iter_restore(iter, state); + + ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); + if (ret2) + return ret2; + + iovec = NULL; + rw = req->async_data; + /* + * Now use our persistent iterator and state, if we aren't already. + * We've restored and mapped the iter to match. + */ + if (iter != &rw->iter) { + iter = &rw->iter; + state = &rw->iter_state; + } + + do { + /* + * We end up here because of a partial read, either from + * above or inside this loop. Advance the iter by the bytes + * that were consumed. + */ + iov_iter_advance(iter, ret); + if (!iov_iter_count(iter)) + break; + rw->bytes_done += ret; + iov_iter_save_state(iter, state); + + /* if we can retry, do so with the callbacks armed */ + if (!io_rw_should_retry(req)) { + kiocb->ki_flags &= ~IOCB_WAITQ; + return -EAGAIN; + } + + req->result = iov_iter_count(iter); + /* + * Now retry read with the IOCB_WAITQ parts set in the iocb. If + * we get -EIOCBQUEUED, then we'll get a notification when the + * desired page gets unlocked. We can also get a partial read + * here, and if we do, then just retry at the new offset. + */ + ret = io_iter_do_read(req, iter); + if (ret == -EIOCBQUEUED) + return 0; /* we got some bytes, but not all. retry. 
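/*
 * An illustrative userspace sketch, not from this patch: the retry loop
 * above resumes a short read by restoring the saved iterator state and
 * then advancing it by the bytes already consumed (iov_iter_restore() +
 * iov_iter_advance()). The same resume-after-short-read pattern in plain
 * userspace C:
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

/* read until 'total' bytes are done, advancing the vector on short reads */
static ssize_t readv_all(int fd, struct iovec *iov, int iovcnt, size_t total)
{
	size_t done = 0;

	while (done < total) {
		ssize_t ret = readv(fd, iov, iovcnt);

		if (ret <= 0)	/* error or EOF: report what we got, if anything */
			return done ? (ssize_t)done : ret;
		done += ret;
		/* the userspace equivalent of iov_iter_advance(iter, ret) */
		while (ret > 0 && (size_t)ret >= iov->iov_len) {
			ret -= iov->iov_len;
			iov++;
			iovcnt--;
		}
		if (ret > 0) {
			iov->iov_base = (char *)iov->iov_base + ret;
			iov->iov_len -= ret;
		}
	}
	return done;
}

int main(void)
{
	char a[3], b[5];
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	int fd = open("/dev/zero", O_RDONLY);

	printf("read %zd bytes\n", readv_all(fd, iov, 2, sizeof(a) + sizeof(b)));
	return 0;
}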
*/ kiocb->ki_flags &= ~IOCB_WAITQ; - goto retry; - } + iov_iter_restore(iter, state); + } while (ret > 0); done: - kiocb_done(kiocb, ret, cs); - ret = 0; + kiocb_done(kiocb, ret, issue_flags); out_free: - /* it's reportedly faster than delegating the null check to kfree() */ + /* it's faster to check here than delegate to kfree */ if (iovec) kfree(iovec); - return ret; + return 0; } static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - ssize_t ret; - - ret = io_prep_rw(req, sqe); - if (ret) - return ret; - if (unlikely(!(req->file->f_mode & FMODE_WRITE))) return -EBADF; - - /* either don't need iovec imported or already have it */ - if (!req->async_data) - return 0; - return io_rw_prep_async(req, WRITE); + return io_prep_rw(req, sqe, WRITE); } -static int io_write(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_write(struct io_kiocb *req, unsigned int issue_flags) { struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; struct io_async_rw *rw = req->async_data; - ssize_t ret, ret2, io_size; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + struct iov_iter_state __state, *state; + ssize_t ret, ret2; - if (rw) + if (rw) { iter = &rw->iter; - - ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); - if (ret < 0) - return ret; - io_size = iov_iter_count(iter); - req->result = io_size; + state = &rw->iter_state; + iov_iter_restore(iter, state); + iovec = NULL; + } else { + ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); + if (ret < 0) + return ret; + state = &__state; + iov_iter_save_state(iter, state); + } + req->result = iov_iter_count(iter); /* Ensure we clear previously set non-block flag */ if (!force_nonblock) @@ -3575,7 +3678,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, kiocb->ki_flags |= IOCB_NOWAIT; /* If the file doesn't support async, just async punt */ - if (force_nonblock && !io_file_supports_async(req->file, WRITE)) + if (force_nonblock && !io_file_supports_nowait(req, WRITE)) goto copy_iov; /* file path doesn't support NOWAIT for non-direct_IO */ @@ -3583,7 +3686,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, (req->flags & REQ_F_ISREG)) goto copy_iov; - ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size); + ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result); if (unlikely(ret)) goto out_free; @@ -3608,28 +3711,36 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, else ret2 = -EINVAL; + if (req->flags & REQ_F_REISSUE) { + req->flags &= ~REQ_F_REISSUE; + ret2 = -EAGAIN; + } + /* * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just * retry them without IOCB_NOWAIT.
*/ if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) ret2 = -EAGAIN; - /* no retry on NONBLOCK marked file */ - if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK)) + /* no retry on NONBLOCK nor RWF_NOWAIT */ + if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) goto done; if (!force_nonblock || ret2 != -EAGAIN) { /* IOPOLL retry should happen for io-wq threads */ if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) goto copy_iov; done: - kiocb_done(kiocb, ret2, cs); + kiocb_done(kiocb, ret2, issue_flags); } else { copy_iov: - /* some cases will consume bytes even on error returns */ - iov_iter_revert(iter, io_size - iov_iter_count(iter)); + iov_iter_restore(iter, state); ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); - if (!ret) + if (!ret) { + if (kiocb->ki_flags & IOCB_WRITE) + kiocb_end_write(req); return -EAGAIN; + } + return ret; } out_free: /* it's reportedly faster than delegating the null check to kfree() */ @@ -3638,37 +3749,160 @@ out_free: return ret; } +static int io_renameat_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + struct io_rename *ren = &req->rename; + const char __user *oldf, *newf; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in) + return -EINVAL; + if (unlikely(req->flags & REQ_F_FIXED_FILE)) + return -EBADF; + + ren->old_dfd = READ_ONCE(sqe->fd); + oldf = u64_to_user_ptr(READ_ONCE(sqe->addr)); + newf = u64_to_user_ptr(READ_ONCE(sqe->addr2)); + ren->new_dfd = READ_ONCE(sqe->len); + ren->flags = READ_ONCE(sqe->rename_flags); + + ren->oldpath = getname(oldf); + if (IS_ERR(ren->oldpath)) + return PTR_ERR(ren->oldpath); + + ren->newpath = getname(newf); + if (IS_ERR(ren->newpath)) { + putname(ren->oldpath); + return PTR_ERR(ren->newpath); + } + + req->flags |= REQ_F_NEED_CLEANUP; + return 0; +} + +static int io_renameat(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_rename *ren = &req->rename; + int ret; + + if (issue_flags & IO_URING_F_NONBLOCK) + return -EAGAIN; + + ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd, + ren->newpath, ren->flags); + + req->flags &= ~REQ_F_NEED_CLEANUP; + if (ret < 0) + req_set_fail(req); + io_req_complete(req, ret); + return 0; +} + +static int io_unlinkat_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + struct io_unlink *un = &req->unlink; + const char __user *fname; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || + sqe->splice_fd_in) + return -EINVAL; + if (unlikely(req->flags & REQ_F_FIXED_FILE)) + return -EBADF; + + un->dfd = READ_ONCE(sqe->fd); + + un->flags = READ_ONCE(sqe->unlink_flags); + if (un->flags & ~AT_REMOVEDIR) + return -EINVAL; + + fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); + un->filename = getname(fname); + if (IS_ERR(un->filename)) + return PTR_ERR(un->filename); + + req->flags |= REQ_F_NEED_CLEANUP; + return 0; +} + +static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_unlink *un = &req->unlink; + int ret; + + if (issue_flags & IO_URING_F_NONBLOCK) + return -EAGAIN; + + if (un->flags & AT_REMOVEDIR) + ret = do_rmdir(un->dfd, un->filename); + else + ret = do_unlinkat(un->dfd, un->filename); + + req->flags &= ~REQ_F_NEED_CLEANUP; + if (ret < 0) + req_set_fail(req); + io_req_complete(req, ret); + return 0; +} + +static int io_shutdown_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ +#if 
defined(CONFIG_NET) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags || + sqe->buf_index || sqe->splice_fd_in)) + return -EINVAL; + + req->shutdown.how = READ_ONCE(sqe->len); + return 0; +#else + return -EOPNOTSUPP; +#endif +} + +static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags) +{ +#if defined(CONFIG_NET) + struct socket *sock; + int ret; + + if (issue_flags & IO_URING_F_NONBLOCK) + return -EAGAIN; + + sock = sock_from_file(req->file, &ret); + if (unlikely(!sock)) + return ret; + + ret = __sys_shutdown_sock(sock, req->shutdown.how); + if (ret < 0) + req_set_fail(req); + io_req_complete(req, ret); + return 0; +#else + return -EOPNOTSUPP; +#endif +} + static int __io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - struct io_splice* sp = &req->splice; + struct io_splice *sp = &req->splice; unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL; if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - sp->file_in = NULL; sp->len = READ_ONCE(sqe->len); sp->flags = READ_ONCE(sqe->splice_flags); - if (unlikely(sp->flags & ~valid_flags)) return -EINVAL; - - sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), - (sp->flags & SPLICE_F_FD_IN_FIXED)); - if (!sp->file_in) - return -EBADF; - req->flags |= REQ_F_NEED_CLEANUP; - - if (!S_ISREG(file_inode(sp->file_in)->i_mode)) { - /* - * Splice operation will be punted aync, and here need to - * modify io_wq_work.flags, so initialize io_wq_work firstly. - */ - io_req_init_async(req); - req->work.flags |= IO_WQ_WORK_UNBOUND; - } - + sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in); return 0; } @@ -3680,60 +3914,75 @@ static int io_tee_prep(struct io_kiocb *req, return __io_splice_prep(req, sqe); } -static int io_tee(struct io_kiocb *req, bool force_nonblock) +static int io_tee(struct io_kiocb *req, unsigned int issue_flags) { struct io_splice *sp = &req->splice; - struct file *in = sp->file_in; struct file *out = sp->file_out; unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED; + struct file *in; long ret = 0; - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; + + in = io_file_get(req->ctx, req, sp->splice_fd_in, + (sp->flags & SPLICE_F_FD_IN_FIXED)); + if (!in) { + ret = -EBADF; + goto done; + } + if (sp->len) ret = do_tee(in, out, sp->len, flags); - io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); - req->flags &= ~REQ_F_NEED_CLEANUP; - + if (!(sp->flags & SPLICE_F_FD_IN_FIXED)) + io_put_file(in); +done: if (ret != sp->len) - req_set_fail_links(req); + req_set_fail(req); io_req_complete(req, ret); return 0; } static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - struct io_splice* sp = &req->splice; + struct io_splice *sp = &req->splice; sp->off_in = READ_ONCE(sqe->splice_off_in); sp->off_out = READ_ONCE(sqe->off); return __io_splice_prep(req, sqe); } -static int io_splice(struct io_kiocb *req, bool force_nonblock) +static int io_splice(struct io_kiocb *req, unsigned int issue_flags) { struct io_splice *sp = &req->splice; - struct file *in = sp->file_in; struct file *out = sp->file_out; unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED; loff_t *poff_in, *poff_out; + struct file *in; long ret = 0; - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; + in = io_file_get(req->ctx, req, sp->splice_fd_in, + (sp->flags & SPLICE_F_FD_IN_FIXED)); + if (!in) { + ret = -EBADF; + goto done; + } + 
poff_in = (sp->off_in == -1) ? NULL : &sp->off_in; poff_out = (sp->off_out == -1) ? NULL : &sp->off_out; if (sp->len) ret = do_splice(in, poff_in, out, poff_out, sp->len, flags); - io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); - req->flags &= ~REQ_F_NEED_CLEANUP; - + if (!(sp->flags & SPLICE_F_FD_IN_FIXED)) + io_put_file(in); +done: if (ret != sp->len) - req_set_fail_links(req); + req_set_fail(req); io_req_complete(req, ret); return 0; } @@ -3741,24 +3990,21 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock) /* * IORING_OP_NOP just posts a completion event, nothing else. */ -static int io_nop(struct io_kiocb *req, struct io_comp_state *cs) +static int io_nop(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - __io_req_complete(req, 0, 0, cs); + __io_req_complete(req, issue_flags, 0, 0); return 0; } -static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_ring_ctx *ctx = req->ctx; - if (!req->file) - return -EBADF; - if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index || @@ -3774,20 +4020,20 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static int io_fsync(struct io_kiocb *req, bool force_nonblock) +static int io_fsync(struct io_kiocb *req, unsigned int issue_flags) { loff_t end = req->sync.off + req->sync.len; int ret; /* fsync always requires a blocking context */ - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; ret = vfs_fsync_range(req->file, req->sync.off, end > 0 ? 
end : LLONG_MAX, req->sync.flags & IORING_FSYNC_DATASYNC); if (ret < 0) - req_set_fail_links(req); + req_set_fail(req); io_req_complete(req, ret); return 0; } @@ -3807,17 +4053,19 @@ static int io_fallocate_prep(struct io_kiocb *req, return 0; } -static int io_fallocate(struct io_kiocb *req, bool force_nonblock) +static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) { int ret; /* fallocate always requiring blocking context */ - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off, req->sync.len); if (ret < 0) - req_set_fail_links(req); + req_set_fail(req); + else + fsnotify_modify(req->file); io_req_complete(req, ret); return 0; } @@ -3827,7 +4075,9 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe const char __user *fname; int ret; - if (unlikely(sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (unlikely(sqe->ioprio || sqe->buf_index)) return -EINVAL; if (unlikely(req->flags & REQ_F_FIXED_FILE)) return -EBADF; @@ -3844,20 +4094,21 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe req->open.filename = NULL; return ret; } + + req->open.file_slot = READ_ONCE(sqe->file_index); + if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC)) + return -EINVAL; + req->open.nofile = rlimit(RLIMIT_NOFILE); - req->open.ignore_nonblock = false; req->flags |= REQ_F_NEED_CLEANUP; return 0; } static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - u64 flags, mode; + u64 mode = READ_ONCE(sqe->len); + u64 flags = READ_ONCE(sqe->open_flags); - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) - return -EINVAL; - mode = READ_ONCE(sqe->len); - flags = READ_ONCE(sqe->open_flags); req->open.how = build_open_how(flags, mode); return __io_openat_prep(req, sqe); } @@ -3868,8 +4119,6 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) size_t len; int ret; - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) - return -EINVAL; how = u64_to_user_ptr(READ_ONCE(sqe->addr2)); len = READ_ONCE(sqe->len); if (len < OPEN_HOW_SIZE_VER0) @@ -3883,58 +4132,75 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return __io_openat_prep(req, sqe); } -static int io_openat2(struct io_kiocb *req, bool force_nonblock) +static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) { struct open_flags op; struct file *file; + bool resolve_nonblock, nonblock_set; + bool fixed = !!req->open.file_slot; int ret; - if (force_nonblock && !req->open.ignore_nonblock) - return -EAGAIN; - ret = build_open_flags(&req->open.how, &op); if (ret) goto err; + nonblock_set = op.open_flag & O_NONBLOCK; + resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED; + if (issue_flags & IO_URING_F_NONBLOCK) { + /* + * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open, + * it'll always -EAGAIN + */ + if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE)) + return -EAGAIN; + op.lookup_flags |= LOOKUP_CACHED; + op.open_flag |= O_NONBLOCK; + } - ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); - if (ret < 0) - goto err; + if (!fixed) { + ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); + if (ret < 0) + goto err; + } file = do_filp_open(req->open.dfd, req->open.filename, &op); if (IS_ERR(file)) { - put_unused_fd(ret); 
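/*
 * An illustrative userspace sketch, not from this patch: the LOOKUP_CACHED
 * logic above has a direct userspace counterpart. openat2(2) with
 * RESOLVE_CACHED fails with EAGAIN instead of blocking when the path is
 * not fully cached in the dcache, which is the contract io_openat2()
 * relies on. Requires uapi headers that define RESOLVE_CACHED; glibc has
 * no wrapper, so the raw syscall is used:
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/openat2.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct open_how how = {
		.flags = O_RDONLY,
		.resolve = RESOLVE_CACHED,	/* EAGAIN on a dcache miss */
	};
	long fd = syscall(SYS_openat2, AT_FDCWD, "/etc/hostname", &how, sizeof(how));

	if (fd < 0)
		perror("openat2");	/* on EAGAIN, retry without RESOLVE_CACHED */
	else
		close(fd);
	return 0;
}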
- ret = PTR_ERR(file); /* - * A work-around to ensure that /proc/self works that way - * that it should - if we get -EOPNOTSUPP back, then assume - * that proc_self_get_link() failed us because we're in async - * context. We should be safe to retry this from the task - * itself with force_nonblock == false set, as it should not - * block on lookup. Would be nice to know this upfront and - * avoid the async dance, but doesn't seem feasible. + * We could hang on to this 'fd' on retrying, but seems like + * marginal gain for something that is now known to be a slower + * path. So just put it, and we'll get a new one when we retry. */ - if (ret == -EOPNOTSUPP && io_wq_current_is_worker()) { - req->open.ignore_nonblock = true; - refcount_inc(&req->refs); - io_req_task_queue(req); - return 0; - } - } else { - fsnotify_open(file); - fd_install(ret, file); + if (!fixed) + put_unused_fd(ret); + + ret = PTR_ERR(file); + /* only retry if RESOLVE_CACHED wasn't already set by application */ + if (ret == -EAGAIN && + (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK))) + return -EAGAIN; + goto err; } + + if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set) + file->f_flags &= ~O_NONBLOCK; + fsnotify_open(file); + + if (!fixed) + fd_install(ret, file); + else + ret = io_install_fixed_file(req, file, issue_flags, + req->open.file_slot - 1); err: putname(req->open.filename); req->flags &= ~REQ_F_NEED_CLEANUP; if (ret < 0) - req_set_fail_links(req); - io_req_complete(req, ret); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, 0); return 0; } -static int io_openat(struct io_kiocb *req, bool force_nonblock) +static int io_openat(struct io_kiocb *req, unsigned int issue_flags) { - return io_openat2(req, force_nonblock); + return io_openat2(req, issue_flags); } static int io_remove_buffers_prep(struct io_kiocb *req, @@ -3975,6 +4241,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf, kfree(nxt); if (++i == nbufs) return i; + cond_resched(); } i++; kfree(buf); @@ -3983,13 +4250,13 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf, return i; } -static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) { struct io_provide_buf *p = &req->pbuf; struct io_ring_ctx *ctx = req->ctx; struct io_buffer *head; int ret = 0; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; io_ring_submit_lock(ctx, !force_nonblock); @@ -4000,16 +4267,11 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock, if (head) ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs); if (ret < 0) - req_set_fail_links(req); + req_set_fail(req); - /* need to hold the lock to complete IOPOLL requests */ - if (ctx->flags & IORING_SETUP_IOPOLL) { - __io_req_complete(req, ret, 0, cs); - io_ring_submit_unlock(ctx, !force_nonblock); - } else { - io_ring_submit_unlock(ctx, !force_nonblock); - __io_req_complete(req, ret, 0, cs); - } + /* complete before unlock, IOPOLL may need the lock */ + __io_req_complete(req, issue_flags, ret, 0); + io_ring_submit_unlock(ctx, !force_nonblock); return 0; } @@ -4076,13 +4338,13 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head) return i ? 
i : -ENOMEM; } -static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) { struct io_provide_buf *p = &req->pbuf; struct io_ring_ctx *ctx = req->ctx; struct io_buffer *head, *list; int ret = 0; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; io_ring_submit_lock(ctx, !force_nonblock); @@ -4092,21 +4354,16 @@ static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock, ret = io_add_buffers(p, &head); if (ret >= 0 && !list) { - ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL); + ret = xa_insert(&ctx->io_buffers, p->bgid, head, + GFP_KERNEL_ACCOUNT); if (ret < 0) __io_remove_buffers(ctx, head, p->bgid, -1U); } if (ret < 0) - req_set_fail_links(req); - - /* need to hold the lock to complete IOPOLL requests */ - if (ctx->flags & IORING_SETUP_IOPOLL) { - __io_req_complete(req, ret, 0, cs); - io_ring_submit_unlock(ctx, !force_nonblock); - } else { - io_ring_submit_unlock(ctx, !force_nonblock); - __io_req_complete(req, ret, 0, cs); - } + req_set_fail(req); + /* complete before unlock, IOPOLL may need the lock */ + __io_req_complete(req, issue_flags, ret, 0); + io_ring_submit_unlock(ctx, !force_nonblock); return 0; } @@ -4116,7 +4373,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req, #if defined(CONFIG_EPOLL) if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in) return -EINVAL; - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL))) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; req->epoll.epfd = READ_ONCE(sqe->fd); @@ -4137,20 +4394,20 @@ static int io_epoll_ctl_prep(struct io_kiocb *req, #endif } -static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags) { #if defined(CONFIG_EPOLL) struct io_epoll *ie = &req->epoll; int ret; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock); if (force_nonblock && ret == -EAGAIN) return -EAGAIN; if (ret < 0) - req_set_fail_links(req); - __io_req_complete(req, ret, 0, cs); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, 0); return 0; #else return -EOPNOTSUPP; @@ -4174,18 +4431,18 @@ static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) #endif } -static int io_madvise(struct io_kiocb *req, bool force_nonblock) +static int io_madvise(struct io_kiocb *req, unsigned int issue_flags) { #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU) struct io_madvise *ma = &req->madvise; int ret; - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice); if (ret < 0) - req_set_fail_links(req); + req_set_fail(req); io_req_complete(req, ret); return 0; #else @@ -4206,12 +4463,12 @@ static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static int io_fadvise(struct io_kiocb *req, bool force_nonblock) +static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags) { struct io_fadvise *fa = &req->fadvise; int ret; - if (force_nonblock) { + if (issue_flags & IO_URING_F_NONBLOCK) { switch (fa->advice) { case POSIX_FADV_NORMAL: case POSIX_FADV_RANDOM: @@ -4224,14 +4481,14 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock) ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); if (ret < 0) - 
req_set_fail_links(req); - io_req_complete(req, ret); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, 0); return 0; } static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL))) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in) return -EINVAL; @@ -4247,93 +4504,96 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static int io_statx(struct io_kiocb *req, bool force_nonblock) +static int io_statx(struct io_kiocb *req, unsigned int issue_flags) { struct io_statx *ctx = &req->statx; int ret; - if (force_nonblock) { - /* only need file table for an actual valid fd */ - if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD) - req->flags |= REQ_F_NO_FILE_TABLE; + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; - } ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask, ctx->buffer); if (ret < 0) - req_set_fail_links(req); + req_set_fail(req); io_req_complete(req, ret); return 0; } static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - /* - * If we queue this for async, it must not be cancellable. That would - * leave the 'file' in an undeterminate state, and here need to modify - * io_wq_work.flags, so initialize io_wq_work firstly. - */ - io_req_init_async(req); - - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (sqe->ioprio || sqe->off || sqe->addr || sqe->len || - sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in) + sqe->rw_flags || sqe->buf_index) return -EINVAL; if (req->flags & REQ_F_FIXED_FILE) return -EBADF; req->close.fd = READ_ONCE(sqe->fd); - if ((req->file && req->file->f_op == &io_uring_fops)) - return -EBADF; + req->close.file_slot = READ_ONCE(sqe->file_index); + if (req->close.file_slot && req->close.fd) + return -EINVAL; - req->close.put_file = NULL; return 0; } -static int io_close(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_close(struct io_kiocb *req, unsigned int issue_flags) { + struct files_struct *files = current->files; struct io_close *close = &req->close; - int ret; + struct fdtable *fdt; + struct file *file = NULL; + int ret = -EBADF; - /* might be already done during nonblock submission */ - if (!close->put_file) { - ret = __close_fd_get_file(close->fd, &close->put_file); - if (ret < 0) - return (ret == -ENOENT) ? 
-EBADF : ret; + if (req->close.file_slot) { + ret = io_close_fixed(req, issue_flags); + goto err; + } + + spin_lock(&files->file_lock); + fdt = files_fdtable(files); + if (close->fd >= fdt->max_fds) { + spin_unlock(&files->file_lock); + goto err; + } + file = fdt->fd[close->fd]; + if (!file || file->f_op == &io_uring_fops) { + spin_unlock(&files->file_lock); + file = NULL; + goto err; } /* if the file has a flush method, be safe and punt to async */ - if (close->put_file->f_op->flush && force_nonblock) { - /* not safe to cancel at this point */ - req->work.flags |= IO_WQ_WORK_NO_CANCEL; - /* was never set, but play safe */ - req->flags &= ~REQ_F_NOWAIT; - /* avoid grabbing files - we don't need the files */ - req->flags |= REQ_F_NO_FILE_TABLE; + if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) { + spin_unlock(&files->file_lock); return -EAGAIN; } + ret = __close_fd_get_file(close->fd, &file); + spin_unlock(&files->file_lock); + if (ret < 0) { + if (ret == -ENOENT) + ret = -EBADF; + goto err; + } + /* No ->flush() or already async, safely close from here */ - ret = filp_close(close->put_file, req->work.identity->files); + ret = filp_close(file, current->files); +err: if (ret < 0) - req_set_fail_links(req); - fput(close->put_file); - close->put_file = NULL; - __io_req_complete(req, ret, 0, cs); + req_set_fail(req); + if (file) + fput(file); + __io_req_complete(req, issue_flags, ret, 0); return 0; } -static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_ring_ctx *ctx = req->ctx; - if (!req->file) - return -EBADF; - if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index || @@ -4346,18 +4606,18 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock) +static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) { int ret; /* sync_file_range always requires a blocking context */ - if (force_nonblock) + if (issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; ret = sync_file_range(req->file, req->sync.off, req->sync.len, req->sync.flags); if (ret < 0) - req_set_fail_links(req); + req_set_fail(req); io_req_complete(req, ret); return 0; } @@ -4371,53 +4631,65 @@ static int io_setup_async_msg(struct io_kiocb *req, if (async_msg) return -EAGAIN; if (io_alloc_async_data(req)) { - if (kmsg->iov != kmsg->fast_iov) - kfree(kmsg->iov); + kfree(kmsg->free_iov); return -ENOMEM; } async_msg = req->async_data; req->flags |= REQ_F_NEED_CLEANUP; memcpy(async_msg, kmsg, sizeof(*kmsg)); + if (async_msg->msg.msg_name) + async_msg->msg.msg_name = &async_msg->addr; + /* if we were using fast_iov, set it to the new one */ + if (!async_msg->free_iov) + async_msg->msg.msg_iter.iov = async_msg->fast_iov; + return -EAGAIN; } static int io_sendmsg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg) { - iomsg->iov = iomsg->fast_iov; iomsg->msg.msg_name = &iomsg->addr; + iomsg->free_iov = iomsg->fast_iov; return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg, - req->sr_msg.msg_flags, &iomsg->iov); + req->sr_msg.msg_flags, &iomsg->free_iov); } -static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_sendmsg_prep_async(struct io_kiocb *req) { - struct io_async_msghdr *async_msg = req->async_data; - struct io_sr_msg *sr = &req->sr_msg; int ret; - if
(unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - - sr->msg_flags = READ_ONCE(sqe->msg_flags); - sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); - sr->len = READ_ONCE(sqe->len); - -#ifdef CONFIG_COMPAT - if (req->ctx->compat) - sr->msg_flags |= MSG_CMSG_COMPAT; -#endif - - if (!async_msg || !io_op_defs[req->opcode].needs_async_data) - return 0; - ret = io_sendmsg_copy_hdr(req, async_msg); + ret = io_sendmsg_copy_hdr(req, req->async_data); if (!ret) req->flags |= REQ_F_NEED_CLEANUP; return ret; } -static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_sr_msg *sr = &req->sr_msg; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio)) + return -EINVAL; + + sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); + sr->len = READ_ONCE(sqe->len); + sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; + if (sr->msg_flags & MSG_DONTWAIT) + req->flags |= REQ_F_NOWAIT; + +#ifdef CONFIG_COMPAT + if (req->ctx->compat) + sr->msg_flags |= MSG_CMSG_COMPAT; +#endif + return 0; +} + +static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) { struct io_async_msghdr iomsg, *kmsg; struct socket *sock; @@ -4429,46 +4701,37 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, if (unlikely(!sock)) return ret; - if (req->async_data) { - kmsg = req->async_data; - kmsg->msg.msg_name = &kmsg->addr; - /* if iov is set, it's allocated already */ - if (!kmsg->iov) - kmsg->iov = kmsg->fast_iov; - kmsg->msg.msg_iter.iov = kmsg->iov; - } else { + kmsg = req->async_data; + if (!kmsg) { ret = io_sendmsg_copy_hdr(req, &iomsg); if (ret) return ret; kmsg = &iomsg; } - flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) + flags = req->sr_msg.msg_flags; + if (issue_flags & IO_URING_F_NONBLOCK) flags |= MSG_DONTWAIT; - if (flags & MSG_WAITALL) min_ret = iov_iter_count(&kmsg->msg.msg_iter); ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); - if (force_nonblock && ret == -EAGAIN) + if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN) return io_setup_async_msg(req, kmsg); if (ret == -ERESTARTSYS) ret = -EINTR; - if (kmsg->iov != kmsg->fast_iov) - kfree(kmsg->iov); + /* fast path, check for non-NULL to avoid function call */ + if (kmsg->free_iov) + kfree(kmsg->free_iov); req->flags &= ~REQ_F_NEED_CLEANUP; if (ret < min_ret) - req_set_fail_links(req); - __io_req_complete(req, ret, 0, cs); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, 0); return 0; } -static int io_send(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_send(struct io_kiocb *req, unsigned int issue_flags) { struct io_sr_msg *sr = &req->sr_msg; struct msghdr msg; @@ -4491,25 +4754,22 @@ static int io_send(struct io_kiocb *req, bool force_nonblock, msg.msg_controllen = 0; msg.msg_namelen = 0; - flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) + flags = req->sr_msg.msg_flags; + if (issue_flags & IO_URING_F_NONBLOCK) flags |= MSG_DONTWAIT; - if (flags & MSG_WAITALL) min_ret = iov_iter_count(&msg.msg_iter); msg.msg_flags = flags; ret = sock_sendmsg(sock, &msg); - if (force_nonblock && ret == -EAGAIN) + if ((issue_flags &
IO_URING_F_NONBLOCK) && ret == -EAGAIN) return -EAGAIN; if (ret == -ERESTARTSYS) ret = -EINTR; if (ret < min_ret) - req_set_fail_links(req); - __io_req_complete(req, ret, 0, cs); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, 0); return 0; } @@ -4529,15 +4789,14 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req, if (req->flags & REQ_F_BUFFER_SELECT) { if (iov_len > 1) return -EINVAL; - if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov))) + if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov))) return -EFAULT; - sr->len = iomsg->iov[0].iov_len; - iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1, - sr->len); - iomsg->iov = NULL; + sr->len = iomsg->fast_iov[0].iov_len; + iomsg->free_iov = NULL; } else { + iomsg->free_iov = iomsg->fast_iov; ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV, - &iomsg->iov, &iomsg->msg.msg_iter, + &iomsg->free_iov, &iomsg->msg.msg_iter, false); if (ret > 0) ret = 0; @@ -4550,16 +4809,14 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg) { - struct compat_msghdr __user *msg_compat; struct io_sr_msg *sr = &req->sr_msg; struct compat_iovec __user *uiov; compat_uptr_t ptr; compat_size_t len; int ret; - msg_compat = (struct compat_msghdr __user *) sr->umsg; - ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr, - &ptr, &len); + ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr, + &ptr, &len); if (ret) return ret; @@ -4576,11 +4833,11 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, if (clen < 0) return -EINVAL; sr->len = clen; - iomsg->iov[0].iov_len = clen; - iomsg->iov = NULL; + iomsg->free_iov = NULL; } else { + iomsg->free_iov = iomsg->fast_iov; ret = __import_iovec(READ, (struct iovec __user *)uiov, len, - UIO_FASTIOV, &iomsg->iov, + UIO_FASTIOV, &iomsg->free_iov, &iomsg->msg.msg_iter, true); if (ret < 0) return ret; @@ -4594,7 +4851,6 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg) { iomsg->msg.msg_name = &iomsg->addr; - iomsg->iov = iomsg->fast_iov; #ifdef CONFIG_COMPAT if (req->ctx->compat) @@ -4624,36 +4880,42 @@ static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) return io_put_kbuf(req, req->sr_msg.kbuf); } -static int io_recvmsg_prep(struct io_kiocb *req, - const struct io_uring_sqe *sqe) +static int io_recvmsg_prep_async(struct io_kiocb *req) { - struct io_async_msghdr *async_msg = req->async_data; - struct io_sr_msg *sr = &req->sr_msg; int ret; - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - - sr->msg_flags = READ_ONCE(sqe->msg_flags); - sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); - sr->len = READ_ONCE(sqe->len); - sr->bgid = READ_ONCE(sqe->buf_group); - -#ifdef CONFIG_COMPAT - if (req->ctx->compat) - sr->msg_flags |= MSG_CMSG_COMPAT; -#endif - - if (!async_msg || !io_op_defs[req->opcode].needs_async_data) - return 0; - ret = io_recvmsg_copy_hdr(req, async_msg); + ret = io_recvmsg_copy_hdr(req, req->async_data); if (!ret) req->flags |= REQ_F_NEED_CLEANUP; return ret; } -static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_sr_msg *sr = &req->sr_msg; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio)) + 
return -EINVAL; + + sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); + sr->len = READ_ONCE(sqe->len); + sr->bgid = READ_ONCE(sqe->buf_group); + sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; + if (sr->msg_flags & MSG_DONTWAIT) + req->flags |= REQ_F_NOWAIT; + +#ifdef CONFIG_COMPAT + if (req->ctx->compat) + sr->msg_flags |= MSG_CMSG_COMPAT; +#endif + return 0; +} + +static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) { struct io_async_msghdr iomsg, *kmsg; struct socket *sock; @@ -4661,19 +4923,14 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, unsigned flags; int min_ret = 0; int ret, cflags = 0; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; sock = sock_from_file(req->file, &ret); if (unlikely(!sock)) return ret; - if (req->async_data) { - kmsg = req->async_data; - kmsg->msg.msg_name = &kmsg->addr; - /* if iov is set, it's allocated already */ - if (!kmsg->iov) - kmsg->iov = kmsg->fast_iov; - kmsg->msg.msg_iter.iov = kmsg->iov; - } else { + kmsg = req->async_data; + if (!kmsg) { ret = io_recvmsg_copy_hdr(req, &iomsg); if (ret) return ret; @@ -4685,16 +4942,14 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, if (IS_ERR(kbuf)) return PTR_ERR(kbuf); kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr); - iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov, + kmsg->fast_iov[0].iov_len = req->sr_msg.len; + iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1, req->sr_msg.len); } - flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) + flags = req->sr_msg.msg_flags; + if (force_nonblock) flags |= MSG_DONTWAIT; - if (flags & MSG_WAITALL) min_ret = iov_iter_count(&kmsg->msg.msg_iter); @@ -4707,17 +4962,17 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, if (req->flags & REQ_F_BUFFER_SELECTED) cflags = io_put_recv_kbuf(req); - if (kmsg->iov != kmsg->fast_iov) - kfree(kmsg->iov); + /* fast path, check for non-NULL to avoid function call */ + if (kmsg->free_iov) + kfree(kmsg->free_iov); req->flags &= ~REQ_F_NEED_CLEANUP; if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC)))) - req_set_fail_links(req); - __io_req_complete(req, ret, cflags, cs); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, cflags); return 0; } -static int io_recv(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_recv(struct io_kiocb *req, unsigned int issue_flags) { struct io_buffer *kbuf; struct io_sr_msg *sr = &req->sr_msg; @@ -4728,6 +4983,7 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock, unsigned flags; int min_ret = 0; int ret, cflags = 0; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; sock = sock_from_file(req->file, &ret); if (unlikely(!sock)) @@ -4751,12 +5007,9 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock, msg.msg_iocb = NULL; msg.msg_flags = 0; - flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) + flags = req->sr_msg.msg_flags; + if (force_nonblock) flags |= MSG_DONTWAIT; - if (flags & MSG_WAITALL) min_ret = iov_iter_count(&msg.msg_iter); @@ -4769,8 +5022,8 @@ out_free: if (req->flags & REQ_F_BUFFER_SELECTED) cflags = io_put_recv_kbuf(req); if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC)))) - req_set_fail_links(req); - __io_req_complete(req, ret, cflags, cs); + req_set_fail(req); 
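The provided-buffer plumbing that io_recv()/io_recvmsg() consume above is driven entirely from userspace. A minimal liburing sketch of the round trip (the ring setup, the connected sockfd and the bufs allocation are assumed; BGID, NBUFS and BSIZE are illustrative constants for this sketch, and most error handling is elided):

#include <liburing.h>
#include <stdio.h>

#define BGID	7	/* arbitrary buffer group id for this sketch */
#define NBUFS	8
#define BSIZE	4096

static int recv_with_provided_buffers(struct io_uring *ring, int sockfd,
				      char *bufs /* NBUFS * BSIZE bytes */)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	/* hand NBUFS buffers of BSIZE bytes each to the kernel, group BGID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, bufs, BSIZE, NBUFS, BGID, 0);
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	if (cqe->res < 0)
		return cqe->res;
	io_uring_cqe_seen(ring, cqe);

	/* recv without a buffer; the kernel picks one from group BGID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, sockfd, NULL, BSIZE, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);

	/* on success, the chosen buffer id comes back in cqe->flags */
	ret = cqe->res;
	if (ret >= 0 && (cqe->flags & IORING_CQE_F_BUFFER))
		printf("got %d bytes in buffer %u\n", ret,
		       cqe->flags >> IORING_CQE_BUFFER_SHIFT);
	io_uring_cqe_seen(ring, cqe);
	return ret;
}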
+ __io_req_complete(req, issue_flags, ret, cflags); return 0; } @@ -4778,48 +5031,79 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_accept *accept = &req->accept; - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->splice_fd_in) + if (sqe->ioprio || sqe->len || sqe->buf_index) return -EINVAL; accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr)); accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2)); accept->flags = READ_ONCE(sqe->accept_flags); accept->nofile = rlimit(RLIMIT_NOFILE); + + accept->file_slot = READ_ONCE(sqe->file_index); + if (accept->file_slot && (accept->flags & SOCK_CLOEXEC)) + return -EINVAL; + if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) + return -EINVAL; + if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK)) + accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK; return 0; } -static int io_accept(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_accept(struct io_kiocb *req, unsigned int issue_flags) { struct io_accept *accept = &req->accept; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0; - int ret; + bool fixed = !!accept->file_slot; + struct file *file; + int ret, fd; if (req->file->f_flags & O_NONBLOCK) req->flags |= REQ_F_NOWAIT; - ret = __sys_accept4_file(req->file, file_flags, accept->addr, - accept->addr_len, accept->flags, - accept->nofile); - if (ret == -EAGAIN && force_nonblock) - return -EAGAIN; - if (ret < 0) { + if (!fixed) { + fd = __get_unused_fd_flags(accept->flags, accept->nofile); + if (unlikely(fd < 0)) + return fd; + } + file = do_accept(req->file, file_flags, accept->addr, accept->addr_len, + accept->flags); + + if (IS_ERR(file)) { + if (!fixed) + put_unused_fd(fd); + ret = PTR_ERR(file); + if (ret == -EAGAIN && force_nonblock) + return -EAGAIN; if (ret == -ERESTARTSYS) ret = -EINTR; - req_set_fail_links(req); + req_set_fail(req); + } else if (!fixed) { + fd_install(fd, file); + ret = fd; + } else { + ret = io_install_fixed_file(req, file, issue_flags, + accept->file_slot - 1); } - __io_req_complete(req, ret, 0, cs); + __io_req_complete(req, issue_flags, ret, 0); return 0; } +static int io_connect_prep_async(struct io_kiocb *req) +{ + struct io_async_connect *io = req->async_data; + struct io_connect *conn = &req->connect; + + return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address); +} + static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_connect *conn = &req->connect; - struct io_async_connect *io = req->async_data; - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in) @@ -4827,20 +5111,15 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr)); conn->addr_len = READ_ONCE(sqe->addr2); - - if (!io) - return 0; - - return move_addr_to_kernel(conn->addr, conn->addr_len, - &io->address); + return 0; } -static int io_connect(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_connect(struct io_kiocb *req, unsigned int issue_flags) { struct io_async_connect __io, *io; unsigned 
file_flags; int ret; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; if (req->async_data) { io = req->async_data; @@ -4864,7 +5143,6 @@ static int io_connect(struct io_kiocb *req, bool force_nonblock, ret = -ENOMEM; goto out; } - io = req->async_data; memcpy(req->async_data, &__io, sizeof(__io)); return -EAGAIN; } @@ -4872,67 +5150,37 @@ static int io_connect(struct io_kiocb *req, bool force_nonblock, ret = -EINTR; out: if (ret < 0) - req_set_fail_links(req); - __io_req_complete(req, ret, 0, cs); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, 0); return 0; } #else /* !CONFIG_NET */ -static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) -{ - return -EOPNOTSUPP; +#define IO_NETOP_FN(op) \ +static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \ +{ \ + return -EOPNOTSUPP; \ } -static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) -{ - return -EOPNOTSUPP; +#define IO_NETOP_PREP(op) \ +IO_NETOP_FN(op) \ +static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \ +{ \ + return -EOPNOTSUPP; \ +} \ + +#define IO_NETOP_PREP_ASYNC(op) \ +IO_NETOP_PREP(op) \ +static int io_##op##_prep_async(struct io_kiocb *req) \ +{ \ + return -EOPNOTSUPP; \ } -static int io_send(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) -{ - return -EOPNOTSUPP; -} - -static int io_recvmsg_prep(struct io_kiocb *req, - const struct io_uring_sqe *sqe) -{ - return -EOPNOTSUPP; -} - -static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) -{ - return -EOPNOTSUPP; -} - -static int io_recv(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) -{ - return -EOPNOTSUPP; -} - -static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) -{ - return -EOPNOTSUPP; -} - -static int io_accept(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) -{ - return -EOPNOTSUPP; -} - -static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) -{ - return -EOPNOTSUPP; -} - -static int io_connect(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) -{ - return -EOPNOTSUPP; -} +IO_NETOP_PREP_ASYNC(sendmsg); +IO_NETOP_PREP_ASYNC(recvmsg); +IO_NETOP_PREP_ASYNC(connect); +IO_NETOP_PREP(accept); +IO_NETOP_FN(send); +IO_NETOP_FN(recv); #endif /* CONFIG_NET */ struct io_poll_table { @@ -4942,68 +5190,47 @@ struct io_poll_table { int error; }; -static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, - __poll_t mask, task_work_func_t func) +#define IO_POLL_CANCEL_FLAG BIT(31) +#define IO_POLL_RETRY_FLAG BIT(30) +#define IO_POLL_REF_MASK GENMASK(29, 0) + +/* + * We usually have 1-2 refs taken, 128 is more than enough and we want to + * maximise the margin between this amount and the moment when it overflows. + */ +#define IO_POLL_REF_BIAS 128 + +static bool io_poll_get_ownership_slowpath(struct io_kiocb *req) { - bool twa_signal_ok; - int ret; - - /* for instances that support it check for an event match first: */ - if (mask && !(mask & poll->events)) - return 0; - - trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); - - list_del_init(&poll->wait.entry); - - req->result = mask; - init_task_work(&req->task_work, func); - percpu_ref_get(&req->ctx->refs); + int v; /* - * If we using the signalfd wait_queue_head for this wakeup, then - * it's not safe to use TWA_SIGNAL as we could be recursing on the - * tsk->sighand->siglock on doing the wakeup. 
Should not be needed - * either, as the normal wakeup will suffice. + * poll_refs are already elevated and we don't have much hope for + * grabbing the ownership. Instead of incrementing set a retry flag + * to notify the loop that there might have been some change. */ - twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh); - - /* - * If this fails, then the task is exiting. When a task exits, the - * work gets canceled, so just cancel this request as well instead - * of executing it. We can't safely execute it anyway, as we may not - * have the needed state needed for it anyway. - */ - ret = io_req_task_work_add(req, twa_signal_ok); - if (unlikely(ret)) { - struct task_struct *tsk; - - WRITE_ONCE(poll->canceled, true); - tsk = io_wq_get_task(req->ctx->io_wq); - task_work_add(tsk, &req->task_work, TWA_NONE); - wake_up_process(tsk); - } - return 1; + v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs); + if (v & IO_POLL_REF_MASK) + return false; + return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); } -static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) - __acquires(&req->ctx->completion_lock) +/* + * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can + * bump it and acquire ownership. It's disallowed to modify requests while not + * owning it, that prevents from races for enqueueing task_work's and b/w + * arming poll and wakeups. + */ +static inline bool io_poll_get_ownership(struct io_kiocb *req) { - struct io_ring_ctx *ctx = req->ctx; + if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS)) + return io_poll_get_ownership_slowpath(req); + return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); +} - if (!req->result && !READ_ONCE(poll->canceled)) { - struct poll_table_struct pt = { ._key = poll->events }; - - req->result = vfs_poll(req->file, &pt) & poll->events; - } - - spin_lock_irq(&ctx->completion_lock); - if (!req->result && !READ_ONCE(poll->canceled)) { - add_wait_queue(poll->head, &poll->wait); - return true; - } - - return false; +static void io_poll_mark_cancelled(struct io_kiocb *req) +{ + atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs); } static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req) @@ -5021,99 +5248,263 @@ static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req) return &req->apoll->poll; } -static void io_poll_remove_double(struct io_kiocb *req) -{ - struct io_poll_iocb *poll = io_poll_get_double(req); - - lockdep_assert_held(&req->ctx->completion_lock); - - if (poll && poll->head) { - struct wait_queue_head *head = poll->head; - - spin_lock(&head->lock); - list_del_init(&poll->wait.entry); - if (poll->wait.private) - refcount_dec(&req->refs); - poll->head = NULL; - spin_unlock(&head->lock); - } -} - -static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) +static void io_poll_req_insert(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; + struct hlist_head *list; - io_poll_remove_double(req); - req->poll.done = true; - io_cqring_fill_event(req, error ? 
error : mangle_poll(mask)); - io_commit_cqring(ctx); -} - -static void io_poll_task_func(struct callback_head *cb) -{ - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - struct io_ring_ctx *ctx = req->ctx; - struct io_kiocb *nxt; - - if (io_poll_rewait(req, &req->poll)) { - spin_unlock_irq(&ctx->completion_lock); - } else { - hash_del(&req->hash_node); - io_poll_complete(req, req->result, 0); - spin_unlock_irq(&ctx->completion_lock); - - nxt = io_put_req_find_next(req); - io_cqring_ev_posted(ctx); - if (nxt) - __io_req_task_submit(nxt); - } - - percpu_ref_put(&ctx->refs); -} - -static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, - int sync, void *key) -{ - struct io_kiocb *req = wait->private; - struct io_poll_iocb *poll = io_poll_get_single(req); - __poll_t mask = key_to_poll(key); - - /* for instances that support it check for an event match first: */ - if (mask && !(mask & poll->events)) - return 0; - - list_del_init(&wait->entry); - - if (poll && poll->head) { - bool done; - - spin_lock(&poll->head->lock); - done = list_empty(&poll->wait.entry); - if (!done) - list_del_init(&poll->wait.entry); - /* make sure double remove sees this as being gone */ - wait->private = NULL; - spin_unlock(&poll->head->lock); - if (!done) { - /* use wait func handler, so it matches the rq type */ - poll->wait.func(&poll->wait, mode, sync, key); - } - } - refcount_dec(&req->refs); - return 1; + list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)]; + hlist_add_head(&req->hash_node, list); } static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events, wait_queue_func_t wake_func) { poll->head = NULL; - poll->done = false; - poll->canceled = false; - poll->events = events; +#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP) + /* mask in events that we always want/need */ + poll->events = events | IO_POLL_UNMASK; INIT_LIST_HEAD(&poll->wait.entry); init_waitqueue_func_entry(&poll->wait, wake_func); } +static inline void io_poll_remove_entry(struct io_poll_iocb *poll) +{ + struct wait_queue_head *head = smp_load_acquire(&poll->head); + + if (head) { + spin_lock_irq(&head->lock); + list_del_init(&poll->wait.entry); + poll->head = NULL; + spin_unlock_irq(&head->lock); + } +} + +static void io_poll_remove_entries(struct io_kiocb *req) +{ + struct io_poll_iocb *poll = io_poll_get_single(req); + struct io_poll_iocb *poll_double = io_poll_get_double(req); + + /* + * While we hold the waitqueue lock and the waitqueue is nonempty, + * wake_up_pollfree() will wait for us. However, taking the waitqueue + * lock in the first place can race with the waitqueue being freed. + * + * We solve this as eventpoll does: by taking advantage of the fact that + * all users of wake_up_pollfree() will RCU-delay the actual free. If + * we enter rcu_read_lock() and see that the pointer to the queue is + * non-NULL, we can then lock it without the memory being freed out from + * under us. + * + * Keep holding rcu_read_lock() as long as we hold the queue lock, in + * case the caller deletes the entry from the queue, leaving it empty. + * In that case, only RCU prevents the queue memory from being freed. + */ + rcu_read_lock(); + io_poll_remove_entry(poll); + if (poll_double) + io_poll_remove_entry(poll_double); + rcu_read_unlock(); +} + +/* + * All poll tw should go through this. Checks for poll events, manages + * references, does rewait, etc. + * + * Returns a negative error on failure. 
>0 when no action is required, which is + * either a spurious wakeup or a served multishot CQE. 0 when it's done with + * the request, then the mask is stored in req->result. + */ +static int io_poll_check_events(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_poll_iocb *poll = io_poll_get_single(req); + int v; + + /* req->task == current here, checking PF_EXITING is safe */ + if (unlikely(req->task->flags & PF_EXITING)) + io_poll_mark_cancelled(req); + + do { + v = atomic_read(&req->poll_refs); + + /* tw handler should be the owner, and so have some references */ + if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK))) + return 0; + if (v & IO_POLL_CANCEL_FLAG) + return -ECANCELED; + /* + * cqe.res contains only events of the first wake up + * and all others are lost. Redo vfs_poll() to get + * up to date state. + */ + if ((v & IO_POLL_REF_MASK) != 1) + req->result = 0; + if (v & IO_POLL_RETRY_FLAG) { + req->result = 0; + /* + * We won't find new events that came in between + * vfs_poll and the ref put unless we clear the + * flag in advance. + */ + atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs); + v &= ~IO_POLL_RETRY_FLAG; + } + + if (!req->result) { + struct poll_table_struct pt = { ._key = poll->events }; + + req->result = vfs_poll(req->file, &pt) & poll->events; + } + + /* multishot, just fill a CQE and proceed */ + if (req->result && !(poll->events & EPOLLONESHOT)) { + __poll_t mask = mangle_poll(req->result & poll->events); + bool filled; + + spin_lock(&ctx->completion_lock); + filled = io_fill_cqe_aux(ctx, req->user_data, mask, + IORING_CQE_F_MORE); + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + if (unlikely(!filled)) + return -ECANCELED; + io_cqring_ev_posted(ctx); + } else if (req->result) { + return 0; + } + + /* force the next iteration to vfs_poll() */ + req->result = 0; + + /* + * Release all references, retry if someone tried to restart + * task_work while we were executing it. 
+ */ + } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) & + IO_POLL_REF_MASK); + + return 1; +} + +static void io_poll_task_func(struct io_kiocb *req, bool *locked) +{ + struct io_ring_ctx *ctx = req->ctx; + int ret; + + ret = io_poll_check_events(req); + if (ret > 0) + return; + + if (!ret) { + req->result = mangle_poll(req->result & req->poll.events); + } else { + req->result = ret; + req_set_fail(req); + } + + io_poll_remove_entries(req); + spin_lock(&ctx->completion_lock); + hash_del(&req->hash_node); + spin_unlock(&ctx->completion_lock); + io_req_complete_post(req, req->result, 0); +} + +static void io_apoll_task_func(struct io_kiocb *req, bool *locked) +{ + struct io_ring_ctx *ctx = req->ctx; + int ret; + + ret = io_poll_check_events(req); + if (ret > 0) + return; + + io_poll_remove_entries(req); + spin_lock(&ctx->completion_lock); + hash_del(&req->hash_node); + spin_unlock(&ctx->completion_lock); + + if (!ret) + io_req_task_submit(req, locked); + else + io_req_complete_failed(req, ret); +} + +static void __io_poll_execute(struct io_kiocb *req, int mask) +{ + req->result = mask; + if (req->opcode == IORING_OP_POLL_ADD) + req->io_task_work.func = io_poll_task_func; + else + req->io_task_work.func = io_apoll_task_func; + + trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); + io_req_task_work_add(req); +} + +static inline void io_poll_execute(struct io_kiocb *req, int res) +{ + if (io_poll_get_ownership(req)) + __io_poll_execute(req, res); +} + +static void io_poll_cancel_req(struct io_kiocb *req) +{ + io_poll_mark_cancelled(req); + /* kick tw, which should complete the request */ + io_poll_execute(req, 0); +} + +static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, + void *key) +{ + struct io_kiocb *req = wait->private; + struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb, + wait); + __poll_t mask = key_to_poll(key); + + if (unlikely(mask & POLLFREE)) { + io_poll_mark_cancelled(req); + /* we have to kick tw in case it's not already */ + io_poll_execute(req, 0); + + /* + * If the waitqueue is being freed early but someone is already + * holds ownership over it, we have to tear down the request as + * best we can. That means immediately removing the request from + * its waitqueue and preventing all further accesses to the + * waitqueue via the request. + */ + list_del_init(&poll->wait.entry); + + /* + * Careful: this *must* be the last step, since as soon + * as req->head is NULL'ed out, the request can be + * completed and freed, since aio_poll_complete_work() + * will no longer need to take the waitqueue lock. + */ + smp_store_release(&poll->head, NULL); + return 1; + } + + /* for instances that support it check for an event match first */ + if (mask && !(mask & poll->events)) + return 0; + + if (io_poll_get_ownership(req)) { + /* + * If we trigger a multishot poll off our own wakeup path, + * disable multishot as there is a circular dependency between + * CQ posting and triggering the event. + */ + if (mask & EPOLL_URING_WAKE) + poll->events |= EPOLLONESHOT; + + __io_poll_execute(req, mask); + } + return 1; +} + static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, struct wait_queue_head *head, struct io_poll_iocb **poll_ptr) @@ -5126,29 +5517,31 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, * if this happens. 
*/ if (unlikely(pt->nr_entries)) { - struct io_poll_iocb *poll_one = poll; + struct io_poll_iocb *first = poll; + /* double add on the same waitqueue head, ignore */ + if (first->head == head) + return; /* already have a 2nd entry, fail a third attempt */ if (*poll_ptr) { + if ((*poll_ptr)->head == head) + return; pt->error = -EINVAL; return; } - /* double add on the same waitqueue head, ignore */ - if (poll->head == head) - return; + poll = kmalloc(sizeof(*poll), GFP_ATOMIC); if (!poll) { pt->error = -ENOMEM; return; } - io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake); - refcount_inc(&req->refs); - poll->wait.private = req; + io_init_poll_iocb(poll, first->events, first->wait.func); *poll_ptr = poll; } pt->nr_entries++; poll->head = head; + poll->wait.private = req; if (poll->events & EPOLLEXCLUSIVE) add_wait_queue_exclusive(head, &poll->wait); @@ -5156,6 +5549,73 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, add_wait_queue(head, &poll->wait); } +static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, + struct poll_table_struct *p) +{ + struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); + + __io_queue_proc(&pt->req->poll, pt, head, + (struct io_poll_iocb **) &pt->req->async_data); +} + +static int __io_arm_poll_handler(struct io_kiocb *req, + struct io_poll_iocb *poll, + struct io_poll_table *ipt, __poll_t mask) +{ + struct io_ring_ctx *ctx = req->ctx; + + INIT_HLIST_NODE(&req->hash_node); + io_init_poll_iocb(poll, mask, io_poll_wake); + poll->file = req->file; + poll->wait.private = req; + + ipt->pt._key = mask; + ipt->req = req; + ipt->error = 0; + ipt->nr_entries = 0; + + /* + * Take the ownership to delay any tw execution up until we're done + * with poll arming. See io_poll_get_ownership(). + */ + atomic_set(&req->poll_refs, 1); + mask = vfs_poll(req->file, &ipt->pt) & poll->events; + + if (mask && (poll->events & EPOLLONESHOT)) { + io_poll_remove_entries(req); + /* no one else has access to the req, forget about the ref */ + return mask; + } + if (!mask && unlikely(ipt->error || !ipt->nr_entries)) { + io_poll_remove_entries(req); + if (!ipt->error) + ipt->error = -EINVAL; + return 0; + } + + spin_lock(&ctx->completion_lock); + io_poll_req_insert(req); + spin_unlock(&ctx->completion_lock); + + if (mask) { + /* can't multishot if failed, just queue the event we've got */ + if (unlikely(ipt->error || !ipt->nr_entries)) { + poll->events |= EPOLLONESHOT; + ipt->error = 0; + } + __io_poll_execute(req, mask); + return 0; + } + + /* + * Try to release ownership. If we see a change of state, e.g. + * poll was woken up, queue up a tw, it'll deal with it. 
+ */ + if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1) + __io_poll_execute(req, 0); + return 0; +} + static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, struct poll_table_struct *p) { @@ -5165,240 +5625,87 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll); } -static void io_async_task_func(struct callback_head *cb) -{ - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - struct async_poll *apoll = req->apoll; - struct io_ring_ctx *ctx = req->ctx; +enum { + IO_APOLL_OK, + IO_APOLL_ABORTED, + IO_APOLL_READY +}; - trace_io_uring_task_run(req->ctx, req->opcode, req->user_data); - - if (io_poll_rewait(req, &apoll->poll)) { - spin_unlock_irq(&ctx->completion_lock); - percpu_ref_put(&ctx->refs); - return; - } - - /* If req is still hashed, it cannot have been canceled. Don't check. */ - if (hash_hashed(&req->hash_node)) - hash_del(&req->hash_node); - - io_poll_remove_double(req); - spin_unlock_irq(&ctx->completion_lock); - - if (!READ_ONCE(apoll->poll.canceled)) - __io_req_task_submit(req); - else - __io_req_task_cancel(req, -ECANCELED); - - percpu_ref_put(&ctx->refs); - kfree(apoll->double_poll); - kfree(apoll); -} - -static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync, - void *key) -{ - struct io_kiocb *req = wait->private; - struct io_poll_iocb *poll = &req->apoll->poll; - - trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data, - key_to_poll(key)); - - return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func); -} - -static void io_poll_req_insert(struct io_kiocb *req) -{ - struct io_ring_ctx *ctx = req->ctx; - struct hlist_head *list; - - list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)]; - hlist_add_head(&req->hash_node, list); -} - -static __poll_t __io_arm_poll_handler(struct io_kiocb *req, - struct io_poll_iocb *poll, - struct io_poll_table *ipt, __poll_t mask, - wait_queue_func_t wake_func) - __acquires(&ctx->completion_lock) -{ - struct io_ring_ctx *ctx = req->ctx; - bool cancel = false; - - INIT_HLIST_NODE(&req->hash_node); - io_init_poll_iocb(poll, mask, wake_func); - poll->file = req->file; - poll->wait.private = req; - - ipt->pt._key = mask; - ipt->req = req; - ipt->error = 0; - ipt->nr_entries = 0; - - mask = vfs_poll(req->file, &ipt->pt) & poll->events; - if (unlikely(!ipt->nr_entries) && !ipt->error) - ipt->error = -EINVAL; - - spin_lock_irq(&ctx->completion_lock); - if (ipt->error) - io_poll_remove_double(req); - if (likely(poll->head)) { - spin_lock(&poll->head->lock); - if (unlikely(list_empty(&poll->wait.entry))) { - if (ipt->error) - cancel = true; - ipt->error = 0; - mask = 0; - } - if (mask || ipt->error) - list_del_init(&poll->wait.entry); - else if (cancel) - WRITE_ONCE(poll->canceled, true); - else if (!poll->done) /* actually waiting for an event */ - io_poll_req_insert(req); - spin_unlock(&poll->head->lock); - } - - return mask; -} - -static bool io_arm_poll_handler(struct io_kiocb *req) +static int io_arm_poll_handler(struct io_kiocb *req) { const struct io_op_def *def = &io_op_defs[req->opcode]; struct io_ring_ctx *ctx = req->ctx; struct async_poll *apoll; struct io_poll_table ipt; - __poll_t mask, ret; - int rw; + __poll_t mask = EPOLLONESHOT | POLLERR | POLLPRI; + int ret; if (!req->file || !file_can_poll(req->file)) - return false; + return IO_APOLL_ABORTED; if (req->flags & REQ_F_POLLED) - return false; - if (def->pollin) - rw = READ; - else if 
(def->pollout) - rw = WRITE; - else - return false; - /* if we can't nonblock try, then no point in arming a poll handler */ - if (!io_file_supports_async(req->file, rw)) - return false; + return IO_APOLL_ABORTED; + if (!def->pollin && !def->pollout) + return IO_APOLL_ABORTED; + + if (def->pollin) { + mask |= POLLIN | POLLRDNORM; + + /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */ + if ((req->opcode == IORING_OP_RECVMSG) && + (req->sr_msg.msg_flags & MSG_ERRQUEUE)) + mask &= ~POLLIN; + } else { + mask |= POLLOUT | POLLWRNORM; + } apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); if (unlikely(!apoll)) - return false; + return IO_APOLL_ABORTED; apoll->double_poll = NULL; - - req->flags |= REQ_F_POLLED; req->apoll = apoll; - - mask = 0; - if (def->pollin) - mask |= POLLIN | POLLRDNORM; - if (def->pollout) - mask |= POLLOUT | POLLWRNORM; - - /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */ - if ((req->opcode == IORING_OP_RECVMSG) && - (req->sr_msg.msg_flags & MSG_ERRQUEUE)) - mask &= ~POLLIN; - - mask |= POLLERR | POLLPRI; - + req->flags |= REQ_F_POLLED; ipt.pt._qproc = io_async_queue_proc; - ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, - io_async_wake); - if (ret || ipt.error) { - io_poll_remove_double(req); - spin_unlock_irq(&ctx->completion_lock); - kfree(apoll->double_poll); - kfree(apoll); - return false; - } - spin_unlock_irq(&ctx->completion_lock); - trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask, - apoll->poll.events); - return true; -} + ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask); + if (ret || ipt.error) + return ret ? IO_APOLL_READY : IO_APOLL_ABORTED; -static bool __io_poll_remove_one(struct io_kiocb *req, - struct io_poll_iocb *poll) -{ - bool do_complete = false; - - spin_lock(&poll->head->lock); - WRITE_ONCE(poll->canceled, true); - if (!list_empty(&poll->wait.entry)) { - list_del_init(&poll->wait.entry); - do_complete = true; - } - spin_unlock(&poll->head->lock); - hash_del(&req->hash_node); - return do_complete; -} - -static bool io_poll_remove_one(struct io_kiocb *req) -{ - bool do_complete; - - io_poll_remove_double(req); - - if (req->opcode == IORING_OP_POLL_ADD) { - do_complete = __io_poll_remove_one(req, &req->poll); - } else { - struct async_poll *apoll = req->apoll; - - /* non-poll requests have submit ref still */ - do_complete = __io_poll_remove_one(req, &apoll->poll); - if (do_complete) { - io_put_req(req); - kfree(apoll->double_poll); - kfree(apoll); - } - } - - if (do_complete) { - io_cqring_fill_event(req, -ECANCELED); - io_commit_cqring(req->ctx); - req_set_fail_links(req); - io_put_req_deferred(req, 1); - } - - return do_complete; + trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data, + mask, apoll->poll.events); + return IO_APOLL_OK; } /* * Returns true if we found and killed one or more poll requests */ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, - struct files_struct *files) + bool cancel_all) { struct hlist_node *tmp; struct io_kiocb *req; - int posted = 0, i; + bool found = false; + int i; - spin_lock_irq(&ctx->completion_lock); + spin_lock(&ctx->completion_lock); for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { struct hlist_head *list; list = &ctx->cancel_hash[i]; hlist_for_each_entry_safe(req, tmp, list, hash_node) { - if (io_match_task(req, tsk, files)) - posted += io_poll_remove_one(req); + if (io_match_task_safe(req, tsk, cancel_all)) { + hlist_del_init(&req->hash_node); + io_poll_cancel_req(req); + found = true; + } } } - 
spin_unlock_irq(&ctx->completion_lock); - - if (posted) - io_cqring_ev_posted(ctx); - - return posted != 0; + spin_unlock(&ctx->completion_lock); + return found; } -static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr) +static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr, + bool poll_only) + __must_hold(&ctx->completion_lock) { struct hlist_head *list; struct io_kiocb *req; @@ -5407,107 +5714,161 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr) hlist_for_each_entry(req, list, hash_node) { if (sqe_addr != req->user_data) continue; - if (io_poll_remove_one(req)) - return 0; - return -EALREADY; + if (poll_only && req->opcode != IORING_OP_POLL_ADD) + continue; + return req; } - - return -ENOENT; + return NULL; } -static int io_poll_remove_prep(struct io_kiocb *req, - const struct io_uring_sqe *sqe) +static bool io_poll_disarm(struct io_kiocb *req) + __must_hold(&ctx->completion_lock) { - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || - sqe->poll_events) - return -EINVAL; + if (!io_poll_get_ownership(req)) + return false; + io_poll_remove_entries(req); + hash_del(&req->hash_node); + return true; +} - req->poll.addr = READ_ONCE(sqe->addr); +static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr, + bool poll_only) + __must_hold(&ctx->completion_lock) +{ + struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only); + + if (!req) + return -ENOENT; + io_poll_cancel_req(req); return 0; } -/* - * Find a running poll command that matches one specified in sqe->addr, - * and remove it if found. - */ -static int io_poll_remove(struct io_kiocb *req) +static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe, + unsigned int flags) { - struct io_ring_ctx *ctx = req->ctx; - u64 addr; - int ret; - - addr = req->poll.addr; - spin_lock_irq(&ctx->completion_lock); - ret = io_poll_cancel(ctx, addr); - spin_unlock_irq(&ctx->completion_lock); - - if (ret < 0) - req_set_fail_links(req); - io_req_complete(req, ret); - return 0; -} - -static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, - void *key) -{ - struct io_kiocb *req = wait->private; - struct io_poll_iocb *poll = &req->poll; - - return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func); -} - -static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, - struct poll_table_struct *p) -{ - struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); - - __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data); -} - -static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) -{ - struct io_poll_iocb *poll = &req->poll; u32 events; - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) - return -EINVAL; - if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index) - return -EINVAL; - events = READ_ONCE(sqe->poll32_events); #ifdef __BIG_ENDIAN events = swahw32(events); #endif - poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP | - (events & EPOLLEXCLUSIVE); + if (!(flags & IORING_POLL_ADD_MULTI)) + events |= EPOLLONESHOT; + return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT)); +} + +static int io_poll_update_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + struct io_poll_update *upd = &req->poll_update; + u32 flags; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->ioprio || 
sqe->buf_index || sqe->splice_fd_in) + return -EINVAL; + flags = READ_ONCE(sqe->len); + if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA | + IORING_POLL_ADD_MULTI)) + return -EINVAL; + /* meaningless without update */ + if (flags == IORING_POLL_ADD_MULTI) + return -EINVAL; + + upd->old_user_data = READ_ONCE(sqe->addr); + upd->update_events = flags & IORING_POLL_UPDATE_EVENTS; + upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA; + + upd->new_user_data = READ_ONCE(sqe->off); + if (!upd->update_user_data && upd->new_user_data) + return -EINVAL; + if (upd->update_events) + upd->events = io_poll_parse_events(sqe, flags); + else if (sqe->poll32_events) + return -EINVAL; + return 0; } -static int io_poll_add(struct io_kiocb *req) +static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_poll_iocb *poll = &req->poll; + u32 flags; + + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr) + return -EINVAL; + flags = READ_ONCE(sqe->len); + if (flags & ~IORING_POLL_ADD_MULTI) + return -EINVAL; + + io_req_set_refcount(req); + poll->events = io_poll_parse_events(sqe, flags); + return 0; +} + +static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) { struct io_poll_iocb *poll = &req->poll; - struct io_ring_ctx *ctx = req->ctx; struct io_poll_table ipt; - __poll_t mask; + int ret; ipt.pt._qproc = io_poll_queue_proc; - mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events, - io_poll_wake); + ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events); + if (!ret && ipt.error) + req_set_fail(req); + ret = ret ?: ipt.error; + if (ret) + __io_req_complete(req, issue_flags, ret, 0); + return 0; +} - if (mask) { /* no async, we'd stolen it */ - ipt.error = 0; - io_poll_complete(req, mask, 0); - } - spin_unlock_irq(&ctx->completion_lock); +static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_kiocb *preq; + int ret2, ret = 0; - if (mask) { - io_cqring_ev_posted(ctx); - io_put_req(req); + spin_lock(&ctx->completion_lock); + preq = io_poll_find(ctx, req->poll_update.old_user_data, true); + if (!preq || !io_poll_disarm(preq)) { + spin_unlock(&ctx->completion_lock); + ret = preq ? 
-EALREADY : -ENOENT; + goto out; } - return ipt.error; + spin_unlock(&ctx->completion_lock); + + if (req->poll_update.update_events || req->poll_update.update_user_data) { + /* only mask one event flags, keep behavior flags */ + if (req->poll_update.update_events) { + preq->poll.events &= ~0xffff; + preq->poll.events |= req->poll_update.events & 0xffff; + preq->poll.events |= IO_POLL_UNMASK; + } + if (req->poll_update.update_user_data) + preq->user_data = req->poll_update.new_user_data; + + ret2 = io_poll_add(preq, issue_flags); + /* successfully updated, don't complete poll request */ + if (!ret2) + goto out; + } + req_set_fail(preq); + io_req_complete(preq, -ECANCELED); +out: + if (ret < 0) + req_set_fail(req); + /* complete update request, we're done with it */ + io_req_complete(req, ret); + return 0; +} + +static void io_req_task_timeout(struct io_kiocb *req, bool *locked) +{ + req_set_fail(req); + io_req_complete_post(req, -ETIME, 0); } static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) @@ -5518,88 +5879,182 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) struct io_ring_ctx *ctx = req->ctx; unsigned long flags; - spin_lock_irqsave(&ctx->completion_lock, flags); + spin_lock_irqsave(&ctx->timeout_lock, flags); list_del_init(&req->timeout.list); atomic_set(&req->ctx->cq_timeouts, atomic_read(&req->ctx->cq_timeouts) + 1); + spin_unlock_irqrestore(&ctx->timeout_lock, flags); - io_cqring_fill_event(req, -ETIME); - io_commit_cqring(ctx); - spin_unlock_irqrestore(&ctx->completion_lock, flags); - - io_cqring_ev_posted(ctx); - req_set_fail_links(req); - io_put_req(req); + req->io_task_work.func = io_req_task_timeout; + io_req_task_work_add(req); return HRTIMER_NORESTART; } -static int __io_timeout_cancel(struct io_kiocb *req) +static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx, + __u64 user_data) + __must_hold(&ctx->timeout_lock) { - struct io_timeout_data *io = req->async_data; - int ret; + struct io_timeout_data *io; + struct io_kiocb *req; + bool found = false; - ret = hrtimer_try_to_cancel(&io->timer); - if (ret == -1) - return -EALREADY; + list_for_each_entry(req, &ctx->timeout_list, timeout.list) { + found = user_data == req->user_data; + if (found) + break; + } + if (!found) + return ERR_PTR(-ENOENT); + + io = req->async_data; + if (hrtimer_try_to_cancel(&io->timer) == -1) + return ERR_PTR(-EALREADY); list_del_init(&req->timeout.list); - - req_set_fail_links(req); - io_cqring_fill_event(req, -ECANCELED); - io_put_req_deferred(req, 1); - return 0; + return req; } static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data) + __must_hold(&ctx->completion_lock) + __must_hold(&ctx->timeout_lock) { - struct io_kiocb *req; - int ret = -ENOENT; + struct io_kiocb *req = io_timeout_extract(ctx, user_data); - list_for_each_entry(req, &ctx->timeout_list, timeout.list) { - if (user_data == req->user_data) { - ret = 0; - break; - } + if (IS_ERR(req)) + return PTR_ERR(req); + + req_set_fail(req); + io_fill_cqe_req(req, -ECANCELED, 0); + io_put_req_deferred(req); + return 0; +} + +static clockid_t io_timeout_get_clock(struct io_timeout_data *data) +{ + switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) { + case IORING_TIMEOUT_BOOTTIME: + return CLOCK_BOOTTIME; + case IORING_TIMEOUT_REALTIME: + return CLOCK_REALTIME; + default: + /* can't happen, vetted at prep time */ + WARN_ON_ONCE(1); + fallthrough; + case 0: + return CLOCK_MONOTONIC; } +} - if (ret == -ENOENT) - return ret; +static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 
user_data, + struct timespec64 *ts, enum hrtimer_mode mode) + __must_hold(&ctx->timeout_lock) +{ + struct io_timeout_data *io; + struct io_kiocb *req; + bool found = false; - return __io_timeout_cancel(req); + list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) { + found = user_data == req->user_data; + if (found) + break; + } + if (!found) + return -ENOENT; + + io = req->async_data; + if (hrtimer_try_to_cancel(&io->timer) == -1) + return -EALREADY; + hrtimer_init(&io->timer, io_timeout_get_clock(io), mode); + io->timer.function = io_link_timeout_fn; + hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode); + return 0; +} + +static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data, + struct timespec64 *ts, enum hrtimer_mode mode) + __must_hold(&ctx->timeout_lock) +{ + struct io_kiocb *req = io_timeout_extract(ctx, user_data); + struct io_timeout_data *data; + + if (IS_ERR(req)) + return PTR_ERR(req); + + req->timeout.off = 0; /* noseq */ + data = req->async_data; + list_add_tail(&req->timeout.list, &ctx->timeout_list); + hrtimer_init(&data->timer, io_timeout_get_clock(data), mode); + data->timer.function = io_timeout_fn; + hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode); + return 0; } static int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { + struct io_timeout_rem *tr = &req->timeout_rem; + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) return -EINVAL; - if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags || - sqe->splice_fd_in) + if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in) return -EINVAL; - req->timeout_rem.addr = READ_ONCE(sqe->addr); + tr->ltimeout = false; + tr->addr = READ_ONCE(sqe->addr); + tr->flags = READ_ONCE(sqe->timeout_flags); + if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) { + if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1) + return -EINVAL; + if (tr->flags & IORING_LINK_TIMEOUT_UPDATE) + tr->ltimeout = true; + if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS)) + return -EINVAL; + if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2))) + return -EFAULT; + } else if (tr->flags) { + /* timeout removal doesn't support flags */ + return -EINVAL; + } + return 0; } +static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags) +{ + return (flags & IORING_TIMEOUT_ABS) ? 
HRTIMER_MODE_ABS + : HRTIMER_MODE_REL; +} + /* * Remove or update an existing timeout command */ -static int io_timeout_remove(struct io_kiocb *req) +static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags) { + struct io_timeout_rem *tr = &req->timeout_rem; struct io_ring_ctx *ctx = req->ctx; int ret; - spin_lock_irq(&ctx->completion_lock); - ret = io_timeout_cancel(ctx, req->timeout_rem.addr); + if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) { + spin_lock(&ctx->completion_lock); + spin_lock_irq(&ctx->timeout_lock); + ret = io_timeout_cancel(ctx, tr->addr); + spin_unlock_irq(&ctx->timeout_lock); + spin_unlock(&ctx->completion_lock); + } else { + enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags); + + spin_lock_irq(&ctx->timeout_lock); + if (tr->ltimeout) + ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode); + else + ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode); + spin_unlock_irq(&ctx->timeout_lock); + } - io_cqring_fill_event(req, ret); - io_commit_cqring(ctx); - spin_unlock_irq(&ctx->completion_lock); - io_cqring_ev_posted(ctx); if (ret < 0) - req_set_fail_links(req); - io_put_req(req); + req_set_fail(req); + io_req_complete_post(req, ret, 0); return 0; } @@ -5618,38 +6073,52 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (off && is_timeout_link) return -EINVAL; flags = READ_ONCE(sqe->timeout_flags); - if (flags & ~IORING_TIMEOUT_ABS) + if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK)) + return -EINVAL; + /* more than one clock specified is invalid, obviously */ + if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1) return -EINVAL; + INIT_LIST_HEAD(&req->timeout.list); req->timeout.off = off; + if (unlikely(off && !req->ctx->off_timeout_used)) + req->ctx->off_timeout_used = true; if (!req->async_data && io_alloc_async_data(req)) return -ENOMEM; data = req->async_data; data->req = req; + data->flags = flags; if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr))) return -EFAULT; - if (flags & IORING_TIMEOUT_ABS) - data->mode = HRTIMER_MODE_ABS; - else - data->mode = HRTIMER_MODE_REL; - INIT_LIST_HEAD(&req->timeout.list); - hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode); + data->mode = io_translate_timeout_mode(flags); + hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode); + + if (is_timeout_link) { + struct io_submit_link *link = &req->ctx->submit_state.link; + + if (!link->head) + return -EINVAL; + if (link->last->opcode == IORING_OP_LINK_TIMEOUT) + return -EINVAL; + req->timeout.head = link->last; + link->last->flags |= REQ_F_ARM_LTIMEOUT; + } return 0; } -static int io_timeout(struct io_kiocb *req) +static int io_timeout(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; struct io_timeout_data *data = req->async_data; struct list_head *entry; u32 tail, off = req->timeout.off; - spin_lock_irq(&ctx->completion_lock); + spin_lock_irq(&ctx->timeout_lock); /* * sqe->off holds how many events that need to occur for this @@ -5688,23 +6157,34 @@ add: list_add(&req->timeout.list, entry); data->timer.function = io_timeout_fn; hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode); - spin_unlock_irq(&ctx->completion_lock); + spin_unlock_irq(&ctx->timeout_lock); return 0; } +struct io_cancel_data { + struct io_ring_ctx *ctx; + u64 user_data; +}; + static bool io_cancel_cb(struct io_wq_work *work, void *data) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); + struct io_cancel_data *cd = data; - 
return req->user_data == (unsigned long) data; + return req->ctx == cd->ctx && req->user_data == cd->user_data; } -static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr) +static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data, + struct io_ring_ctx *ctx) { + struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, }; enum io_wq_cancel cancel_ret; int ret = 0; - cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false); + if (!tctx || !tctx->io_wq) + return -ENOENT; + + cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false); switch (cancel_ret) { case IO_WQ_CANCEL_OK: ret = 0; @@ -5720,35 +6200,27 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr) return ret; } -static void io_async_find_and_cancel(struct io_ring_ctx *ctx, - struct io_kiocb *req, __u64 sqe_addr, - int success_ret) +static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr) { - unsigned long flags; + struct io_ring_ctx *ctx = req->ctx; int ret; - ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr); - if (ret != -ENOENT) { - spin_lock_irqsave(&ctx->completion_lock, flags); - goto done; - } + WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current); - spin_lock_irqsave(&ctx->completion_lock, flags); - ret = io_timeout_cancel(ctx, sqe_addr); + ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx); if (ret != -ENOENT) - goto done; - ret = io_poll_cancel(ctx, sqe_addr); -done: - if (!ret) - ret = success_ret; - io_cqring_fill_event(req, ret); - io_commit_cqring(ctx); - spin_unlock_irqrestore(&ctx->completion_lock, flags); - io_cqring_ev_posted(ctx); + return ret; - if (ret < 0) - req_set_fail_links(req); - io_put_req(req); + spin_lock(&ctx->completion_lock); + spin_lock_irq(&ctx->timeout_lock); + ret = io_timeout_cancel(ctx, sqe_addr); + spin_unlock_irq(&ctx->timeout_lock); + if (ret != -ENOENT) + goto out; + ret = io_poll_cancel(ctx, sqe_addr, false); +out: + spin_unlock(&ctx->completion_lock); + return ret; } static int io_async_cancel_prep(struct io_kiocb *req, @@ -5766,52 +6238,72 @@ static int io_async_cancel_prep(struct io_kiocb *req, return 0; } -static int io_async_cancel(struct io_kiocb *req) +static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; - - io_async_find_and_cancel(ctx, req, req->cancel.addr, 0); - return 0; -} - -static int io_files_update_prep(struct io_kiocb *req, - const struct io_uring_sqe *sqe) -{ - if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL)) - return -EINVAL; - if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) - return -EINVAL; - if (sqe->ioprio || sqe->rw_flags) - return -EINVAL; - - req->files_update.offset = READ_ONCE(sqe->off); - req->files_update.nr_args = READ_ONCE(sqe->len); - if (!req->files_update.nr_args) - return -EINVAL; - req->files_update.arg = READ_ONCE(sqe->addr); - return 0; -} - -static int io_files_update(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) -{ - struct io_ring_ctx *ctx = req->ctx; - struct io_uring_files_update up; + u64 sqe_addr = req->cancel.addr; + struct io_tctx_node *node; int ret; - if (force_nonblock) - return -EAGAIN; + ret = io_try_cancel_userdata(req, sqe_addr); + if (ret != -ENOENT) + goto done; - up.offset = req->files_update.offset; - up.fds = req->files_update.arg; + /* slow path, try all io-wq's */ + io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + ret = -ENOENT; + list_for_each_entry(node, 
&ctx->tctx_list, ctx_node) { + struct io_uring_task *tctx = node->task->io_uring; - mutex_lock(&ctx->uring_lock); - ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args); - mutex_unlock(&ctx->uring_lock); + ret = io_async_cancel_one(tctx, req->cancel.addr, ctx); + if (ret != -ENOENT) + break; + } + io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); +done: + if (ret < 0) + req_set_fail(req); + io_req_complete_post(req, ret, 0); + return 0; +} + +static int io_rsrc_update_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) + return -EINVAL; + if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in) + return -EINVAL; + + req->rsrc_update.offset = READ_ONCE(sqe->off); + req->rsrc_update.nr_args = READ_ONCE(sqe->len); + if (!req->rsrc_update.nr_args) + return -EINVAL; + req->rsrc_update.arg = READ_ONCE(sqe->addr); + return 0; +} + +static int io_files_update(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_uring_rsrc_update2 up; + int ret; + + up.offset = req->rsrc_update.offset; + up.data = req->rsrc_update.arg; + up.nr = 0; + up.tags = 0; + up.resv = 0; + up.resv2 = 0; + + io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE, + &up, req->rsrc_update.nr_args); + io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); if (ret < 0) - req_set_fail_links(req); - __io_req_complete(req, ret, 0, cs); + req_set_fail(req); + __io_req_complete(req, issue_flags, ret, 0); return 0; } @@ -5831,11 +6323,11 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) case IORING_OP_POLL_ADD: return io_poll_add_prep(req, sqe); case IORING_OP_POLL_REMOVE: - return io_poll_remove_prep(req, sqe); + return io_poll_update_prep(req, sqe); case IORING_OP_FSYNC: - return io_prep_fsync(req, sqe); + return io_fsync_prep(req, sqe); case IORING_OP_SYNC_FILE_RANGE: - return io_prep_sfr(req, sqe); + return io_sfr_prep(req, sqe); case IORING_OP_SENDMSG: case IORING_OP_SEND: return io_sendmsg_prep(req, sqe); @@ -5861,7 +6353,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) case IORING_OP_CLOSE: return io_close_prep(req, sqe); case IORING_OP_FILES_UPDATE: - return io_files_update_prep(req, sqe); + return io_rsrc_update_prep(req, sqe); case IORING_OP_STATX: return io_statx_prep(req, sqe); case IORING_OP_FADVISE: @@ -5880,100 +6372,131 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return io_remove_buffers_prep(req, sqe); case IORING_OP_TEE: return io_tee_prep(req, sqe); + case IORING_OP_SHUTDOWN: + return io_shutdown_prep(req, sqe); + case IORING_OP_RENAMEAT: + return io_renameat_prep(req, sqe); + case IORING_OP_UNLINKAT: + return io_unlinkat_prep(req, sqe); } printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n", req->opcode); - return-EINVAL; + return -EINVAL; } -static int io_req_defer_prep(struct io_kiocb *req, - const struct io_uring_sqe *sqe) +static int io_req_prep_async(struct io_kiocb *req) { - if (!sqe) + if (!io_op_defs[req->opcode].needs_async_setup) return 0; + if (WARN_ON_ONCE(req->async_data)) + return -EFAULT; if (io_alloc_async_data(req)) return -EAGAIN; - return io_req_prep(req, sqe); + + switch (req->opcode) { + case IORING_OP_READV: + return io_rw_prep_async(req, READ); + case IORING_OP_WRITEV: + return io_rw_prep_async(req, WRITE); + case IORING_OP_SENDMSG: + return 
io_sendmsg_prep_async(req); + case IORING_OP_RECVMSG: + return io_recvmsg_prep_async(req); + case IORING_OP_CONNECT: + return io_connect_prep_async(req); + } + printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n", + req->opcode); + return -EFAULT; } static u32 io_get_sequence(struct io_kiocb *req) { - struct io_kiocb *pos; - struct io_ring_ctx *ctx = req->ctx; - u32 total_submitted, nr_reqs = 1; + u32 seq = req->ctx->cached_sq_head; - if (req->flags & REQ_F_LINK_HEAD) - list_for_each_entry(pos, &req->link_list, link_list) - nr_reqs++; - - total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped; - return total_submitted - nr_reqs; + /* need original cached_sq_head, but it was increased for each req */ + io_for_each_link(req, req) + seq--; + return seq; } -static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static bool io_drain_req(struct io_kiocb *req) { + struct io_kiocb *pos; struct io_ring_ctx *ctx = req->ctx; struct io_defer_entry *de; int ret; u32 seq; + if (req->flags & REQ_F_FAIL) { + io_req_complete_fail_submit(req); + return true; + } + + /* + * If we need to drain a request in the middle of a link, drain the + * head request and the next request/link after the current link. + * Considering sequential execution of links, IOSQE_IO_DRAIN will be + * maintained for every request of our link. + */ + if (ctx->drain_next) { + req->flags |= REQ_F_IO_DRAIN; + ctx->drain_next = false; + } + /* not interested in head, start from the first linked */ + io_for_each_link(pos, req->link) { + if (pos->flags & REQ_F_IO_DRAIN) { + ctx->drain_next = true; + req->flags |= REQ_F_IO_DRAIN; + break; + } + } + /* Still need defer if there is pending req in defer list. */ + spin_lock(&ctx->completion_lock); if (likely(list_empty_careful(&ctx->defer_list) && - !(req->flags & REQ_F_IO_DRAIN))) - return 0; + !(req->flags & REQ_F_IO_DRAIN))) { + spin_unlock(&ctx->completion_lock); + ctx->drain_active = false; + return false; + } + spin_unlock(&ctx->completion_lock); seq = io_get_sequence(req); /* Still a chance to pass the sequence check */ if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) - return 0; + return false; - if (!req->async_data) { - ret = io_req_defer_prep(req, sqe); - if (ret) - return ret; - } + ret = io_req_prep_async(req); + if (ret) + goto fail; io_prep_async_link(req); de = kmalloc(sizeof(*de), GFP_KERNEL); - if (!de) - return -ENOMEM; + if (!de) { + ret = -ENOMEM; +fail: + io_req_complete_failed(req, ret); + return true; + } - spin_lock_irq(&ctx->completion_lock); + spin_lock(&ctx->completion_lock); if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { - spin_unlock_irq(&ctx->completion_lock); + spin_unlock(&ctx->completion_lock); kfree(de); - io_queue_async_work(req); - return -EIOCBQUEUED; + io_queue_async_work(req, NULL); + return true; } trace_io_uring_defer(ctx, req, req->user_data); de->req = req; de->seq = seq; list_add_tail(&de->list, &ctx->defer_list); - spin_unlock_irq(&ctx->completion_lock); - return -EIOCBQUEUED; + spin_unlock(&ctx->completion_lock); + return true; } -static void io_req_drop_files(struct io_kiocb *req) -{ - struct io_ring_ctx *ctx = req->ctx; - struct io_uring_task *tctx = req->task->io_uring; - unsigned long flags; - - if (req->work.flags & IO_WQ_WORK_FILES) { - put_files_struct(req->work.identity->files); - put_nsproxy(req->work.identity->nsproxy); - } - spin_lock_irqsave(&ctx->inflight_lock, flags); - list_del(&req->inflight_entry); - spin_unlock_irqrestore(&ctx->inflight_lock, 
flags); - req->flags &= ~REQ_F_INFLIGHT; - req->work.flags &= ~IO_WQ_WORK_FILES; - if (atomic_read(&tctx->in_idle)) - wake_up(&tctx->wait); -} - -static void __io_clean_op(struct io_kiocb *req) +static void io_clean_op(struct io_kiocb *req) { if (req->flags & REQ_F_BUFFER_SELECTED) { switch (req->opcode) { @@ -5987,7 +6510,6 @@ static void __io_clean_op(struct io_kiocb *req) kfree(req->sr_msg.kbuf); break; } - req->flags &= ~REQ_F_BUFFER_SELECTED; } if (req->flags & REQ_F_NEED_CLEANUP) { @@ -5999,588 +6521,423 @@ static void __io_clean_op(struct io_kiocb *req) case IORING_OP_WRITE_FIXED: case IORING_OP_WRITE: { struct io_async_rw *io = req->async_data; - if (io->free_iovec) - kfree(io->free_iovec); + + kfree(io->free_iovec); break; } case IORING_OP_RECVMSG: case IORING_OP_SENDMSG: { struct io_async_msghdr *io = req->async_data; - if (io->iov != io->fast_iov) - kfree(io->iov); + + kfree(io->free_iov); break; } - case IORING_OP_SPLICE: - case IORING_OP_TEE: - io_put_file(req, req->splice.file_in, - (req->splice.flags & SPLICE_F_FD_IN_FIXED)); - break; case IORING_OP_OPENAT: case IORING_OP_OPENAT2: if (req->open.filename) putname(req->open.filename); break; + case IORING_OP_RENAMEAT: + putname(req->rename.oldpath); + putname(req->rename.newpath); + break; + case IORING_OP_UNLINKAT: + putname(req->unlink.filename); + break; } - req->flags &= ~REQ_F_NEED_CLEANUP; } + if ((req->flags & REQ_F_POLLED) && req->apoll) { + kfree(req->apoll->double_poll); + kfree(req->apoll); + req->apoll = NULL; + } + if (req->flags & REQ_F_INFLIGHT) { + struct io_uring_task *tctx = req->task->io_uring; + + atomic_dec(&tctx->inflight_tracked); + } + if (req->flags & REQ_F_CREDS) + put_cred(req->creds); + + req->flags &= ~IO_REQ_CLEAN_FLAGS; } -static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock, - struct io_comp_state *cs) +static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; + const struct cred *creds = NULL; int ret; + if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) + creds = override_creds(req->creds); + switch (req->opcode) { case IORING_OP_NOP: - ret = io_nop(req, cs); + ret = io_nop(req, issue_flags); break; case IORING_OP_READV: case IORING_OP_READ_FIXED: case IORING_OP_READ: - ret = io_read(req, force_nonblock, cs); + ret = io_read(req, issue_flags); break; case IORING_OP_WRITEV: case IORING_OP_WRITE_FIXED: case IORING_OP_WRITE: - ret = io_write(req, force_nonblock, cs); + ret = io_write(req, issue_flags); break; case IORING_OP_FSYNC: - ret = io_fsync(req, force_nonblock); + ret = io_fsync(req, issue_flags); break; case IORING_OP_POLL_ADD: - ret = io_poll_add(req); + ret = io_poll_add(req, issue_flags); break; case IORING_OP_POLL_REMOVE: - ret = io_poll_remove(req); + ret = io_poll_update(req, issue_flags); break; case IORING_OP_SYNC_FILE_RANGE: - ret = io_sync_file_range(req, force_nonblock); + ret = io_sync_file_range(req, issue_flags); break; case IORING_OP_SENDMSG: - ret = io_sendmsg(req, force_nonblock, cs); + ret = io_sendmsg(req, issue_flags); break; case IORING_OP_SEND: - ret = io_send(req, force_nonblock, cs); + ret = io_send(req, issue_flags); break; case IORING_OP_RECVMSG: - ret = io_recvmsg(req, force_nonblock, cs); + ret = io_recvmsg(req, issue_flags); break; case IORING_OP_RECV: - ret = io_recv(req, force_nonblock, cs); + ret = io_recv(req, issue_flags); break; case IORING_OP_TIMEOUT: - ret = io_timeout(req); + ret = io_timeout(req, issue_flags); break; case IORING_OP_TIMEOUT_REMOVE: - ret = 
io_timeout_remove(req); + ret = io_timeout_remove(req, issue_flags); break; case IORING_OP_ACCEPT: - ret = io_accept(req, force_nonblock, cs); + ret = io_accept(req, issue_flags); break; case IORING_OP_CONNECT: - ret = io_connect(req, force_nonblock, cs); + ret = io_connect(req, issue_flags); break; case IORING_OP_ASYNC_CANCEL: - ret = io_async_cancel(req); + ret = io_async_cancel(req, issue_flags); break; case IORING_OP_FALLOCATE: - ret = io_fallocate(req, force_nonblock); + ret = io_fallocate(req, issue_flags); break; case IORING_OP_OPENAT: - ret = io_openat(req, force_nonblock); + ret = io_openat(req, issue_flags); break; case IORING_OP_CLOSE: - ret = io_close(req, force_nonblock, cs); + ret = io_close(req, issue_flags); break; case IORING_OP_FILES_UPDATE: - ret = io_files_update(req, force_nonblock, cs); + ret = io_files_update(req, issue_flags); break; case IORING_OP_STATX: - ret = io_statx(req, force_nonblock); + ret = io_statx(req, issue_flags); break; case IORING_OP_FADVISE: - ret = io_fadvise(req, force_nonblock); + ret = io_fadvise(req, issue_flags); break; case IORING_OP_MADVISE: - ret = io_madvise(req, force_nonblock); + ret = io_madvise(req, issue_flags); break; case IORING_OP_OPENAT2: - ret = io_openat2(req, force_nonblock); + ret = io_openat2(req, issue_flags); break; case IORING_OP_EPOLL_CTL: - ret = io_epoll_ctl(req, force_nonblock, cs); + ret = io_epoll_ctl(req, issue_flags); break; case IORING_OP_SPLICE: - ret = io_splice(req, force_nonblock); + ret = io_splice(req, issue_flags); break; case IORING_OP_PROVIDE_BUFFERS: - ret = io_provide_buffers(req, force_nonblock, cs); + ret = io_provide_buffers(req, issue_flags); break; case IORING_OP_REMOVE_BUFFERS: - ret = io_remove_buffers(req, force_nonblock, cs); + ret = io_remove_buffers(req, issue_flags); break; case IORING_OP_TEE: - ret = io_tee(req, force_nonblock); + ret = io_tee(req, issue_flags); + break; + case IORING_OP_SHUTDOWN: + ret = io_shutdown(req, issue_flags); + break; + case IORING_OP_RENAMEAT: + ret = io_renameat(req, issue_flags); + break; + case IORING_OP_UNLINKAT: + ret = io_unlinkat(req, issue_flags); break; default: ret = -EINVAL; break; } + if (creds) + revert_creds(creds); if (ret) return ret; - /* If the op doesn't have a file, we're not polling for it */ - if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) { - const bool in_async = io_wq_current_is_worker(); - - /* workqueue context doesn't hold uring_lock, grab it now */ - if (in_async) - mutex_lock(&ctx->uring_lock); - + if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) io_iopoll_req_issued(req); - if (in_async) - mutex_unlock(&ctx->uring_lock); - } - return 0; } -static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work) +static struct io_wq_work *io_wq_free_work(struct io_wq_work *work) +{ + struct io_kiocb *req = container_of(work, struct io_kiocb, work); + + req = io_put_req_find_next(req); + return req ? 
&req->work : NULL; +} + +static void io_wq_submit_work(struct io_wq_work *work) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_kiocb *timeout; int ret = 0; + /* one will be dropped by ->io_free_work() after returning to io-wq */ + if (!(req->flags & REQ_F_REFCOUNT)) + __io_req_set_refcount(req, 2); + else + req_ref_get(req); + timeout = io_prep_linked_timeout(req); if (timeout) io_queue_linked_timeout(timeout); - /* if NO_CANCEL is set, we must still run the work */ - if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) == - IO_WQ_WORK_CANCEL) { + /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ + if (work->flags & IO_WQ_WORK_CANCEL) ret = -ECANCELED; - } if (!ret) { do { - ret = io_issue_sqe(req, false, NULL); + ret = io_issue_sqe(req, 0); /* * We can get EAGAIN for polled IO even though we're * forcing a sync submission from here, since we can't * wait for request slots on the block side. */ - if (ret != -EAGAIN) + if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL)) break; cond_resched(); } while (1); } - if (ret) { - struct io_ring_ctx *lock_ctx = NULL; + /* avoid locking problems by failing it from a clean context */ + if (ret) + io_req_task_queue_fail(req, ret); +} - if (req->ctx->flags & IORING_SETUP_IOPOLL) - lock_ctx = req->ctx; - - /* - * io_iopoll_complete() does not hold completion_lock to - * complete polled io, so here for polled io, we can not call - * io_req_complete() directly, otherwise there maybe concurrent - * access to cqring, defer_list, etc, which is not safe. Given - * that io_iopoll_complete() is always called under uring_lock, - * so here for polled io, we also get uring_lock to complete - * it. - */ - if (lock_ctx) - mutex_lock(&lock_ctx->uring_lock); - - req_set_fail_links(req); - io_req_complete(req, ret); - - if (lock_ctx) - mutex_unlock(&lock_ctx->uring_lock); - } - - return io_steal_work(req); +static inline struct io_fixed_file *io_fixed_file_slot(struct io_file_table *table, + unsigned i) +{ + return &table->files[i]; } static inline struct file *io_file_from_index(struct io_ring_ctx *ctx, int index) { - struct fixed_file_table *table; + struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index); - table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT]; - return table->files[index & IORING_FILE_TABLE_MASK]; + return (struct file *) (slot->file_ptr & FFS_MASK); } -static struct file *io_file_get(struct io_submit_state *state, - struct io_kiocb *req, int fd, bool fixed) +static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file) +{ + unsigned long file_ptr = (unsigned long) file; + + if (__io_file_supports_nowait(file, READ)) + file_ptr |= FFS_ASYNC_READ; + if (__io_file_supports_nowait(file, WRITE)) + file_ptr |= FFS_ASYNC_WRITE; + if (S_ISREG(file_inode(file)->i_mode)) + file_ptr |= FFS_ISREG; + file_slot->file_ptr = file_ptr; +} + +static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx, + struct io_kiocb *req, int fd) { - struct io_ring_ctx *ctx = req->ctx; struct file *file; + unsigned long file_ptr; - if (fixed) { - if (unlikely((unsigned int)fd >= ctx->nr_user_files)) - return NULL; - fd = array_index_nospec(fd, ctx->nr_user_files); - file = io_file_from_index(ctx, fd); - if (file) { - req->fixed_file_refs = &ctx->file_data->node->refs; - percpu_ref_get(req->fixed_file_refs); - } - } else { - trace_io_uring_file_get(ctx, fd); - file = __io_file_get(state, fd); - } - - if (file && file->f_op == &io_uring_fops && - 
!(req->flags & REQ_F_INFLIGHT)) { - io_req_init_async(req); - req->flags |= REQ_F_INFLIGHT; - - spin_lock_irq(&ctx->inflight_lock); - list_add(&req->inflight_entry, &ctx->inflight_list); - spin_unlock_irq(&ctx->inflight_lock); - } - + if (unlikely((unsigned int)fd >= ctx->nr_user_files)) + return NULL; + fd = array_index_nospec(fd, ctx->nr_user_files); + file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr; + file = (struct file *) (file_ptr & FFS_MASK); + file_ptr &= ~FFS_MASK; + /* mask in overlapping REQ_F and FFS bits */ + req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT); + io_req_set_rsrc_node(req); return file; } -static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req, - int fd) +static struct file *io_file_get_normal(struct io_ring_ctx *ctx, + struct io_kiocb *req, int fd) { - bool fixed; + struct file *file = fget(fd); - fixed = (req->flags & REQ_F_FIXED_FILE) != 0; - if (unlikely(!fixed && io_async_submit(req->ctx))) - return -EBADF; + trace_io_uring_file_get(ctx, fd); - req->file = io_file_get(state, req, fd, fixed); - if (req->file || io_op_defs[req->opcode].needs_file_no_error) - return 0; - return -EBADF; + /* we don't allow fixed io_uring files */ + if (file && unlikely(file->f_op == &io_uring_fops)) + io_req_track_inflight(req); + return file; +} + +static inline struct file *io_file_get(struct io_ring_ctx *ctx, + struct io_kiocb *req, int fd, bool fixed) +{ + if (fixed) + return io_file_get_fixed(ctx, req, fd); + else + return io_file_get_normal(ctx, req, fd); +} + +static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked) +{ + struct io_kiocb *prev = req->timeout.prev; + int ret = -ENOENT; + + if (prev) { + if (!(req->task->flags & PF_EXITING)) + ret = io_try_cancel_userdata(req, prev->user_data); + io_req_complete_post(req, ret ?: -ETIME, 0); + io_put_req(prev); + } else { + io_req_complete_post(req, -ETIME, 0); + } } static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) { struct io_timeout_data *data = container_of(timer, struct io_timeout_data, timer); - struct io_kiocb *req = data->req; + struct io_kiocb *prev, *req = data->req; struct io_ring_ctx *ctx = req->ctx; - struct io_kiocb *prev = NULL; unsigned long flags; - spin_lock_irqsave(&ctx->completion_lock, flags); + spin_lock_irqsave(&ctx->timeout_lock, flags); + prev = req->timeout.head; + req->timeout.head = NULL; /* * We don't expect the list to be empty, that will only happen if we * race with the completion of the linked work. 
*/ - if (!list_empty(&req->link_list)) { - prev = list_entry(req->link_list.prev, struct io_kiocb, - link_list); - list_del_init(&req->link_list); - if (!refcount_inc_not_zero(&prev->refs)) + if (prev) { + io_remove_next_linked(prev); + if (!req_ref_inc_not_zero(prev)) prev = NULL; } - list_del(&req->timeout.list); - spin_unlock_irqrestore(&ctx->completion_lock, flags); + req->timeout.prev = prev; + spin_unlock_irqrestore(&ctx->timeout_lock, flags); - if (prev) { - io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME); - io_put_req_deferred(prev, 1); - } else { - io_cqring_add_event(req, -ETIME, 0); - io_put_req_deferred(req, 1); - } + req->io_task_work.func = io_req_task_link_timeout; + io_req_task_work_add(req); return HRTIMER_NORESTART; } -static void __io_queue_linked_timeout(struct io_kiocb *req) -{ - /* - * If the list is now empty, then our linked request finished before - * we got a chance to setup the timer - */ - if (!list_empty(&req->link_list)) { - struct io_timeout_data *data = req->async_data; - - data->timer.function = io_link_timeout_fn; - hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), - data->mode); - } -} - static void io_queue_linked_timeout(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - spin_lock_irq(&ctx->completion_lock); - __io_queue_linked_timeout(req); - spin_unlock_irq(&ctx->completion_lock); + spin_lock_irq(&ctx->timeout_lock); + /* + * If the back reference is NULL, then our linked request finished + * before we got a chance to setup the timer + */ + if (req->timeout.head) { + struct io_timeout_data *data = req->async_data; + data->timer.function = io_link_timeout_fn; + hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), + data->mode); + list_add_tail(&req->timeout.list, &ctx->ltimeout_list); + } + spin_unlock_irq(&ctx->timeout_lock); /* drop submission reference */ io_put_req(req); } -static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) -{ - struct io_kiocb *nxt; - - if (!(req->flags & REQ_F_LINK_HEAD)) - return NULL; - if (req->flags & REQ_F_LINK_TIMEOUT) - return NULL; - - nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, - link_list); - if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT) - return NULL; - - nxt->flags |= REQ_F_LTIMEOUT_ACTIVE; - req->flags |= REQ_F_LINK_TIMEOUT; - return nxt; -} - -static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs) +static void __io_queue_sqe(struct io_kiocb *req) + __must_hold(&req->ctx->uring_lock) { struct io_kiocb *linked_timeout; - const struct cred *old_creds = NULL; int ret; -again: - linked_timeout = io_prep_linked_timeout(req); - - if ((req->flags & REQ_F_WORK_INITIALIZED) && - (req->work.flags & IO_WQ_WORK_CREDS) && - req->work.identity->creds != current_cred()) { - if (old_creds) - revert_creds(old_creds); - if (old_creds == req->work.identity->creds) - old_creds = NULL; /* restored original creds */ - else - old_creds = override_creds(req->work.identity->creds); - } - - ret = io_issue_sqe(req, true, cs); +issue_sqe: + ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); /* * We async punt it if the file wasn't marked NOWAIT, or if the file * doesn't support non-blocking read/write attempts */ - if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { - if (!io_arm_poll_handler(req)) { + if (likely(!ret)) { + if (req->flags & REQ_F_COMPLETE_INLINE) { + struct io_ring_ctx *ctx = req->ctx; + struct io_submit_state *state = &ctx->submit_state; + + state->compl_reqs[state->compl_nr++] = req; + if 
(state->compl_nr == ARRAY_SIZE(state->compl_reqs)) + io_submit_flush_completions(ctx); + return; + } + + linked_timeout = io_prep_linked_timeout(req); + if (linked_timeout) + io_queue_linked_timeout(linked_timeout); + } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { + linked_timeout = io_prep_linked_timeout(req); + + switch (io_arm_poll_handler(req)) { + case IO_APOLL_READY: + if (linked_timeout) + io_queue_linked_timeout(linked_timeout); + goto issue_sqe; + case IO_APOLL_ABORTED: /* * Queued up for async execution, worker will release * submit reference when the iocb is actually submitted. */ - io_queue_async_work(req); + io_queue_async_work(req, NULL); + break; } if (linked_timeout) io_queue_linked_timeout(linked_timeout); - } else if (likely(!ret)) { - /* drop submission reference */ - req = io_put_req_find_next(req); - if (linked_timeout) - io_queue_linked_timeout(linked_timeout); - - if (req) { - if (!(req->flags & REQ_F_FORCE_ASYNC)) - goto again; - io_queue_async_work(req); - } } else { - /* un-prep timeout, so it'll be killed as any other linked */ - req->flags &= ~REQ_F_LINK_TIMEOUT; - req_set_fail_links(req); - io_put_req(req); - io_req_complete(req, ret); - } - - if (old_creds) - revert_creds(old_creds); -} - -static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_comp_state *cs) -{ - int ret; - - ret = io_req_defer(req, sqe); - if (ret) { - if (ret != -EIOCBQUEUED) { -fail_req: - req_set_fail_links(req); - io_put_req(req); - io_req_complete(req, ret); - } - } else if (req->flags & REQ_F_FORCE_ASYNC) { - if (!req->async_data) { - ret = io_req_defer_prep(req, sqe); - if (unlikely(ret)) - goto fail_req; - } - io_queue_async_work(req); - } else { - if (sqe) { - ret = io_req_prep(req, sqe); - if (unlikely(ret)) - goto fail_req; - } - __io_queue_sqe(req, cs); + io_req_complete_failed(req, ret); } } -static inline void io_queue_link_head(struct io_kiocb *req, - struct io_comp_state *cs) +static inline void io_queue_sqe(struct io_kiocb *req) + __must_hold(&req->ctx->uring_lock) { - if (unlikely(req->flags & REQ_F_FAIL_LINK)) { - io_put_req(req); - io_req_complete(req, -ECANCELED); - } else - io_queue_sqe(req, NULL, cs); -} + if (unlikely(req->ctx->drain_active) && io_drain_req(req)) + return; -static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **link, struct io_comp_state *cs) -{ - struct io_ring_ctx *ctx = req->ctx; - int ret; - - /* - * If we already have a head request, queue this one for async - * submittal once the head completes. If we don't have a head but - * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be - * submitted sync once the chain is complete. If none of those - * conditions are true (normal request), then just queue it. - */ - if (*link) { - struct io_kiocb *head = *link; - - /* - * Taking sequential execution of a link, draining both sides - * of the link also fullfils IOSQE_IO_DRAIN semantics for all - * requests in the link. So, it drains the head and the - * next after the link request. The last one is done via - * drain_next flag to persist the effect across calls. 
- */ - if (req->flags & REQ_F_IO_DRAIN) { - head->flags |= REQ_F_IO_DRAIN; - ctx->drain_next = 1; - } - ret = io_req_defer_prep(req, sqe); - if (unlikely(ret)) { - /* fail even hard links since we don't submit */ - head->flags |= REQ_F_FAIL_LINK; - return ret; - } - trace_io_uring_link(ctx, req, head); - list_add_tail(&req->link_list, &head->link_list); - - /* last request of a link, enqueue the link */ - if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { - io_queue_link_head(head, cs); - *link = NULL; - } + if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) { + __io_queue_sqe(req); + } else if (req->flags & REQ_F_FAIL) { + io_req_complete_fail_submit(req); } else { - if (unlikely(ctx->drain_next)) { - req->flags |= REQ_F_IO_DRAIN; - ctx->drain_next = 0; - } - if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { - req->flags |= REQ_F_LINK_HEAD; - INIT_LIST_HEAD(&req->link_list); + int ret = io_req_prep_async(req); - ret = io_req_defer_prep(req, sqe); - if (unlikely(ret)) - req->flags |= REQ_F_FAIL_LINK; - *link = req; - } else { - io_queue_sqe(req, sqe, cs); - } + if (unlikely(ret)) + io_req_complete_failed(req, ret); + else + io_queue_async_work(req, NULL); } - - return 0; -} - -/* - * Batched submission is done, ensure local IO is flushed out. - */ -static void io_submit_state_end(struct io_submit_state *state) -{ - if (!list_empty(&state->comp.list)) - io_submit_flush_completions(&state->comp); - blk_finish_plug(&state->plug); - io_state_file_put(state); - if (state->free_reqs) - kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs); -} - -/* - * Start submission side cache. - */ -static void io_submit_state_start(struct io_submit_state *state, - struct io_ring_ctx *ctx, unsigned int max_ios) -{ - blk_start_plug(&state->plug); - state->comp.nr = 0; - INIT_LIST_HEAD(&state->comp.list); - state->comp.ctx = ctx; - state->free_reqs = 0; - state->file = NULL; - state->ios_left = max_ios; -} - -static void io_commit_sqring(struct io_ring_ctx *ctx) -{ - struct io_rings *rings = ctx->rings; - - /* - * Ensure any loads from the SQEs are done at this point, - * since once we write the new head, the application could - * write new data to them. - */ - smp_store_release(&rings->sq.head, ctx->cached_sq_head); -} - -/* - * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory - * that is mapped by userspace. This means that care needs to be taken to - * ensure that reads are stable, as we cannot rely on userspace always - * being a good citizen. If members of the sqe are validated and then later - * used, it's important that those reads are done through READ_ONCE() to - * prevent a re-load down the line. - */ -static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx) -{ - u32 *sq_array = ctx->sq_array; - unsigned head; - - /* - * The cached sq head (or cq tail) serves two purposes: - * - * 1) allows us to batch the cost of updating the user visible - * head updates. - * 2) allows the kernel side to track the head on its own, even - * though the application is the one updating it. 
- */ - head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]); - if (likely(head < ctx->sq_entries)) - return &ctx->sq_sqes[head]; - - /* drop invalid entries */ - ctx->cached_sq_dropped++; - WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped); - return NULL; -} - -static inline void io_consume_sqe(struct io_ring_ctx *ctx) -{ - ctx->cached_sq_head++; } /* @@ -6592,7 +6949,7 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx, struct io_kiocb *req, unsigned int sqe_flags) { - if (!ctx->restricted) + if (likely(!ctx->restricted)) return true; if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) @@ -6609,382 +6966,459 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx, return true; } -#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \ - IOSQE_IO_HARDLINK | IOSQE_ASYNC | \ - IOSQE_BUFFER_SELECT) - static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, - const struct io_uring_sqe *sqe, - struct io_submit_state *state) + const struct io_uring_sqe *sqe) + __must_hold(&ctx->uring_lock) { + struct io_submit_state *state; unsigned int sqe_flags; - int id, ret; + int personality, ret = 0; + /* req is partially pre-initialised, see io_preinit_req() */ req->opcode = READ_ONCE(sqe->opcode); + /* same numerical values with corresponding REQ_F_*, safe to copy */ + req->flags = sqe_flags = READ_ONCE(sqe->flags); req->user_data = READ_ONCE(sqe->user_data); - req->async_data = NULL; req->file = NULL; - req->ctx = ctx; - req->flags = 0; - /* one is dropped after submission, the other at completion */ - refcount_set(&req->refs, 2); + req->fixed_rsrc_refs = NULL; req->task = current; - req->result = 0; - if (unlikely(req->opcode >= IORING_OP_LAST)) - return -EINVAL; - - if (unlikely(io_sq_thread_acquire_mm(ctx, req))) - return -EFAULT; - - sqe_flags = READ_ONCE(sqe->flags); /* enforce forwards compatibility on users */ if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) return -EINVAL; - - if (unlikely(!io_check_restriction(ctx, req, sqe_flags))) + if (unlikely(req->opcode >= IORING_OP_LAST)) + return -EINVAL; + if (!io_check_restriction(ctx, req, sqe_flags)) return -EACCES; if ((sqe_flags & IOSQE_BUFFER_SELECT) && !io_op_defs[req->opcode].buffer_select) return -EOPNOTSUPP; + if (unlikely(sqe_flags & IOSQE_IO_DRAIN)) + ctx->drain_active = true; - id = READ_ONCE(sqe->personality); - if (id) { - struct io_identity *iod; - - iod = xa_load(&ctx->personalities, id); - if (unlikely(!iod)) + personality = READ_ONCE(sqe->personality); + if (personality) { + req->creds = xa_load(&ctx->personalities, personality); + if (!req->creds) return -EINVAL; - refcount_inc(&iod->count); + get_cred(req->creds); + req->flags |= REQ_F_CREDS; + } + state = &ctx->submit_state; - __io_req_init_async(req); - get_cred(iod->creds); - req->work.identity = iod; - req->work.flags |= IO_WQ_WORK_CREDS; + /* + * Plug now if we have more than 1 IO left after this, and the target + * is potentially a read/write to block based storage. 
+ */ + if (!state->plug_started && state->ios_left > 1 && + io_op_defs[req->opcode].plug) { + blk_start_plug(&state->plug); + state->plug_started = true; } - /* same numerical values with corresponding REQ_F_*, safe to copy */ - req->flags |= sqe_flags; + if (io_op_defs[req->opcode].needs_file) { + req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd), + (sqe_flags & IOSQE_FIXED_FILE)); + if (unlikely(!req->file)) + ret = -EBADF; + } - if (!io_op_defs[req->opcode].needs_file) - return 0; - - ret = io_req_set_file(state, req, READ_ONCE(sqe->fd)); state->ios_left--; return ret; } -static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) +static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, + const struct io_uring_sqe *sqe) + __must_hold(&ctx->uring_lock) { - struct io_submit_state state; - struct io_kiocb *link = NULL; - int i, submitted = 0; + struct io_submit_link *link = &ctx->submit_state.link; + int ret; - /* if we have a backlog and couldn't flush it all, return BUSY */ - if (test_bit(0, &ctx->sq_check_overflow)) { - if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL)) - return -EBUSY; + ret = io_init_req(ctx, req, sqe); + if (unlikely(ret)) { +fail_req: + /* fail even hard links since we don't submit */ + if (link->head) { + /* + * we can judge whether a link req failed or was cancelled by + * checking if REQ_F_FAIL is set, but the head is an exception since + * it may have REQ_F_FAIL set because of another req's failure, + * so leverage req->result to distinguish whether a head + * has REQ_F_FAIL set because of its own failure or another req's + * failure, so that we can set the correct ret code for it. + * init result here to avoid affecting the normal path. + */ + if (!(link->head->flags & REQ_F_FAIL)) + req_fail_link_node(link->head, -ECANCELED); + } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { + /* + * the current req is a normal req, so we should return an + * error and thus break the submission loop. + */ + io_req_complete_failed(req, ret); + return ret; + } + req_fail_link_node(req, ret); + } else { + ret = io_req_prep(req, sqe); + if (unlikely(ret)) + goto fail_req; } + /* don't need @sqe from now on */ + trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data, + req->flags, true, + ctx->flags & IORING_SETUP_SQPOLL); + + /* + * If we already have a head request, queue this one for async + * submittal once the head completes. If we don't have a head but + * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be + * submitted sync once the chain is complete. If none of those + * conditions are true (normal request), then just queue it. + */ + if (link->head) { + struct io_kiocb *head = link->head; + + if (!(req->flags & REQ_F_FAIL)) { + ret = io_req_prep_async(req); + if (unlikely(ret)) { + req_fail_link_node(req, ret); + if (!(head->flags & REQ_F_FAIL)) + req_fail_link_node(head, -ECANCELED); + } + } + trace_io_uring_link(ctx, req, head); + link->last->link = req; + link->last = req; + + /* last request of a link, enqueue the link */ + if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { + link->head = NULL; + io_queue_sqe(head); + } + } else { + if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { + link->head = req; + link->last = req; + } else { + io_queue_sqe(req); + } + } + + return 0; +} + +/* + * Batched submission is done, ensure local IO is flushed out.
+ */ +static void io_submit_state_end(struct io_submit_state *state, + struct io_ring_ctx *ctx) +{ + if (state->link.head) + io_queue_sqe(state->link.head); + if (state->compl_nr) + io_submit_flush_completions(ctx); + if (state->plug_started) + blk_finish_plug(&state->plug); +} + +/* + * Start submission side cache. + */ +static void io_submit_state_start(struct io_submit_state *state, + unsigned int max_ios) +{ + state->plug_started = false; + state->ios_left = max_ios; + /* set only head, no need to init link_last in advance */ + state->link.head = NULL; +} + +static void io_commit_sqring(struct io_ring_ctx *ctx) +{ + struct io_rings *rings = ctx->rings; + + /* + * Ensure any loads from the SQEs are done at this point, + * since once we write the new head, the application could + * write new data to them. + */ + smp_store_release(&rings->sq.head, ctx->cached_sq_head); +} + +/* + * Fetch an sqe, if one is available. Note this returns a pointer to memory + * that is mapped by userspace. This means that care needs to be taken to + * ensure that reads are stable, as we cannot rely on userspace always + * being a good citizen. If members of the sqe are validated and then later + * used, it's important that those reads are done through READ_ONCE() to + * prevent a re-load down the line. + */ +static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx) +{ + unsigned head, mask = ctx->sq_entries - 1; + unsigned sq_idx = ctx->cached_sq_head++ & mask; + + /* + * The cached sq head (or cq tail) serves two purposes: + * + * 1) allows us to batch the cost of updating the user visible + * head updates. + * 2) allows the kernel side to track the head on its own, even + * though the application is the one updating it. + */ + head = READ_ONCE(ctx->sq_array[sq_idx]); + if (likely(head < ctx->sq_entries)) + return &ctx->sq_sqes[head]; + + /* drop invalid entries */ + ctx->cq_extra--; + WRITE_ONCE(ctx->rings->sq_dropped, + READ_ONCE(ctx->rings->sq_dropped) + 1); + return NULL; +} + +static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) + __must_hold(&ctx->uring_lock) +{ + int submitted = 0; + /* make sure SQ entry isn't read before tail */ nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx)); - if (!percpu_ref_tryget_many(&ctx->refs, nr)) return -EAGAIN; + io_get_task_refs(nr); - percpu_counter_add(&current->io_uring->inflight, nr); - refcount_add(nr, &current->usage); - - io_submit_state_start(&state, ctx, nr); - - for (i = 0; i < nr; i++) { + io_submit_state_start(&ctx->submit_state, nr); + while (submitted < nr) { const struct io_uring_sqe *sqe; struct io_kiocb *req; - int err; - sqe = io_get_sqe(ctx); - if (unlikely(!sqe)) { - io_consume_sqe(ctx); - break; - } - req = io_alloc_req(ctx, &state); + req = io_alloc_req(ctx); if (unlikely(!req)) { if (!submitted) submitted = -EAGAIN; break; } - io_consume_sqe(ctx); - /* will complete beyond this point, count as submitted */ - submitted++; - - err = io_init_req(ctx, req, sqe, &state); - if (unlikely(err)) { -fail_req: - io_put_req(req); - io_req_complete(req, err); + sqe = io_get_sqe(ctx); + if (unlikely(!sqe)) { + list_add(&req->inflight_entry, &ctx->submit_state.free_list); break; } - - trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data, - true, io_async_submit(ctx)); - err = io_submit_sqe(req, sqe, &link, &state.comp); - if (err) - goto fail_req; + /* will complete beyond this point, count as submitted */ + submitted++; + if (io_submit_sqe(ctx, req, sqe)) + break; } if (unlikely(submitted != nr)) { int ref_used = (submitted ==
-EAGAIN) ? 0 : submitted; - struct io_uring_task *tctx = current->io_uring; int unused = nr - ref_used; + current->io_uring->cached_refs += unused; percpu_ref_put_many(&ctx->refs, unused); - percpu_counter_sub(&tctx->inflight, unused); - put_task_struct_many(current, unused); } - if (link) - io_queue_link_head(link, &state.comp); - io_submit_state_end(&state); + io_submit_state_end(&ctx->submit_state, ctx); /* Commit SQ ring head once we've consumed and submitted all SQEs */ io_commit_sqring(ctx); return submitted; } +static inline bool io_sqd_events_pending(struct io_sq_data *sqd) +{ + return READ_ONCE(sqd->state); +} + static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx) { /* Tell userspace we may need a wakeup call */ - spin_lock_irq(&ctx->completion_lock); - ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP; - spin_unlock_irq(&ctx->completion_lock); + spin_lock(&ctx->completion_lock); + WRITE_ONCE(ctx->rings->sq_flags, + ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP); + spin_unlock(&ctx->completion_lock); } static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx) { - spin_lock_irq(&ctx->completion_lock); - ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; - spin_unlock_irq(&ctx->completion_lock); + spin_lock(&ctx->completion_lock); + WRITE_ONCE(ctx->rings->sq_flags, + ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP); + spin_unlock(&ctx->completion_lock); } -static int io_sq_wake_function(struct wait_queue_entry *wqe, unsigned mode, - int sync, void *key) +static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) { - struct io_ring_ctx *ctx = container_of(wqe, struct io_ring_ctx, sqo_wait_entry); - int ret; - - ret = autoremove_wake_function(wqe, mode, sync, key); - if (ret) { - unsigned long flags; - - spin_lock_irqsave(&ctx->completion_lock, flags); - ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; - spin_unlock_irqrestore(&ctx->completion_lock, flags); - } - return ret; -} - -enum sq_ret { - SQT_IDLE = 1, - SQT_SPIN = 2, - SQT_DID_WORK = 4, -}; - -static enum sq_ret __io_sq_thread(struct io_ring_ctx *ctx, - unsigned long start_jiffies, bool cap_entries) -{ - unsigned long timeout = start_jiffies + ctx->sq_thread_idle; - struct io_sq_data *sqd = ctx->sq_data; unsigned int to_submit; int ret = 0; -again: - if (!list_empty(&ctx->iopoll_list)) { + to_submit = io_sqring_entries(ctx); + /* if we're handling multiple rings, cap submit size for fairness */ + if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE) + to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE; + + if (!list_empty(&ctx->iopoll_list) || to_submit) { unsigned nr_events = 0; + const struct cred *creds = NULL; + + if (ctx->sq_creds != current_cred()) + creds = override_creds(ctx->sq_creds); mutex_lock(&ctx->uring_lock); - if (!list_empty(&ctx->iopoll_list) && !need_resched()) + if (!list_empty(&ctx->iopoll_list)) io_do_iopoll(ctx, &nr_events, 0); + + /* + * Don't submit if refs are dying, good for io_uring_register(), + * but also it is relied upon by io_ring_exit_work() + */ + if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) && + !(ctx->flags & IORING_SETUP_R_DISABLED)) + ret = io_submit_sqes(ctx, to_submit); mutex_unlock(&ctx->uring_lock); + + if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait)) + wake_up(&ctx->sqo_sq_wait); + if (creds) + revert_creds(creds); } - to_submit = io_sqring_entries(ctx); - - /* - * If submit got -EBUSY, flag us as needing the application - * to enter the kernel to reap and flush events. 
- */ - if (!to_submit || ret == -EBUSY || need_resched()) { - /* - * Drop cur_mm before scheduling, we can't hold it for - * long periods (or over schedule()). Do this before - * adding ourselves to the waitqueue, as the unuse/drop - * may sleep. - */ - io_sq_thread_drop_mm(); - - /* - * We're polling. If we're within the defined idle - * period, then let us spin without work before going - * to sleep. The exception is if we got EBUSY doing - * more IO, we should wait for the application to - * reap events and wake us up. - */ - if (!list_empty(&ctx->iopoll_list) || need_resched() || - (!time_after(jiffies, timeout) && ret != -EBUSY && - !percpu_ref_is_dying(&ctx->refs))) - return SQT_SPIN; - - prepare_to_wait(&sqd->wait, &ctx->sqo_wait_entry, - TASK_INTERRUPTIBLE); - - /* - * While doing polled IO, before going to sleep, we need - * to check if there are new reqs added to iopoll_list, - * it is because reqs may have been punted to io worker - * and will be added to iopoll_list later, hence check - * the iopoll_list again. - */ - if ((ctx->flags & IORING_SETUP_IOPOLL) && - !list_empty_careful(&ctx->iopoll_list)) { - finish_wait(&sqd->wait, &ctx->sqo_wait_entry); - goto again; - } - - to_submit = io_sqring_entries(ctx); - if (!to_submit || ret == -EBUSY) - return SQT_IDLE; - } - - finish_wait(&sqd->wait, &ctx->sqo_wait_entry); - io_ring_clear_wakeup_flag(ctx); - - /* if we're handling multiple rings, cap submit size for fairness */ - if (cap_entries && to_submit > 8) - to_submit = 8; - - mutex_lock(&ctx->uring_lock); - if (likely(!percpu_ref_is_dying(&ctx->refs) && !ctx->sqo_dead)) - ret = io_submit_sqes(ctx, to_submit); - mutex_unlock(&ctx->uring_lock); - - if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait)) - wake_up(&ctx->sqo_sq_wait); - - return SQT_DID_WORK; + return ret; } -static void io_sqd_init_new(struct io_sq_data *sqd) +static void io_sqd_update_thread_idle(struct io_sq_data *sqd) { struct io_ring_ctx *ctx; + unsigned sq_thread_idle = 0; - while (!list_empty(&sqd->ctx_new_list)) { - ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list); - init_wait(&ctx->sqo_wait_entry); - ctx->sqo_wait_entry.func = io_sq_wake_function; - list_move_tail(&ctx->sqd_list, &sqd->ctx_list); - complete(&ctx->sq_thread_comp); + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) + sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle); + sqd->sq_thread_idle = sq_thread_idle; +} + +static bool io_sqd_handle_event(struct io_sq_data *sqd) +{ + bool did_sig = false; + struct ksignal ksig; + + if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || + signal_pending(current)) { + mutex_unlock(&sqd->lock); + if (signal_pending(current)) + did_sig = get_signal(&ksig); + cond_resched(); + mutex_lock(&sqd->lock); } + return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); } static int io_sq_thread(void *data) { - struct cgroup_subsys_state *cur_css = NULL; - const struct cred *old_cred = NULL; struct io_sq_data *sqd = data; struct io_ring_ctx *ctx; - unsigned long start_jiffies; + unsigned long timeout = 0; + char buf[TASK_COMM_LEN]; + DEFINE_WAIT(wait); - start_jiffies = jiffies; - while (!kthread_should_stop()) { - enum sq_ret ret = 0; - bool cap_entries; + snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid); + set_task_comm(current, buf); - /* - * Any changes to the sqd lists are synchronized through the - * kthread parking. This synchronizes the thread vs users, - * the users are synchronized on the sqd->ctx_lock. 
- */ - if (kthread_should_park()) { - kthread_parkme(); - /* - * When sq thread is unparked, in case the previous park operation - * comes from io_put_sq_data(), which means that sq thread is going - * to be stopped, so here needs to have a check. - */ - if (kthread_should_stop()) + if (sqd->sq_cpu != -1) + set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu)); + else + set_cpus_allowed_ptr(current, cpu_online_mask); + current->flags |= PF_NO_SETAFFINITY; + + mutex_lock(&sqd->lock); + while (1) { + bool cap_entries, sqt_spin = false; + + if (io_sqd_events_pending(sqd) || signal_pending(current)) { + if (io_sqd_handle_event(sqd)) break; + timeout = jiffies + sqd->sq_thread_idle; } - if (unlikely(!list_empty(&sqd->ctx_new_list))) - io_sqd_init_new(sqd); - cap_entries = !list_is_singular(&sqd->ctx_list); - list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { - if (current->cred != ctx->creds) { - if (old_cred) - revert_creds(old_cred); - old_cred = override_creds(ctx->creds); - } - io_sq_thread_associate_blkcg(ctx, &cur_css); -#ifdef CONFIG_AUDIT - current->loginuid = ctx->loginuid; - current->sessionid = ctx->sessionid; -#endif + int ret = __io_sq_thread(ctx, cap_entries); - ret |= __io_sq_thread(ctx, start_jiffies, cap_entries); + if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list))) + sqt_spin = true; + } + if (io_run_task_work()) + sqt_spin = true; - io_sq_thread_drop_mm(); + if (sqt_spin || !time_after(jiffies, timeout)) { + cond_resched(); + if (sqt_spin) + timeout = jiffies + sqd->sq_thread_idle; + continue; } - if (ret & SQT_SPIN) { - io_run_task_work(); - io_sq_thread_drop_mm(); - cond_resched(); - } else if (ret == SQT_IDLE) { - if (kthread_should_park()) - continue; - list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) + prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE); + if (!io_sqd_events_pending(sqd) && !current->task_works) { + bool needs_sched = true; + + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { io_ring_set_wakeup_flag(ctx); - schedule(); - start_jiffies = jiffies; + + if ((ctx->flags & IORING_SETUP_IOPOLL) && + !list_empty_careful(&ctx->iopoll_list)) { + needs_sched = false; + break; + } + if (io_sqring_entries(ctx)) { + needs_sched = false; + break; + } + } + + if (needs_sched) { + mutex_unlock(&sqd->lock); + schedule(); + mutex_lock(&sqd->lock); + } list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) io_ring_clear_wakeup_flag(ctx); } + + finish_wait(&sqd->wait, &wait); + timeout = jiffies + sqd->sq_thread_idle; } + io_uring_cancel_generic(true, sqd); + sqd->thread = NULL; + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) + io_ring_set_wakeup_flag(ctx); io_run_task_work(); - io_sq_thread_drop_mm(); + mutex_unlock(&sqd->lock); - if (cur_css) - io_sq_thread_unassociate_blkcg(); - if (old_cred) - revert_creds(old_cred); - - kthread_parkme(); - - return 0; + complete(&sqd->exited); + do_exit(0); } struct io_wait_queue { struct wait_queue_entry wq; struct io_ring_ctx *ctx; - unsigned to_wait; + unsigned cq_tail; unsigned nr_timeouts; }; static inline bool io_should_wake(struct io_wait_queue *iowq) { struct io_ring_ctx *ctx = iowq->ctx; + int dist = ctx->cached_cq_tail - (int) iowq->cq_tail; /* * Wake up if we have enough events, or if a timeout occurred since we * started waiting. For timeouts, we always want to return to userspace, * regardless of event count. 
- */ - return io_cqring_events(ctx) >= iowq->to_wait || - atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; + return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; } static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, @@ -6997,7 +7431,7 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, * Cannot safely flush overflowed CQEs from here, ensure we wake up * the task, and the next invocation will do it. */ - if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow)) + if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->check_cq_overflow)) return autoremove_wake_function(curr, mode, wake_flags, key); return -1; } @@ -7008,43 +7442,60 @@ static int io_run_task_work_sig(void) return 1; if (!signal_pending(current)) return 0; - if (current->jobctl & JOBCTL_TASK_WORK) { - spin_lock_irq(&current->sighand->siglock); - current->jobctl &= ~JOBCTL_TASK_WORK; - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - return 1; - } + if (test_thread_flag(TIF_NOTIFY_SIGNAL)) + return -ERESTARTSYS; return -EINTR; } +/* when this returns >0, the caller should retry */ +static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, + struct io_wait_queue *iowq, + ktime_t timeout) +{ + int ret; + + /* make sure we run task_work before checking for signals */ + ret = io_run_task_work_sig(); + if (ret || io_should_wake(iowq)) + return ret; + /* let the caller flush overflows, retry */ + if (test_bit(0, &ctx->check_cq_overflow)) + return 1; + + if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS)) + return -ETIME; + return 1; +} + /* * Wait until events become available, if we don't already have some. The * application must reap them itself, as they reside on the shared cq ring. */ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, - const sigset_t __user *sig, size_t sigsz) + const sigset_t __user *sig, size_t sigsz, + struct __kernel_timespec __user *uts) { - struct io_wait_queue iowq = { - .wq = { - .private = current, - .func = io_wake_function, - .entry = LIST_HEAD_INIT(iowq.wq.entry), - }, - .ctx = ctx, - .to_wait = min_events, - }; + struct io_wait_queue iowq; struct io_rings *rings = ctx->rings; - int ret = 0; + ktime_t timeout = KTIME_MAX; + int ret; do { - io_cqring_overflow_flush(ctx, false, NULL, NULL); + io_cqring_overflow_flush(ctx); if (io_cqring_events(ctx) >= min_events) return 0; if (!io_run_task_work()) break; } while (1); + if (uts) { + struct timespec64 ts; + + if (get_timespec64(&ts, uts)) + return -EFAULT; + timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns()); + } + if (sig) { #ifdef CONFIG_COMPAT if (in_compat_syscall()) @@ -7058,35 +7509,269 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, return ret; } + init_waitqueue_func_entry(&iowq.wq, io_wake_function); + iowq.wq.private = current; + INIT_LIST_HEAD(&iowq.wq.entry); + iowq.ctx = ctx; iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); + iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events; + trace_io_uring_cqring_wait(ctx, min_events); do { - io_cqring_overflow_flush(ctx, false, NULL, NULL); - prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, + /* if we can't even flush overflow, don't wait for more */ + if (!io_cqring_overflow_flush(ctx)) { + ret = -EBUSY; + break; + } + prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, TASK_INTERRUPTIBLE); - /* make sure we run task_work before checking for signals */ - ret = io_run_task_work_sig(); - if (ret > 0) { - finish_wait(&ctx->wait, &iowq.wq); -
continue; - } - else if (ret < 0) - break; - if (io_should_wake(&iowq)) - break; - if (test_bit(0, &ctx->cq_check_overflow)) { - finish_wait(&ctx->wait, &iowq.wq); - continue; - } - schedule(); - } while (1); - finish_wait(&ctx->wait, &iowq.wq); + ret = io_cqring_wait_schedule(ctx, &iowq, timeout); + finish_wait(&ctx->cq_wait, &iowq.wq); + cond_resched(); + } while (ret > 0); restore_saved_sigmask_unless(ret == -EINTR); return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; } +static void io_free_page_table(void **table, size_t size) +{ + unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE); + + for (i = 0; i < nr_tables; i++) + kfree(table[i]); + kfree(table); +} + +static void **io_alloc_page_table(size_t size) +{ + unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE); + size_t init_size = size; + void **table; + + table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT); + if (!table) + return NULL; + + for (i = 0; i < nr_tables; i++) { + unsigned int this_size = min_t(size_t, size, PAGE_SIZE); + + table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT); + if (!table[i]) { + io_free_page_table(table, init_size); + return NULL; + } + size -= this_size; + } + return table; +} + +static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node) +{ + percpu_ref_exit(&ref_node->refs); + kfree(ref_node); +} + +static void io_rsrc_node_ref_zero(struct percpu_ref *ref) +{ + struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs); + struct io_ring_ctx *ctx = node->rsrc_data->ctx; + unsigned long flags; + bool first_add = false; + unsigned long delay = HZ; + + spin_lock_irqsave(&ctx->rsrc_ref_lock, flags); + node->done = true; + + /* if we are mid-quiesce then do not delay */ + if (node->rsrc_data->quiesce) + delay = 0; + + while (!list_empty(&ctx->rsrc_ref_list)) { + node = list_first_entry(&ctx->rsrc_ref_list, + struct io_rsrc_node, node); + /* recycle ref nodes in order */ + if (!node->done) + break; + list_del(&node->node); + first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist); + } + spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags); + + if (first_add) + mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay); +} + +static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx) +{ + struct io_rsrc_node *ref_node; + + ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL); + if (!ref_node) + return NULL; + + if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero, + 0, GFP_KERNEL)) { + kfree(ref_node); + return NULL; + } + INIT_LIST_HEAD(&ref_node->node); + INIT_LIST_HEAD(&ref_node->rsrc_list); + ref_node->done = false; + return ref_node; +} + +static void io_rsrc_node_switch(struct io_ring_ctx *ctx, + struct io_rsrc_data *data_to_kill) +{ + WARN_ON_ONCE(!ctx->rsrc_backup_node); + WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node); + + if (data_to_kill) { + struct io_rsrc_node *rsrc_node = ctx->rsrc_node; + + rsrc_node->rsrc_data = data_to_kill; + spin_lock_irq(&ctx->rsrc_ref_lock); + list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list); + spin_unlock_irq(&ctx->rsrc_ref_lock); + + atomic_inc(&data_to_kill->refs); + percpu_ref_kill(&rsrc_node->refs); + ctx->rsrc_node = NULL; + } + + if (!ctx->rsrc_node) { + ctx->rsrc_node = ctx->rsrc_backup_node; + ctx->rsrc_backup_node = NULL; + } +} + +static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx) +{ + if (ctx->rsrc_backup_node) + return 0; + ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx); + return ctx->rsrc_backup_node ? 
0 : -ENOMEM; +} + +static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx) +{ + int ret; + + /* As we may drop ->uring_lock, other task may have started quiesce */ + if (data->quiesce) + return -ENXIO; + + data->quiesce = true; + do { + ret = io_rsrc_node_switch_start(ctx); + if (ret) + break; + io_rsrc_node_switch(ctx, data); + + /* kill initial ref, already quiesced if zero */ + if (atomic_dec_and_test(&data->refs)) + break; + mutex_unlock(&ctx->uring_lock); + flush_delayed_work(&ctx->rsrc_put_work); + ret = wait_for_completion_interruptible(&data->done); + if (!ret) { + mutex_lock(&ctx->uring_lock); + if (atomic_read(&data->refs) > 0) { + /* + * it has been revived by another thread while + * we were unlocked + */ + mutex_unlock(&ctx->uring_lock); + } else { + break; + } + } + + atomic_inc(&data->refs); + /* wait for all works potentially completing data->done */ + flush_delayed_work(&ctx->rsrc_put_work); + reinit_completion(&data->done); + + ret = io_run_task_work_sig(); + mutex_lock(&ctx->uring_lock); + } while (ret >= 0); + data->quiesce = false; + + return ret; +} + +static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx) +{ + unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK; + unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT; + + return &data->tags[table_idx][off]; +} + +static void io_rsrc_data_free(struct io_rsrc_data *data) +{ + size_t size = data->nr * sizeof(data->tags[0][0]); + + if (data->tags) + io_free_page_table((void **)data->tags, size); + kfree(data); +} + +static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put, + u64 __user *utags, unsigned nr, + struct io_rsrc_data **pdata) +{ + struct io_rsrc_data *data; + int ret = -ENOMEM; + unsigned i; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0])); + if (!data->tags) { + kfree(data); + return -ENOMEM; + } + + data->nr = nr; + data->ctx = ctx; + data->do_put = do_put; + if (utags) { + ret = -EFAULT; + for (i = 0; i < nr; i++) { + u64 *tag_slot = io_get_tag_slot(data, i); + + if (copy_from_user(tag_slot, &utags[i], + sizeof(*tag_slot))) + goto fail; + } + } + + atomic_set(&data->refs, 1); + init_completion(&data->done); + *pdata = data; + return 0; +fail: + io_rsrc_data_free(data); + return ret; +} + +static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files) +{ + table->files = kvcalloc(nr_files, sizeof(table->files[0]), + GFP_KERNEL_ACCOUNT); + return !!table->files; +} + +static void io_free_file_tables(struct io_file_table *table) +{ + kvfree(table->files); + table->files = NULL; +} + static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) { #if defined(CONFIG_UNIX) @@ -7108,92 +7793,97 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) fput(file); } #endif -} - -static void io_file_ref_kill(struct percpu_ref *ref) -{ - struct fixed_file_data *data; - - data = container_of(ref, struct fixed_file_data, refs); - complete(&data->done); -} - -static void io_sqe_files_set_node(struct fixed_file_data *file_data, - struct fixed_file_ref_node *ref_node) -{ - spin_lock_bh(&file_data->lock); - file_data->node = ref_node; - list_add_tail(&ref_node->node, &file_data->ref_list); - spin_unlock_bh(&file_data->lock); - percpu_ref_get(&file_data->refs); + io_free_file_tables(&ctx->file_table); + io_rsrc_data_free(ctx->file_data); + ctx->file_data = NULL; + ctx->nr_user_files = 0; } static int io_sqe_files_unregister(struct 
io_ring_ctx *ctx) { - struct fixed_file_data *data = ctx->file_data; - struct fixed_file_ref_node *backup_node, *ref_node = NULL; - unsigned nr_tables, i; + unsigned nr = ctx->nr_user_files; int ret; - if (!data) + if (!ctx->file_data) return -ENXIO; - backup_node = alloc_fixed_file_ref_node(ctx); - if (!backup_node) - return -ENOMEM; - spin_lock_bh(&data->lock); - ref_node = data->node; - spin_unlock_bh(&data->lock); - if (ref_node) - percpu_ref_kill(&ref_node->refs); - - percpu_ref_kill(&data->refs); - - /* wait for all refs nodes to complete */ - flush_delayed_work(&ctx->file_put_work); - do { - ret = wait_for_completion_interruptible(&data->done); - if (!ret) - break; - ret = io_run_task_work_sig(); - if (ret < 0) { - percpu_ref_resurrect(&data->refs); - reinit_completion(&data->done); - io_sqe_files_set_node(data, backup_node); - return ret; - } - } while (1); - - __io_sqe_files_unregister(ctx); - nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE); - for (i = 0; i < nr_tables; i++) - kfree(data->table[i].files); - kfree(data->table); - percpu_ref_exit(&data->refs); - kfree(data); - ctx->file_data = NULL; + /* + * Quiesce may unlock ->uring_lock, and while it's not held + * prevent new requests using the table. + */ ctx->nr_user_files = 0; - destroy_fixed_file_ref_node(backup_node); - return 0; + ret = io_rsrc_ref_quiesce(ctx->file_data, ctx); + ctx->nr_user_files = nr; + if (!ret) + __io_sqe_files_unregister(ctx); + return ret; +} + +static void io_sq_thread_unpark(struct io_sq_data *sqd) + __releases(&sqd->lock) +{ + WARN_ON_ONCE(sqd->thread == current); + + /* + * Do the dance but not conditional clear_bit() because it'd race with + * other threads incrementing park_pending and setting the bit. + */ + clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); + if (atomic_dec_return(&sqd->park_pending)) + set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); + mutex_unlock(&sqd->lock); +} + +static void io_sq_thread_park(struct io_sq_data *sqd) + __acquires(&sqd->lock) +{ + WARN_ON_ONCE(sqd->thread == current); + + atomic_inc(&sqd->park_pending); + set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); + mutex_lock(&sqd->lock); + if (sqd->thread) + wake_up_process(sqd->thread); +} + +static void io_sq_thread_stop(struct io_sq_data *sqd) +{ + WARN_ON_ONCE(sqd->thread == current); + WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)); + + set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); + mutex_lock(&sqd->lock); + if (sqd->thread) + wake_up_process(sqd->thread); + mutex_unlock(&sqd->lock); + wait_for_completion(&sqd->exited); } static void io_put_sq_data(struct io_sq_data *sqd) { if (refcount_dec_and_test(&sqd->refs)) { - /* - * The park is a bit of a work-around, without it we get - * warning spews on shutdown with SQPOLL set and affinity - * set to a single CPU. 
- */ - if (sqd->thread) { - kthread_park(sqd->thread); - kthread_stop(sqd->thread); - } + WARN_ON_ONCE(atomic_read(&sqd->park_pending)); + io_sq_thread_stop(sqd); kfree(sqd); } } +static void io_sq_thread_finish(struct io_ring_ctx *ctx) +{ + struct io_sq_data *sqd = ctx->sq_data; + + if (sqd) { + io_sq_thread_park(sqd); + list_del_init(&ctx->sqd_list); + io_sqd_update_thread_idle(sqd); + io_sq_thread_unpark(sqd); + + io_put_sq_data(sqd); + ctx->sq_data = NULL; + } +} + static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p) { struct io_ring_ctx *ctx_attach; @@ -7214,92 +7904,46 @@ static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p) fdput(f); return ERR_PTR(-EINVAL); } + if (sqd->task_tgid != current->tgid) { + fdput(f); + return ERR_PTR(-EPERM); + } refcount_inc(&sqd->refs); fdput(f); return sqd; } -static struct io_sq_data *io_get_sq_data(struct io_uring_params *p) +static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, + bool *attached) { struct io_sq_data *sqd; - if (p->flags & IORING_SETUP_ATTACH_WQ) - return io_attach_sq_data(p); + *attached = false; + if (p->flags & IORING_SETUP_ATTACH_WQ) { + sqd = io_attach_sq_data(p); + if (!IS_ERR(sqd)) { + *attached = true; + return sqd; + } + /* fall through for EPERM case, setup new sqd/task */ + if (PTR_ERR(sqd) != -EPERM) + return sqd; + } sqd = kzalloc(sizeof(*sqd), GFP_KERNEL); if (!sqd) return ERR_PTR(-ENOMEM); + atomic_set(&sqd->park_pending, 0); refcount_set(&sqd->refs, 1); INIT_LIST_HEAD(&sqd->ctx_list); - INIT_LIST_HEAD(&sqd->ctx_new_list); - mutex_init(&sqd->ctx_lock); mutex_init(&sqd->lock); init_waitqueue_head(&sqd->wait); + init_completion(&sqd->exited); return sqd; } -static void io_sq_thread_unpark(struct io_sq_data *sqd) - __releases(&sqd->lock) -{ - if (!sqd->thread) - return; - kthread_unpark(sqd->thread); - mutex_unlock(&sqd->lock); -} - -static void io_sq_thread_park(struct io_sq_data *sqd) - __acquires(&sqd->lock) -{ - if (!sqd->thread) - return; - mutex_lock(&sqd->lock); - kthread_park(sqd->thread); -} - -static void io_sq_thread_stop(struct io_ring_ctx *ctx) -{ - struct io_sq_data *sqd = ctx->sq_data; - - if (sqd) { - if (sqd->thread) { - /* - * We may arrive here from the error branch in - * io_sq_offload_create() where the kthread is created - * without being waked up, thus wake it up now to make - * sure the wait will complete. 
- */ - wake_up_process(sqd->thread); - wait_for_completion(&ctx->sq_thread_comp); - - io_sq_thread_park(sqd); - } - - mutex_lock(&sqd->ctx_lock); - list_del(&ctx->sqd_list); - mutex_unlock(&sqd->ctx_lock); - - if (sqd->thread) { - finish_wait(&sqd->wait, &ctx->sqo_wait_entry); - io_sq_thread_unpark(sqd); - } - - io_put_sq_data(sqd); - ctx->sq_data = NULL; - } -} - -static void io_finish_async(struct io_ring_ctx *ctx) -{ - io_sq_thread_stop(ctx); - - if (ctx->io_wq) { - io_wq_destroy(ctx->io_wq); - ctx->io_wq = NULL; - } -} - #if defined(CONFIG_UNIX) /* * Ensure the UNIX gc is aware of our file set, so we are certain that @@ -7324,9 +7968,10 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) } skb->sk = sk; + skb->scm_io_uring = 1; nr_files = 0; - fpl->user = get_uid(ctx->user); + fpl->user = get_uid(current_user()); for (i = 0; i < nr; i++) { struct file *file = io_file_from_index(ctx, i + offset); @@ -7345,8 +7990,12 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) refcount_add(skb->truesize, &sk->sk_wmem_alloc); skb_queue_head(&sk->sk_receive_queue, skb); - for (i = 0; i < nr_files; i++) - fput(fpl->fp[i]); + for (i = 0; i < nr; i++) { + struct file *file = io_file_from_index(ctx, i + offset); + + if (file) + fput(file); + } } else { kfree_skb(skb); free_uid(fpl->user); @@ -7398,35 +8047,9 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx) } #endif -static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data, - unsigned nr_tables, unsigned nr_files) -{ - int i; - - for (i = 0; i < nr_tables; i++) { - struct fixed_file_table *table = &file_data->table[i]; - unsigned this_files; - - this_files = min(nr_files, IORING_MAX_FILES_TABLE); - table->files = kcalloc(this_files, sizeof(struct file *), - GFP_KERNEL_ACCOUNT); - if (!table->files) - break; - nr_files -= this_files; - } - - if (i == nr_tables) - return 0; - - for (i = 0; i < nr_tables; i++) { - struct fixed_file_table *table = &file_data->table[i]; - kfree(table->files); - } - return 1; -} - -static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file) +static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc) { + struct file *file = prsrc->file; #if defined(CONFIG_UNIX) struct sock *sock = ctx->ring_sock->sk; struct sk_buff_head list, *head = &sock->sk_receive_queue; @@ -7487,117 +8110,61 @@ static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file) #endif } -struct io_file_put { - struct list_head list; - struct file *file; -}; - -static void __io_file_put_work(struct fixed_file_ref_node *ref_node) +static void __io_rsrc_put_work(struct io_rsrc_node *ref_node) { - struct fixed_file_data *file_data = ref_node->file_data; - struct io_ring_ctx *ctx = file_data->ctx; - struct io_file_put *pfile, *tmp; + struct io_rsrc_data *rsrc_data = ref_node->rsrc_data; + struct io_ring_ctx *ctx = rsrc_data->ctx; + struct io_rsrc_put *prsrc, *tmp; - list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) { - list_del(&pfile->list); - io_ring_file_put(ctx, pfile->file); - kfree(pfile); + list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) { + list_del(&prsrc->list); + + if (prsrc->tag) { + bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL; + + io_ring_submit_lock(ctx, lock_ring); + spin_lock(&ctx->completion_lock); + io_fill_cqe_aux(ctx, prsrc->tag, 0, 0); + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + io_cqring_ev_posted(ctx); + io_ring_submit_unlock(ctx, lock_ring); + } + + 
rsrc_data->do_put(ctx, prsrc); + kfree(prsrc); } - percpu_ref_exit(&ref_node->refs); - kfree(ref_node); - percpu_ref_put(&file_data->refs); + io_rsrc_node_destroy(ref_node); + if (atomic_dec_and_test(&rsrc_data->refs)) + complete(&rsrc_data->done); } -static void io_file_put_work(struct work_struct *work) +static void io_rsrc_put_work(struct work_struct *work) { struct io_ring_ctx *ctx; struct llist_node *node; - ctx = container_of(work, struct io_ring_ctx, file_put_work.work); - node = llist_del_all(&ctx->file_put_llist); + ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work); + node = llist_del_all(&ctx->rsrc_put_llist); while (node) { - struct fixed_file_ref_node *ref_node; + struct io_rsrc_node *ref_node; struct llist_node *next = node->next; - ref_node = llist_entry(node, struct fixed_file_ref_node, llist); - __io_file_put_work(ref_node); + ref_node = llist_entry(node, struct io_rsrc_node, llist); + __io_rsrc_put_work(ref_node); node = next; } } -static void io_file_data_ref_zero(struct percpu_ref *ref) -{ - struct fixed_file_ref_node *ref_node; - struct fixed_file_data *data; - struct io_ring_ctx *ctx; - bool first_add = false; - int delay = HZ; - - ref_node = container_of(ref, struct fixed_file_ref_node, refs); - data = ref_node->file_data; - ctx = data->ctx; - - spin_lock_bh(&data->lock); - ref_node->done = true; - - while (!list_empty(&data->ref_list)) { - ref_node = list_first_entry(&data->ref_list, - struct fixed_file_ref_node, node); - /* recycle ref nodes in order */ - if (!ref_node->done) - break; - list_del(&ref_node->node); - first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist); - } - spin_unlock_bh(&data->lock); - - if (percpu_ref_is_dying(&data->refs)) - delay = 0; - - if (!delay) - mod_delayed_work(system_wq, &ctx->file_put_work, 0); - else if (first_add) - queue_delayed_work(system_wq, &ctx->file_put_work, delay); -} - -static struct fixed_file_ref_node *alloc_fixed_file_ref_node( - struct io_ring_ctx *ctx) -{ - struct fixed_file_ref_node *ref_node; - - ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL); - if (!ref_node) - return NULL; - - if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero, - 0, GFP_KERNEL)) { - kfree(ref_node); - return NULL; - } - INIT_LIST_HEAD(&ref_node->node); - INIT_LIST_HEAD(&ref_node->file_list); - ref_node->file_data = ctx->file_data; - ref_node->done = false; - return ref_node; -} - -static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node) -{ - percpu_ref_exit(&ref_node->refs); - kfree(ref_node); -} - static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, - unsigned nr_args) + unsigned nr_args, u64 __user *tags) { __s32 __user *fds = (__s32 __user *) arg; - unsigned nr_tables, i; struct file *file; - int fd, ret = -ENOMEM; - struct fixed_file_ref_node *ref_node; - struct fixed_file_data *file_data; + int fd, ret; + unsigned i; if (ctx->file_data) return -EBUSY; @@ -7607,44 +8174,34 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, return -EMFILE; if (nr_args > rlimit(RLIMIT_NOFILE)) return -EMFILE; + ret = io_rsrc_node_switch_start(ctx); + if (ret) + return ret; + ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args, + &ctx->file_data); + if (ret) + return ret; - file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL_ACCOUNT); - if (!file_data) - return -ENOMEM; - file_data->ctx = ctx; - init_completion(&file_data->done); - INIT_LIST_HEAD(&file_data->ref_list); - spin_lock_init(&file_data->lock); - - nr_tables = DIV_ROUND_UP(nr_args, 
IORING_MAX_FILES_TABLE); - file_data->table = kcalloc(nr_tables, sizeof(*file_data->table), - GFP_KERNEL_ACCOUNT); - if (!file_data->table) + ret = -ENOMEM; + if (!io_alloc_file_tables(&ctx->file_table, nr_args)) goto out_free; - if (percpu_ref_init(&file_data->refs, io_file_ref_kill, - PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) - goto out_free; - - if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args)) - goto out_ref; - ctx->file_data = file_data; - for (i = 0; i < nr_args; i++, ctx->nr_user_files++) { - struct fixed_file_table *table; - unsigned index; - if (copy_from_user(&fd, &fds[i], sizeof(fd))) { ret = -EFAULT; goto out_fput; } /* allow sparse sets */ - if (fd == -1) + if (fd == -1) { + ret = -EINVAL; + if (unlikely(*io_get_tag_slot(ctx->file_data, i))) + goto out_fput; continue; + } file = fget(fd); ret = -EBADF; - if (!file) + if (unlikely(!file)) goto out_fput; /* @@ -7658,24 +8215,16 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, fput(file); goto out_fput; } - table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT]; - index = i & IORING_FILE_TABLE_MASK; - table->files[index] = file; + io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file); } ret = io_sqe_files_scm(ctx); if (ret) { - io_sqe_files_unregister(ctx); + __io_sqe_files_unregister(ctx); return ret; } - ref_node = alloc_fixed_file_ref_node(ctx); - if (!ref_node) { - io_sqe_files_unregister(ctx); - return -ENOMEM; - } - - io_sqe_files_set_node(file_data, ref_node); + io_rsrc_node_switch(ctx, NULL); return ret; out_fput: for (i = 0; i < ctx->nr_user_files; i++) { @@ -7683,14 +8232,10 @@ out_fput: if (file) fput(file); } - for (i = 0; i < nr_tables; i++) - kfree(file_data->table[i].files); + io_free_file_tables(&ctx->file_table); ctx->nr_user_files = 0; -out_ref: - percpu_ref_exit(&file_data->refs); out_free: - kfree(file_data->table); - kfree(file_data); + io_rsrc_data_free(ctx->file_data); ctx->file_data = NULL; return ret; } @@ -7738,63 +8283,159 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file, #endif } -static int io_queue_file_removal(struct fixed_file_data *data, - struct file *file) +static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, + struct io_rsrc_node *node, void *rsrc) { - struct io_file_put *pfile; - struct fixed_file_ref_node *ref_node = data->node; + u64 *tag_slot = io_get_tag_slot(data, idx); + struct io_rsrc_put *prsrc; - pfile = kzalloc(sizeof(*pfile), GFP_KERNEL); - if (!pfile) + prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL); + if (!prsrc) return -ENOMEM; - pfile->file = file; - list_add(&pfile->list, &ref_node->file_list); - + prsrc->tag = *tag_slot; + *tag_slot = 0; + prsrc->rsrc = rsrc; + list_add(&prsrc->list, &node->rsrc_list); return 0; } +static int io_install_fixed_file(struct io_kiocb *req, struct file *file, + unsigned int issue_flags, u32 slot_index) +{ + struct io_ring_ctx *ctx = req->ctx; + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + bool needs_switch = false; + struct io_fixed_file *file_slot; + int ret = -EBADF; + + io_ring_submit_lock(ctx, !force_nonblock); + if (file->f_op == &io_uring_fops) + goto err; + ret = -ENXIO; + if (!ctx->file_data) + goto err; + ret = -EINVAL; + if (slot_index >= ctx->nr_user_files) + goto err; + + slot_index = array_index_nospec(slot_index, ctx->nr_user_files); + file_slot = io_fixed_file_slot(&ctx->file_table, slot_index); + + if (file_slot->file_ptr) { + struct file *old_file; + + ret = io_rsrc_node_switch_start(ctx); + if (ret) + goto err; + + 
old_file = (struct file *)(file_slot->file_ptr & FFS_MASK); + ret = io_queue_rsrc_removal(ctx->file_data, slot_index, + ctx->rsrc_node, old_file); + if (ret) + goto err; + file_slot->file_ptr = 0; + needs_switch = true; + } + + *io_get_tag_slot(ctx->file_data, slot_index) = 0; + io_fixed_file_set(file_slot, file); + ret = io_sqe_file_register(ctx, file, slot_index); + if (ret) { + file_slot->file_ptr = 0; + goto err; + } + + ret = 0; +err: + if (needs_switch) + io_rsrc_node_switch(ctx, ctx->file_data); + io_ring_submit_unlock(ctx, !force_nonblock); + if (ret) + fput(file); + return ret; +} + +static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) +{ + unsigned int offset = req->close.file_slot - 1; + struct io_ring_ctx *ctx = req->ctx; + struct io_fixed_file *file_slot; + struct file *file; + int ret; + + io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + ret = -ENXIO; + if (unlikely(!ctx->file_data)) + goto out; + ret = -EINVAL; + if (offset >= ctx->nr_user_files) + goto out; + ret = io_rsrc_node_switch_start(ctx); + if (ret) + goto out; + + offset = array_index_nospec(offset, ctx->nr_user_files); + file_slot = io_fixed_file_slot(&ctx->file_table, offset); + ret = -EBADF; + if (!file_slot->file_ptr) + goto out; + + file = (struct file *)(file_slot->file_ptr & FFS_MASK); + ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file); + if (ret) + goto out; + + file_slot->file_ptr = 0; + io_rsrc_node_switch(ctx, ctx->file_data); + ret = 0; +out: + io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + return ret; +} + static int __io_sqe_files_update(struct io_ring_ctx *ctx, - struct io_uring_files_update *up, + struct io_uring_rsrc_update2 *up, unsigned nr_args) { - struct fixed_file_data *data = ctx->file_data; - struct fixed_file_ref_node *ref_node; + u64 __user *tags = u64_to_user_ptr(up->tags); + __s32 __user *fds = u64_to_user_ptr(up->data); + struct io_rsrc_data *data = ctx->file_data; + struct io_fixed_file *file_slot; struct file *file; - __s32 __user *fds; - int fd, i, err; - __u32 done; + int fd, i, err = 0; + unsigned int done; bool needs_switch = false; - if (check_add_overflow(up->offset, nr_args, &done)) - return -EOVERFLOW; - if (done > ctx->nr_user_files) + if (!ctx->file_data) + return -ENXIO; + if (up->offset + nr_args > ctx->nr_user_files) return -EINVAL; - ref_node = alloc_fixed_file_ref_node(ctx); - if (!ref_node) - return -ENOMEM; + for (done = 0; done < nr_args; done++) { + u64 tag = 0; - done = 0; - fds = u64_to_user_ptr(up->fds); - while (nr_args) { - struct fixed_file_table *table; - unsigned index; - - err = 0; - if (copy_from_user(&fd, &fds[done], sizeof(fd))) { + if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) || + copy_from_user(&fd, &fds[done], sizeof(fd))) { err = -EFAULT; break; } - i = array_index_nospec(up->offset, ctx->nr_user_files); - table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT]; - index = i & IORING_FILE_TABLE_MASK; - if (table->files[index]) { - file = table->files[index]; - err = io_queue_file_removal(data, file); + if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) { + err = -EINVAL; + break; + } + if (fd == IORING_REGISTER_FILES_SKIP) + continue; + + i = array_index_nospec(up->offset + done, ctx->nr_user_files); + file_slot = io_fixed_file_slot(&ctx->file_table, i); + + if (file_slot->file_ptr) { + file = (struct file *)(file_slot->file_ptr & FFS_MASK); + err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file); if (err) break; - table->files[index] 
= NULL; + file_slot->file_ptr = 0; needs_switch = true; } if (fd != -1) { @@ -7816,106 +8457,61 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, err = -EBADF; break; } - table->files[index] = file; + *io_get_tag_slot(data, i) = tag; + io_fixed_file_set(file_slot, file); err = io_sqe_file_register(ctx, file, i); if (err) { - table->files[index] = NULL; + file_slot->file_ptr = 0; fput(file); break; } } - nr_args--; - done++; - up->offset++; } - if (needs_switch) { - percpu_ref_kill(&data->node->refs); - io_sqe_files_set_node(data, ref_node); - } else - destroy_fixed_file_ref_node(ref_node); - + if (needs_switch) + io_rsrc_node_switch(ctx, data); return done ? done : err; } -static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg, - unsigned nr_args) -{ - struct io_uring_files_update up; - - if (!ctx->file_data) - return -ENXIO; - if (!nr_args) - return -EINVAL; - if (copy_from_user(&up, arg, sizeof(up))) - return -EFAULT; - if (up.resv) - return -EINVAL; - - return __io_sqe_files_update(ctx, &up, nr_args); -} - -static void io_free_work(struct io_wq_work *work) -{ - struct io_kiocb *req = container_of(work, struct io_kiocb, work); - - /* Consider that io_steal_work() relies on this ref */ - io_put_req(req); -} - -static int io_init_wq_offload(struct io_ring_ctx *ctx, - struct io_uring_params *p) +static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx, + struct task_struct *task) { + struct io_wq_hash *hash; struct io_wq_data data; - struct fd f; - struct io_ring_ctx *ctx_attach; unsigned int concurrency; - int ret = 0; - data.user = ctx->user; - data.free_work = io_free_work; + mutex_lock(&ctx->uring_lock); + hash = ctx->hash_map; + if (!hash) { + hash = kzalloc(sizeof(*hash), GFP_KERNEL); + if (!hash) { + mutex_unlock(&ctx->uring_lock); + return ERR_PTR(-ENOMEM); + } + refcount_set(&hash->refs, 1); + init_waitqueue_head(&hash->wait); + ctx->hash_map = hash; + } + mutex_unlock(&ctx->uring_lock); + + data.hash = hash; + data.task = task; + data.free_work = io_wq_free_work; data.do_work = io_wq_submit_work; - if (!(p->flags & IORING_SETUP_ATTACH_WQ)) { - /* Do QD, or 4 * CPUS, whatever is smallest */ - concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); + /* Do QD, or 4 * CPUS, whatever is smallest */ + concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); - ctx->io_wq = io_wq_create(concurrency, &data); - if (IS_ERR(ctx->io_wq)) { - ret = PTR_ERR(ctx->io_wq); - ctx->io_wq = NULL; - } - return ret; - } - - f = fdget(p->wq_fd); - if (!f.file) - return -EBADF; - - if (f.file->f_op != &io_uring_fops) { - ret = -EINVAL; - goto out_fput; - } - - ctx_attach = f.file->private_data; - /* @io_wq is protected by holding the fd */ - if (!io_wq_get(ctx_attach->io_wq, &data)) { - ret = -EINVAL; - goto out_fput; - } - - ctx->io_wq = ctx_attach->io_wq; -out_fput: - fdput(f); - return ret; + return io_wq_create(concurrency, &data); } -static int io_uring_alloc_task_context(struct task_struct *task) +static int io_uring_alloc_task_context(struct task_struct *task, + struct io_ring_ctx *ctx) { struct io_uring_task *tctx; int ret; - tctx = kmalloc(sizeof(*tctx), GFP_KERNEL); + tctx = kzalloc(sizeof(*tctx), GFP_KERNEL); if (unlikely(!tctx)) return -ENOMEM; @@ -7925,14 +8521,22 @@ static int io_uring_alloc_task_context(struct task_struct *task) return ret; } + tctx->io_wq = io_init_wq_offload(ctx, task); + if (IS_ERR(tctx->io_wq)) { + ret = PTR_ERR(tctx->io_wq); + percpu_counter_destroy(&tctx->inflight); + kfree(tctx); + return ret; + } + xa_init(&tctx->xa); 
init_waitqueue_head(&tctx->wait); - tctx->last = NULL; atomic_set(&tctx->in_idle, 0); - tctx->sqpoll = false; - io_init_identity(&tctx->__identity); - tctx->identity = &tctx->__identity; + atomic_set(&tctx->inflight_tracked, 0); task->io_uring = tctx; + spin_lock_init(&tctx->task_lock); + INIT_WQ_LIST(&tctx->task_list); + init_task_work(&tctx->task_work, tctx_task_work); return 0; } @@ -7941,9 +8545,9 @@ void __io_uring_free(struct task_struct *tsk) struct io_uring_task *tctx = tsk->io_uring; WARN_ON_ONCE(!xa_empty(&tctx->xa)); - WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1); - if (tctx->identity != &tctx->__identity) - kfree(tctx->identity); + WARN_ON_ONCE(tctx->io_wq); + WARN_ON_ONCE(tctx->cached_refs); + percpu_counter_destroy(&tctx->inflight); kfree(tctx); tsk->io_uring = NULL; @@ -7954,54 +8558,71 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, { int ret; + /* Retain compatibility with failing for an invalid attach attempt */ + if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) == + IORING_SETUP_ATTACH_WQ) { + struct fd f; + + f = fdget(p->wq_fd); + if (!f.file) + return -ENXIO; + if (f.file->f_op != &io_uring_fops) { + fdput(f); + return -EINVAL; + } + fdput(f); + } if (ctx->flags & IORING_SETUP_SQPOLL) { + struct task_struct *tsk; struct io_sq_data *sqd; + bool attached; - ret = -EPERM; - if (!capable(CAP_SYS_ADMIN)) - goto err; - - sqd = io_get_sq_data(p); + sqd = io_get_sq_data(p, &attached); if (IS_ERR(sqd)) { ret = PTR_ERR(sqd); goto err; } + ctx->sq_creds = get_current_cred(); ctx->sq_data = sqd; - io_sq_thread_park(sqd); - mutex_lock(&sqd->ctx_lock); - list_add(&ctx->sqd_list, &sqd->ctx_new_list); - mutex_unlock(&sqd->ctx_lock); - io_sq_thread_unpark(sqd); - ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); if (!ctx->sq_thread_idle) ctx->sq_thread_idle = HZ; - if (sqd->thread) - goto done; + io_sq_thread_park(sqd); + list_add(&ctx->sqd_list, &sqd->ctx_list); + io_sqd_update_thread_idle(sqd); + /* don't attach to a dying SQPOLL thread, would be racy */ + ret = (attached && !sqd->thread) ? 
-ENXIO : 0; + io_sq_thread_unpark(sqd); + + if (ret < 0) + goto err; + if (attached) + return 0; if (p->flags & IORING_SETUP_SQ_AFF) { int cpu = p->sq_thread_cpu; ret = -EINVAL; - if (cpu >= nr_cpu_ids) - goto err; - if (!cpu_online(cpu)) - goto err; - - sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd, - cpu, "io_uring-sq"); + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) + goto err_sqpoll; + sqd->sq_cpu = cpu; } else { - sqd->thread = kthread_create(io_sq_thread, sqd, - "io_uring-sq"); + sqd->sq_cpu = -1; } - if (IS_ERR(sqd->thread)) { - ret = PTR_ERR(sqd->thread); - sqd->thread = NULL; - goto err; + + sqd->task_pid = current->pid; + sqd->task_tgid = current->tgid; + tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); + if (IS_ERR(tsk)) { + ret = PTR_ERR(tsk); + goto err_sqpoll; } - ret = io_uring_alloc_task_context(sqd->thread); + + sqd->thread = tsk; + ret = io_uring_alloc_task_context(tsk, ctx); + wake_up_new_task(tsk); if (ret) goto err; } else if (p->flags & IORING_SETUP_SQ_AFF) { @@ -8010,26 +8631,14 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, goto err; } -done: - ret = io_init_wq_offload(ctx, p); - if (ret) - goto err; - return 0; +err_sqpoll: + complete(&ctx->sq_data->exited); err: - io_finish_async(ctx); + io_sq_thread_finish(ctx); return ret; } -static void io_sq_offload_start(struct io_ring_ctx *ctx) -{ - struct io_sq_data *sqd = ctx->sq_data; - - ctx->flags &= ~IORING_SETUP_R_DISABLED; - if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd && sqd->thread) - wake_up_process(sqd->thread); -} - static inline void __io_unaccount_mem(struct user_struct *user, unsigned long nr_pages) { @@ -8055,37 +8664,27 @@ static inline int __io_account_mem(struct user_struct *user, return 0; } -static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages, - enum io_mem_account acct) +static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages) { - if (ctx->limit_mem) + if (ctx->user) __io_unaccount_mem(ctx->user, nr_pages); - if (ctx->mm_account) { - if (acct == ACCT_LOCKED) - ctx->mm_account->locked_vm -= nr_pages; - else if (acct == ACCT_PINNED) - atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm); - } + if (ctx->mm_account) + atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm); } -static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages, - enum io_mem_account acct) +static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages) { int ret; - if (ctx->limit_mem) { + if (ctx->user) { ret = __io_account_mem(ctx->user, nr_pages); if (ret) return ret; } - if (ctx->mm_account) { - if (acct == ACCT_LOCKED) - ctx->mm_account->locked_vm += nr_pages; - else if (acct == ACCT_PINNED) - atomic64_add(nr_pages, &ctx->mm_account->pinned_vm); - } + if (ctx->mm_account) + atomic64_add(nr_pages, &ctx->mm_account->pinned_vm); return 0; } @@ -8104,10 +8703,9 @@ static void io_mem_free(void *ptr) static void *io_mem_alloc(size_t size) { - gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP | - __GFP_NORETRY; + gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP; - return (void *) __get_free_pages(gfp_flags, get_order(size)); + return (void *) __get_free_pages(gfp, get_order(size)); } static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, @@ -8139,41 +8737,58 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, return off; } -static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries) +static void io_buffer_unmap(struct io_ring_ctx *ctx, struct 
io_mapped_ubuf **slot) { - size_t pages; + struct io_mapped_ubuf *imu = *slot; + unsigned int i; - pages = (size_t)1 << get_order( - rings_size(sq_entries, cq_entries, NULL)); - pages += (size_t)1 << get_order( - array_size(sizeof(struct io_uring_sqe), sq_entries)); - - return pages; + if (imu != ctx->dummy_ubuf) { + for (i = 0; i < imu->nr_bvecs; i++) + unpin_user_page(imu->bvec[i].bv_page); + if (imu->acct_pages) + io_unaccount_mem(ctx, imu->acct_pages); + kvfree(imu); + } + *slot = NULL; } -static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx) +static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc) { - int i, j; + io_buffer_unmap(ctx, &prsrc->buf); + prsrc->buf = NULL; +} - if (!ctx->user_bufs) +static void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx) +{ + unsigned int i; + + for (i = 0; i < ctx->nr_user_bufs; i++) + io_buffer_unmap(ctx, &ctx->user_bufs[i]); + kfree(ctx->user_bufs); + io_rsrc_data_free(ctx->buf_data); + ctx->user_bufs = NULL; + ctx->buf_data = NULL; + ctx->nr_user_bufs = 0; +} + +static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx) +{ + unsigned nr = ctx->nr_user_bufs; + int ret; + + if (!ctx->buf_data) return -ENXIO; - for (i = 0; i < ctx->nr_user_bufs; i++) { - struct io_mapped_ubuf *imu = &ctx->user_bufs[i]; - - for (j = 0; j < imu->nr_bvecs; j++) - unpin_user_page(imu->bvec[j].bv_page); - - if (imu->acct_pages) - io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED); - kvfree(imu->bvec); - imu->nr_bvecs = 0; - } - - kfree(ctx->user_bufs); - ctx->user_bufs = NULL; + /* + * Quiesce may unlock ->uring_lock, and while it's not held + * prevent new requests using the table. + */ ctx->nr_user_bufs = 0; - return 0; + ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx); + ctx->nr_user_bufs = nr; + if (!ret) + __io_sqe_buffers_unregister(ctx); + return ret; } static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst, @@ -8225,7 +8840,7 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages, /* check previously registered pages */ for (i = 0; i < ctx->nr_user_bufs; i++) { - struct io_mapped_ubuf *imu = &ctx->user_bufs[i]; + struct io_mapped_ubuf *imu = ctx->user_bufs[i]; for (j = 0; j < imu->nr_bvecs; j++) { if (!PageCompound(imu->bvec[j].bv_page)) @@ -8244,6 +8859,7 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages, { int i, ret; + imu->acct_pages = 0; for (i = 0; i < nr_pages; i++) { if (!PageCompound(pages[i])) { imu->acct_pages++; @@ -8263,149 +8879,254 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages, if (!imu->acct_pages) return 0; - ret = io_account_mem(ctx, imu->acct_pages, ACCT_PINNED); + ret = io_account_mem(ctx, imu->acct_pages); if (ret) imu->acct_pages = 0; return ret; } -static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg, - unsigned nr_args) +static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov, + struct io_mapped_ubuf **pimu, + struct page **last_hpage) { + struct io_mapped_ubuf *imu = NULL; struct vm_area_struct **vmas = NULL; struct page **pages = NULL; + unsigned long off, start, end, ubuf; + size_t size; + int ret, pret, nr_pages, i; + + if (!iov->iov_base) { + *pimu = ctx->dummy_ubuf; + return 0; + } + + ubuf = (unsigned long) iov->iov_base; + end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; + start = ubuf >> PAGE_SHIFT; + nr_pages = end - start; + + *pimu = NULL; + ret = -ENOMEM; + + pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); + if 
(!pages) + goto done; + + vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *), + GFP_KERNEL); + if (!vmas) + goto done; + + imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL); + if (!imu) + goto done; + + ret = 0; + mmap_read_lock(current->mm); + pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM, + pages, vmas); + if (pret == nr_pages) { + /* don't support file backed memory */ + for (i = 0; i < nr_pages; i++) { + struct vm_area_struct *vma = vmas[i]; + + if (vma_is_shmem(vma)) + continue; + if (vma->vm_file && + !is_file_hugepages(vma->vm_file)) { + ret = -EOPNOTSUPP; + break; + } + } + } else { + ret = pret < 0 ? pret : -EFAULT; + } + mmap_read_unlock(current->mm); + if (ret) { + /* + * if we did partial map, or found file backed vmas, + * release any pages we did get + */ + if (pret > 0) + unpin_user_pages(pages, pret); + goto done; + } + + ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage); + if (ret) { + unpin_user_pages(pages, pret); + goto done; + } + + off = ubuf & ~PAGE_MASK; + size = iov->iov_len; + for (i = 0; i < nr_pages; i++) { + size_t vec_len; + + vec_len = min_t(size_t, size, PAGE_SIZE - off); + imu->bvec[i].bv_page = pages[i]; + imu->bvec[i].bv_len = vec_len; + imu->bvec[i].bv_offset = off; + off = 0; + size -= vec_len; + } + /* store original address for later verification */ + imu->ubuf = ubuf; + imu->ubuf_end = ubuf + iov->iov_len; + imu->nr_bvecs = nr_pages; + *pimu = imu; + ret = 0; +done: + if (ret) + kvfree(imu); + kvfree(pages); + kvfree(vmas); + return ret; +} + +static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args) +{ + ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL); + return ctx->user_bufs ? 0 : -ENOMEM; +} + +static int io_buffer_validate(struct iovec *iov) +{ + unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1); + + /* + * Don't impose further limits on the size and buffer + * constraints here, we'll -EINVAL later when IO is + * submitted if they are wrong. + */ + if (!iov->iov_base) + return iov->iov_len ? 
-EFAULT : 0; + if (!iov->iov_len) + return -EFAULT; + + /* arbitrary limit, but we need something */ + if (iov->iov_len > SZ_1G) + return -EFAULT; + + if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp)) + return -EOVERFLOW; + + return 0; +} + +static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg, + unsigned int nr_args, u64 __user *tags) +{ struct page *last_hpage = NULL; - int i, j, got_pages = 0; - int ret = -EINVAL; + struct io_rsrc_data *data; + int i, ret; + struct iovec iov; if (ctx->user_bufs) return -EBUSY; - if (!nr_args || nr_args > UIO_MAXIOV) + if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS) return -EINVAL; + ret = io_rsrc_node_switch_start(ctx); + if (ret) + return ret; + ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data); + if (ret) + return ret; + ret = io_buffers_map_alloc(ctx, nr_args); + if (ret) { + io_rsrc_data_free(data); + return ret; + } - ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf), - GFP_KERNEL); - if (!ctx->user_bufs) - return -ENOMEM; - - for (i = 0; i < nr_args; i++) { - struct io_mapped_ubuf *imu = &ctx->user_bufs[i]; - unsigned long off, start, end, ubuf; - int pret, nr_pages; - struct iovec iov; - size_t size; - + for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) { ret = io_copy_iov(ctx, &iov, arg, i); if (ret) - goto err; - - /* - * Don't impose further limits on the size and buffer - * constraints here, we'll -EINVAL later when IO is - * submitted if they are wrong. - */ - ret = -EFAULT; - if (!iov.iov_base || !iov.iov_len) - goto err; - - /* arbitrary limit, but we need something */ - if (iov.iov_len > SZ_1G) - goto err; - - ubuf = (unsigned long) iov.iov_base; - end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; - start = ubuf >> PAGE_SHIFT; - nr_pages = end - start; - - ret = 0; - if (!pages || nr_pages > got_pages) { - kvfree(vmas); - kvfree(pages); - pages = kvmalloc_array(nr_pages, sizeof(struct page *), - GFP_KERNEL); - vmas = kvmalloc_array(nr_pages, - sizeof(struct vm_area_struct *), - GFP_KERNEL); - if (!pages || !vmas) { - ret = -ENOMEM; - goto err; - } - got_pages = nr_pages; + break; + ret = io_buffer_validate(&iov); + if (ret) + break; + if (!iov.iov_base && *io_get_tag_slot(data, i)) { + ret = -EINVAL; + break; } - imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec), - GFP_KERNEL); - ret = -ENOMEM; - if (!imu->bvec) - goto err; - - ret = 0; - mmap_read_lock(current->mm); - pret = pin_user_pages(ubuf, nr_pages, - FOLL_WRITE | FOLL_LONGTERM, - pages, vmas); - if (pret == nr_pages) { - /* don't support file backed memory */ - for (j = 0; j < nr_pages; j++) { - struct vm_area_struct *vma = vmas[j]; - - if (vma->vm_file && - !is_file_hugepages(vma->vm_file)) { - ret = -EOPNOTSUPP; - break; - } - } - } else { - ret = pret < 0 ? 
pret : -EFAULT; - } - mmap_read_unlock(current->mm); - if (ret) { - /* - * if we did partial map, or found file backed vmas, - * release any pages we did get - */ - if (pret > 0) - unpin_user_pages(pages, pret); - kvfree(imu->bvec); - goto err; - } - - ret = io_buffer_account_pin(ctx, pages, pret, imu, &last_hpage); - if (ret) { - unpin_user_pages(pages, pret); - kvfree(imu->bvec); - goto err; - } - - off = ubuf & ~PAGE_MASK; - size = iov.iov_len; - for (j = 0; j < nr_pages; j++) { - size_t vec_len; - - vec_len = min_t(size_t, size, PAGE_SIZE - off); - imu->bvec[j].bv_page = pages[j]; - imu->bvec[j].bv_len = vec_len; - imu->bvec[j].bv_offset = off; - off = 0; - size -= vec_len; - } - /* store original address for later verification */ - imu->ubuf = ubuf; - imu->len = iov.iov_len; - imu->nr_bvecs = nr_pages; - - ctx->nr_user_bufs++; + ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i], + &last_hpage); + if (ret) + break; } - kvfree(pages); - kvfree(vmas); - return 0; -err: - kvfree(pages); - kvfree(vmas); - io_sqe_buffer_unregister(ctx); + + WARN_ON_ONCE(ctx->buf_data); + + ctx->buf_data = data; + if (ret) + __io_sqe_buffers_unregister(ctx); + else + io_rsrc_node_switch(ctx, NULL); return ret; } +static int __io_sqe_buffers_update(struct io_ring_ctx *ctx, + struct io_uring_rsrc_update2 *up, + unsigned int nr_args) +{ + u64 __user *tags = u64_to_user_ptr(up->tags); + struct iovec iov, __user *iovs = u64_to_user_ptr(up->data); + struct page *last_hpage = NULL; + bool needs_switch = false; + __u32 done; + int i, err; + + if (!ctx->buf_data) + return -ENXIO; + if (up->offset + nr_args > ctx->nr_user_bufs) + return -EINVAL; + + for (done = 0; done < nr_args; done++) { + struct io_mapped_ubuf *imu; + int offset = up->offset + done; + u64 tag = 0; + + err = io_copy_iov(ctx, &iov, iovs, done); + if (err) + break; + if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) { + err = -EFAULT; + break; + } + err = io_buffer_validate(&iov); + if (err) + break; + if (!iov.iov_base && tag) { + err = -EINVAL; + break; + } + err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage); + if (err) + break; + + i = array_index_nospec(offset, ctx->nr_user_bufs); + if (ctx->user_bufs[i] != ctx->dummy_ubuf) { + err = io_queue_rsrc_removal(ctx->buf_data, i, + ctx->rsrc_node, ctx->user_bufs[i]); + if (unlikely(err)) { + io_buffer_unmap(ctx, &imu); + break; + } + ctx->user_bufs[i] = NULL; + needs_switch = true; + } + + ctx->user_bufs[i] = imu; + *io_get_tag_slot(ctx->buf_data, offset) = tag; + } + + if (needs_switch) + io_rsrc_node_switch(ctx, ctx->buf_data); + return done ? 
done : err; +} + static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg) { __s32 __user *fds = arg; @@ -8420,6 +9141,7 @@ static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg) ctx->cq_ev_fd = eventfd_ctx_fdget(fd); if (IS_ERR(ctx->cq_ev_fd)) { int ret = PTR_ERR(ctx->cq_ev_fd); + ctx->cq_ev_fd = NULL; return ret; } @@ -8447,26 +9169,68 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx) __io_remove_buffers(ctx, buf, index, -1U); } -static void io_ring_ctx_free(struct io_ring_ctx *ctx) +static void io_req_cache_free(struct list_head *list) { - io_finish_async(ctx); - io_sqe_buffer_unregister(ctx); + struct io_kiocb *req, *nxt; - if (ctx->sqo_task) { - put_task_struct(ctx->sqo_task); - ctx->sqo_task = NULL; - mmdrop(ctx->mm_account); - ctx->mm_account = NULL; + list_for_each_entry_safe(req, nxt, list, inflight_entry) { + list_del(&req->inflight_entry); + kmem_cache_free(req_cachep, req); + } +} + +static void io_req_caches_free(struct io_ring_ctx *ctx) +{ + struct io_submit_state *state = &ctx->submit_state; + + mutex_lock(&ctx->uring_lock); + + if (state->free_reqs) { + kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs); + state->free_reqs = 0; } -#ifdef CONFIG_BLK_CGROUP - if (ctx->sqo_blkcg_css) - css_put(ctx->sqo_blkcg_css); -#endif + io_flush_cached_locked_reqs(ctx, state); + io_req_cache_free(&state->free_list); + mutex_unlock(&ctx->uring_lock); +} - io_sqe_files_unregister(ctx); +static void io_wait_rsrc_data(struct io_rsrc_data *data) +{ + if (data && !atomic_dec_and_test(&data->refs)) + wait_for_completion(&data->done); +} + +static void io_ring_ctx_free(struct io_ring_ctx *ctx) +{ + io_sq_thread_finish(ctx); + + /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */ + io_wait_rsrc_data(ctx->buf_data); + io_wait_rsrc_data(ctx->file_data); + + mutex_lock(&ctx->uring_lock); + if (ctx->buf_data) + __io_sqe_buffers_unregister(ctx); + if (ctx->file_data) + __io_sqe_files_unregister(ctx); + if (ctx->rings) + __io_cqring_overflow_flush(ctx, true); + mutex_unlock(&ctx->uring_lock); io_eventfd_unregister(ctx); io_destroy_buffers(ctx); + if (ctx->sq_creds) + put_cred(ctx->sq_creds); + + /* there are no registered resources left, nobody uses it */ + if (ctx->rsrc_node) + io_rsrc_node_destroy(ctx->rsrc_node); + if (ctx->rsrc_backup_node) + io_rsrc_node_destroy(ctx->rsrc_backup_node); + flush_delayed_work(&ctx->rsrc_put_work); + + WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)); + WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist)); #if defined(CONFIG_UNIX) if (ctx->ring_sock) { @@ -8474,15 +9238,23 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) sock_release(ctx->ring_sock); } #endif + WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list)); + + if (ctx->mm_account) { + mmdrop(ctx->mm_account); + ctx->mm_account = NULL; + } io_mem_free(ctx->rings); io_mem_free(ctx->sq_sqes); percpu_ref_exit(&ctx->refs); free_uid(ctx->user); - put_cred(ctx->creds); + io_req_caches_free(ctx); + if (ctx->hash_map) + io_wq_put_hash(ctx->hash_map); kfree(ctx->cancel_hash); - kmem_cache_free(req_cachep, ctx->fallback_req); + kfree(ctx->dummy_ubuf); kfree(ctx); } @@ -8491,7 +9263,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait) struct io_ring_ctx *ctx = file->private_data; __poll_t mask = 0; - poll_wait(file, &ctx->cq_wait, wait); + poll_wait(file, &ctx->poll_wait, wait); /* * synchronizes with barrier from wq_has_sleeper call in * io_commit_cqring @@ -8513,49 +9285,46 @@ static __poll_t io_uring_poll(struct file *file, 
poll_table *wait) * Users may get EPOLLIN meanwhile seeing nothing in cqring, this * pushs them to do the flush. */ - if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow)) + if (io_cqring_events(ctx) || test_bit(0, &ctx->check_cq_overflow)) mask |= EPOLLIN | EPOLLRDNORM; return mask; } -static int io_uring_fasync(int fd, struct file *file, int on) -{ - struct io_ring_ctx *ctx = file->private_data; - - return fasync_helper(fd, file, on, &ctx->cq_fasync); -} - static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id) { - struct io_identity *iod; + const struct cred *creds; - iod = xa_erase(&ctx->personalities, id); - if (iod) { - put_cred(iod->creds); - if (refcount_dec_and_test(&iod->count)) - kfree(iod); + creds = xa_erase(&ctx->personalities, id); + if (creds) { + put_cred(creds); return 0; } return -EINVAL; } -static void io_ring_exit_work(struct work_struct *work) -{ - struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, - exit_work); +struct io_tctx_exit { + struct callback_head task_work; + struct completion completion; + struct io_ring_ctx *ctx; +}; +static void io_tctx_exit_cb(struct callback_head *cb) +{ + struct io_uring_task *tctx = current->io_uring; + struct io_tctx_exit *work; + + work = container_of(cb, struct io_tctx_exit, task_work); /* - * If we're doing polled IO and end up having requests being - * submitted async (out-of-line), then completions can come in while - * we're waiting for refs to drop. We need to reap these manually, - * as nobody else will be looking for them. + * When @in_idle, we're in cancellation and it's racy to remove the + * node. It'll be removed by the end of cancellation, just ignore it. + * tctx can be NULL if the queueing of this task_work raced with + * work cancelation off the exec path. */ - do { - io_iopoll_try_reap_events(ctx); - } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20)); - io_ring_ctx_free(ctx); + if (tctx && !atomic_read(&tctx->in_idle)) + io_uring_del_tctx_node((unsigned long)work->ctx); + complete(&work->completion); } static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) @@ -8565,41 +9334,116 @@ static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) return req->ctx == data; } +static void io_ring_exit_work(struct work_struct *work) +{ + struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); + unsigned long timeout = jiffies + HZ * 60 * 5; + unsigned long interval = HZ / 20; + struct io_tctx_exit exit; + struct io_tctx_node *node; + int ret; + + /* + * If we're doing polled IO and end up having requests being + * submitted async (out-of-line), then completions can come in while + * we're waiting for refs to drop. We need to reap these manually, + * as nobody else will be looking for them. 
+ */ + do { + io_uring_try_cancel_requests(ctx, NULL, true); + if (ctx->sq_data) { + struct io_sq_data *sqd = ctx->sq_data; + struct task_struct *tsk; + + io_sq_thread_park(sqd); + tsk = sqd->thread; + if (tsk && tsk->io_uring && tsk->io_uring->io_wq) + io_wq_cancel_cb(tsk->io_uring->io_wq, + io_cancel_ctx_cb, ctx, true); + io_sq_thread_unpark(sqd); + } + + if (WARN_ON_ONCE(time_after(jiffies, timeout))) { + /* there is little hope left, don't run it too often */ + interval = HZ * 60; + } + } while (!wait_for_completion_timeout(&ctx->ref_comp, interval)); + + init_completion(&exit.completion); + init_task_work(&exit.task_work, io_tctx_exit_cb); + exit.ctx = ctx; + /* + * Some may use context even when all refs and requests have been put, + * and they are free to do so while still holding uring_lock or + * completion_lock, see io_req_task_submit(). Apart from other work, + * this lock/unlock section also waits them to finish. + */ + mutex_lock(&ctx->uring_lock); + while (!list_empty(&ctx->tctx_list)) { + WARN_ON_ONCE(time_after(jiffies, timeout)); + + node = list_first_entry(&ctx->tctx_list, struct io_tctx_node, + ctx_node); + /* don't spin on a single task if cancellation failed */ + list_rotate_left(&ctx->tctx_list); + ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL); + if (WARN_ON_ONCE(ret)) + continue; + wake_up_process(node->task); + + mutex_unlock(&ctx->uring_lock); + wait_for_completion(&exit.completion); + mutex_lock(&ctx->uring_lock); + } + mutex_unlock(&ctx->uring_lock); + spin_lock(&ctx->completion_lock); + spin_unlock(&ctx->completion_lock); + + io_ring_ctx_free(ctx); +} + +/* Returns true if we found and killed one or more timeouts */ +static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, + bool cancel_all) +{ + struct io_kiocb *req, *tmp; + int canceled = 0; + + spin_lock(&ctx->completion_lock); + spin_lock_irq(&ctx->timeout_lock); + list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { + if (io_match_task(req, tsk, cancel_all)) { + io_kill_timeout(req, -ECANCELED); + canceled++; + } + } + spin_unlock_irq(&ctx->timeout_lock); + if (canceled != 0) + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + if (canceled != 0) + io_cqring_ev_posted(ctx); + return canceled != 0; +} + static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) { unsigned long index; - struct io_identity *iod; + struct creds *creds; mutex_lock(&ctx->uring_lock); percpu_ref_kill(&ctx->refs); - /* if force is set, the ring is going away. always drop after that */ - - if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead)) - ctx->sqo_dead = 1; - - ctx->cq_overflow_flushed = 1; if (ctx->rings) - __io_cqring_overflow_flush(ctx, true, NULL, NULL); + __io_cqring_overflow_flush(ctx, true); + xa_for_each(&ctx->personalities, index, creds) + io_unregister_personality(ctx, index); mutex_unlock(&ctx->uring_lock); - io_kill_timeouts(ctx, NULL, NULL); - io_poll_remove_all(ctx, NULL, NULL); - - if (ctx->io_wq) - io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true); + io_kill_timeouts(ctx, NULL, true); + io_poll_remove_all(ctx, NULL, true); /* if we failed setting up the ctx, we might not have any rings */ io_iopoll_try_reap_events(ctx); - xa_for_each(&ctx->personalities, index, iod) - io_unregister_personality(ctx, index); - - /* - * Do this upfront, so we won't have a grace period where the ring - * is closed but resources aren't reaped yet. This can cause - * spurious failure in setting up a new ring. 
- */ - io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries), - ACCT_LOCKED); INIT_WORK(&ctx->exit_work, io_ring_exit_work); /* @@ -8622,352 +9466,290 @@ static int io_uring_release(struct inode *inode, struct file *file) struct io_task_cancel { struct task_struct *task; - struct files_struct *files; + bool all; }; static bool io_cancel_task_cb(struct io_wq_work *work, void *data) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_task_cancel *cancel = data; - bool ret; - if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) { - unsigned long flags; - struct io_ring_ctx *ctx = req->ctx; - - /* protect against races with linked timeouts */ - spin_lock_irqsave(&ctx->completion_lock, flags); - ret = io_match_task(req, cancel->task, cancel->files); - spin_unlock_irqrestore(&ctx->completion_lock, flags); - } else { - ret = io_match_task(req, cancel->task, cancel->files); - } - return ret; + return io_match_task_safe(req, cancel->task, cancel->all); } -static void io_cancel_defer_files(struct io_ring_ctx *ctx, - struct task_struct *task, - struct files_struct *files) +static bool io_cancel_defer_files(struct io_ring_ctx *ctx, + struct task_struct *task, bool cancel_all) { - struct io_defer_entry *de = NULL; + struct io_defer_entry *de; LIST_HEAD(list); - spin_lock_irq(&ctx->completion_lock); + spin_lock(&ctx->completion_lock); list_for_each_entry_reverse(de, &ctx->defer_list, list) { - if (io_match_task(de->req, task, files)) { + if (io_match_task_safe(de->req, task, cancel_all)) { list_cut_position(&list, &ctx->defer_list, &de->list); break; } } - spin_unlock_irq(&ctx->completion_lock); + spin_unlock(&ctx->completion_lock); + if (list_empty(&list)) + return false; while (!list_empty(&list)) { de = list_first_entry(&list, struct io_defer_entry, list); list_del_init(&de->list); - req_set_fail_links(de->req); - io_put_req(de->req); - io_req_complete(de->req, -ECANCELED); + io_req_complete_failed(de->req, -ECANCELED); kfree(de); } + return true; } -static int io_uring_count_inflight(struct io_ring_ctx *ctx, - struct task_struct *task, - struct files_struct *files) +static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) { - struct io_kiocb *req; - int cnt = 0; + struct io_tctx_node *node; + enum io_wq_cancel cret; + bool ret = false; - spin_lock_irq(&ctx->inflight_lock); - list_for_each_entry(req, &ctx->inflight_list, inflight_entry) - cnt += io_match_task(req, task, files); - spin_unlock_irq(&ctx->inflight_lock); - return cnt; -} + mutex_lock(&ctx->uring_lock); + list_for_each_entry(node, &ctx->tctx_list, ctx_node) { + struct io_uring_task *tctx = node->task->io_uring; -static void io_uring_cancel_files(struct io_ring_ctx *ctx, - struct task_struct *task, - struct files_struct *files) -{ - while (!list_empty_careful(&ctx->inflight_list)) { - struct io_task_cancel cancel = { .task = task, .files = files }; - DEFINE_WAIT(wait); - int inflight; - - inflight = io_uring_count_inflight(ctx, task, files); - if (!inflight) - break; - - io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true); - io_poll_remove_all(ctx, task, files); - io_kill_timeouts(ctx, task, files); - /* cancellations _may_ trigger task work */ - io_run_task_work(); - - prepare_to_wait(&task->io_uring->wait, &wait, - TASK_UNINTERRUPTIBLE); - if (inflight == io_uring_count_inflight(ctx, task, files)) - schedule(); - finish_wait(&task->io_uring->wait, &wait); + /* + * io_wq will stay alive while we hold uring_lock, because it's + * killed after ctx nodes, which requires to take the 
lock. + */ + if (!tctx || !tctx->io_wq) + continue; + cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); + ret |= (cret != IO_WQ_CANCEL_NOTFOUND); } + mutex_unlock(&ctx->uring_lock); + + return ret; } -static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx, - struct task_struct *task) +static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, + struct task_struct *task, + bool cancel_all) { + struct io_task_cancel cancel = { .task = task, .all = cancel_all, }; + struct io_uring_task *tctx = task ? task->io_uring : NULL; + while (1) { - struct io_task_cancel cancel = { .task = task, .files = NULL, }; enum io_wq_cancel cret; bool ret = false; - cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true); - if (cret != IO_WQ_CANCEL_NOTFOUND) - ret = true; + if (!task) { + ret |= io_uring_try_cancel_iowq(ctx); + } else if (tctx && tctx->io_wq) { + /* + * Cancels requests of all rings, not only @ctx, but + * it's fine as the task is in exit/exec. + */ + cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, + &cancel, true); + ret |= (cret != IO_WQ_CANCEL_NOTFOUND); + } /* SQPOLL thread does its own polling */ - if (!(ctx->flags & IORING_SETUP_SQPOLL)) { + if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) || + (ctx->sq_data && ctx->sq_data->thread == current)) { while (!list_empty_careful(&ctx->iopoll_list)) { io_iopoll_try_reap_events(ctx); ret = true; } } - ret |= io_poll_remove_all(ctx, task, NULL); - ret |= io_kill_timeouts(ctx, task, NULL); + ret |= io_cancel_defer_files(ctx, task, cancel_all); + ret |= io_poll_remove_all(ctx, task, cancel_all); + ret |= io_kill_timeouts(ctx, task, cancel_all); + if (task) + ret |= io_run_task_work(); if (!ret) break; - io_run_task_work(); cond_resched(); } } -static void io_disable_sqo_submit(struct io_ring_ctx *ctx) +static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx) { - mutex_lock(&ctx->uring_lock); - ctx->sqo_dead = 1; - if (ctx->flags & IORING_SETUP_R_DISABLED) - io_sq_offload_start(ctx); - mutex_unlock(&ctx->uring_lock); + struct io_uring_task *tctx = current->io_uring; + struct io_tctx_node *node; + int ret; - /* make sure callers enter the ring to get error */ - if (ctx->rings) - io_ring_set_wakeup_flag(ctx); -} + if (unlikely(!tctx)) { + ret = io_uring_alloc_task_context(current, ctx); + if (unlikely(ret)) + return ret; -/* - * We need to iteratively cancel requests, in case a request has dependent - * hard links. These persist even for failure of cancelations, hence keep - * looping until none are found. 
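 *
 * For illustration only, under these semantics: a chain submitted as
 *
 *	sqe[0].flags |= IOSQE_IO_HARDLINK;	/* head */
 *	/* sqe[1] is the hard-linked follower */
 *
 * keeps the follower queued even when the head completes with
 * -ECANCELED, so a single cancellation pass is rarely enough.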
- */ -static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx, - struct files_struct *files) -{ - struct task_struct *task = current; + tctx = current->io_uring; + if (ctx->iowq_limits_set) { + unsigned int limits[2] = { ctx->iowq_limits[0], + ctx->iowq_limits[1], }; - if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) { - io_disable_sqo_submit(ctx); - task = ctx->sq_data->thread; - atomic_inc(&task->io_uring->in_idle); - io_sq_thread_park(ctx->sq_data); + ret = io_wq_max_workers(tctx->io_wq, limits); + if (ret) + return ret; + } } + if (!xa_load(&tctx->xa, (unsigned long)ctx)) { + node = kmalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + node->ctx = ctx; + node->task = current; - io_cancel_defer_files(ctx, task, files); - io_cqring_overflow_flush(ctx, true, task, files); + ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx, + node, GFP_KERNEL)); + if (ret) { + kfree(node); + return ret; + } - if (!files) - __io_uring_cancel_task_requests(ctx, task); - else - io_uring_cancel_files(ctx, task, files); - - if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) { - atomic_dec(&task->io_uring->in_idle); - io_sq_thread_unpark(ctx->sq_data); + mutex_lock(&ctx->uring_lock); + list_add(&node->ctx_node, &ctx->tctx_list); + mutex_unlock(&ctx->uring_lock); } + tctx->last = ctx; + return 0; } /* * Note that this task has used io_uring. We use it for cancelation purposes. */ -static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file) +static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx) { struct io_uring_task *tctx = current->io_uring; - int ret; - if (unlikely(!tctx)) { - ret = io_uring_alloc_task_context(current); - if (unlikely(ret)) - return ret; - tctx = current->io_uring; - } - if (tctx->last != file) { - void *old = xa_load(&tctx->xa, (unsigned long)file); - - if (!old) { - get_file(file); - ret = xa_err(xa_store(&tctx->xa, (unsigned long)file, - file, GFP_KERNEL)); - if (ret) { - fput(file); - return ret; - } - } - tctx->last = file; - } - - /* - * This is race safe in that the task itself is doing this, hence it - * cannot be going through the exit/cancel paths at the same time. - * This cannot be modified while exit/cancel is running. - */ - if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL)) - tctx->sqpoll = true; - - return 0; + if (likely(tctx && tctx->last == ctx)) + return 0; + return __io_uring_add_tctx_node(ctx); } /* * Remove this io_uring_file -> task mapping. 
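 * (With the per-task context rework this is really an io_ring_ctx ->
 * io_tctx_node mapping, keyed by the ctx address in tctx->xa.)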
*/ -static void io_uring_del_task_file(struct file *file) +static void io_uring_del_tctx_node(unsigned long index) { struct io_uring_task *tctx = current->io_uring; + struct io_tctx_node *node; - if (tctx->last == file) + if (!tctx) + return; + node = xa_erase(&tctx->xa, index); + if (!node) + return; + + WARN_ON_ONCE(current != node->task); + WARN_ON_ONCE(list_empty(&node->ctx_node)); + + mutex_lock(&node->ctx->uring_lock); + list_del(&node->ctx_node); + mutex_unlock(&node->ctx->uring_lock); + + if (tctx->last == node->ctx) tctx->last = NULL; - file = xa_erase(&tctx->xa, (unsigned long)file); - if (file) - fput(file); + kfree(node); } -static void io_uring_remove_task_files(struct io_uring_task *tctx) +static void io_uring_clean_tctx(struct io_uring_task *tctx) { - struct file *file; + struct io_wq *wq = tctx->io_wq; + struct io_tctx_node *node; unsigned long index; - xa_for_each(&tctx->xa, index, file) - io_uring_del_task_file(file); -} - -void __io_uring_files_cancel(struct files_struct *files) -{ - struct io_uring_task *tctx = current->io_uring; - struct file *file; - unsigned long index; - - /* make sure overflow events are dropped */ - atomic_inc(&tctx->in_idle); - xa_for_each(&tctx->xa, index, file) - io_uring_cancel_task_requests(file->private_data, files); - atomic_dec(&tctx->in_idle); - - if (files) - io_uring_remove_task_files(tctx); -} - -static s64 tctx_inflight(struct io_uring_task *tctx) -{ - unsigned long index; - struct file *file; - s64 inflight; - - inflight = percpu_counter_sum(&tctx->inflight); - if (!tctx->sqpoll) - return inflight; - - /* - * If we have SQPOLL rings, then we need to iterate and find them, and - * add the pending count for those. - */ - xa_for_each(&tctx->xa, index, file) { - struct io_ring_ctx *ctx = file->private_data; - - if (ctx->flags & IORING_SETUP_SQPOLL) { - struct io_uring_task *__tctx = ctx->sqo_task->io_uring; - - inflight += percpu_counter_sum(&__tctx->inflight); - } + xa_for_each(&tctx->xa, index, node) { + io_uring_del_tctx_node(index); + cond_resched(); } + if (wq) { + /* + * Must be after io_uring_del_task_file() (removes nodes under + * uring_lock) to avoid race with io_uring_try_cancel_iowq(). + */ + io_wq_put_and_exit(wq); + tctx->io_wq = NULL; + } +} - return inflight; +static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) +{ + if (tracked) + return atomic_read(&tctx->inflight_tracked); + return percpu_counter_sum(&tctx->inflight); } /* - * Find any io_uring fd that this task has registered or done IO on, and cancel - * requests. + * Find any io_uring ctx that this task has registered or done IO on, and cancel + * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation. 
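 * For orientation: __io_uring_cancel() passes sqd == NULL on task exit
 * or exec, while the SQPOLL thread tears down its rings with its own
 * sqd, which is what the WARN_ON_ONCE below checks.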
*/ -void __io_uring_task_cancel(void) +static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) { struct io_uring_task *tctx = current->io_uring; - DEFINE_WAIT(wait); + struct io_ring_ctx *ctx; s64 inflight; + DEFINE_WAIT(wait); + + WARN_ON_ONCE(sqd && sqd->thread != current); + + if (!current->io_uring) + return; + if (tctx->io_wq) + io_wq_exit_start(tctx->io_wq); - /* make sure overflow events are dropped */ atomic_inc(&tctx->in_idle); - - /* trigger io_disable_sqo_submit() */ - if (tctx->sqpoll) - __io_uring_files_cancel(NULL); - do { + io_uring_drop_tctx_refs(current); /* read completions before cancelations */ - inflight = tctx_inflight(tctx); + inflight = tctx_inflight(tctx, !cancel_all); if (!inflight) break; - __io_uring_files_cancel(NULL); - prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE); + if (!sqd) { + struct io_tctx_node *node; + unsigned long index; + + xa_for_each(&tctx->xa, index, node) { + /* sqpoll task will cancel all its requests */ + if (node->ctx->sq_data) + continue; + io_uring_try_cancel_requests(node->ctx, current, + cancel_all); + } + } else { + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) + io_uring_try_cancel_requests(ctx, current, + cancel_all); + } + + prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE); + io_run_task_work(); + io_uring_drop_tctx_refs(current); /* * If we've seen completions, retry without waiting. This * avoids a race where a completion comes in before we did * prepare_to_wait(). */ - if (inflight == tctx_inflight(tctx)) + if (inflight == tctx_inflight(tctx, !cancel_all)) schedule(); finish_wait(&tctx->wait, &wait); } while (1); - atomic_dec(&tctx->in_idle); - - io_uring_remove_task_files(tctx); + io_uring_clean_tctx(tctx); + if (cancel_all) { + /* + * We shouldn't run task_works after cancel, so just leave + * ->in_idle set for normal exit. + */ + atomic_dec(&tctx->in_idle); + /* for exec all current's requests should be gone, kill tctx */ + __io_uring_free(current); + } } -static int io_uring_flush(struct file *file, void *data) +void __io_uring_cancel(bool cancel_all) { - struct io_uring_task *tctx = current->io_uring; - struct io_ring_ctx *ctx = file->private_data; - - if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) - io_uring_cancel_task_requests(ctx, NULL); - - if (!tctx) - return 0; - - /* we should have cancelled and erased it before PF_EXITING */ - WARN_ON_ONCE((current->flags & PF_EXITING) && - xa_load(&tctx->xa, (unsigned long)file)); - - /* - * fput() is pending, will be 2 if the only other ref is our potential - * task file note. If the task is exiting, drop regardless of count. 
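 * (i.e. one reference from the fdtable entry being flushed plus our
 * own task-file note; any higher count means another task still has
 * the ring open, so the per-task state must be kept.)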
- */ - if (atomic_long_read(&file->f_count) != 2) - return 0; - - if (ctx->flags & IORING_SETUP_SQPOLL) { - /* there is only one file note, which is owned by sqo_task */ - WARN_ON_ONCE(ctx->sqo_task != current && - xa_load(&tctx->xa, (unsigned long)file)); - /* sqo_dead check is for when this happens after cancellation */ - WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead && - !xa_load(&tctx->xa, (unsigned long)file)); - - io_disable_sqo_submit(ctx); - } - - if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current) - io_uring_del_task_file(file); - return 0; + io_uring_cancel_generic(cancel_all, NULL); } static void *io_uring_validate_mmap_request(struct file *file, @@ -9042,61 +9824,84 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file, static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx) { - int ret = 0; DEFINE_WAIT(wait); do { if (!io_sqring_full(ctx)) break; - prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE); - if (unlikely(ctx->sqo_dead)) { - ret = -EOWNERDEAD; - goto out; - } - if (!io_sqring_full(ctx)) break; - schedule(); } while (!signal_pending(current)); finish_wait(&ctx->sqo_sq_wait, &wait); -out: - return ret; + return 0; +} + +static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz, + struct __kernel_timespec __user **ts, + const sigset_t __user **sig) +{ + struct io_uring_getevents_arg arg; + + /* + * If EXT_ARG isn't set, then we have no timespec and the argp pointer + * is just a pointer to the sigset_t. + */ + if (!(flags & IORING_ENTER_EXT_ARG)) { + *sig = (const sigset_t __user *) argp; + *ts = NULL; + return 0; + } + + /* + * EXT_ARG is set - ensure we agree on the size of it and copy in our + * timespec and sigset_t pointers if good. + */ + if (*argsz != sizeof(arg)) + return -EINVAL; + if (copy_from_user(&arg, argp, sizeof(arg))) + return -EFAULT; + if (arg.pad) + return -EINVAL; + *sig = u64_to_user_ptr(arg.sigmask); + *argsz = arg.sigmask_sz; + *ts = u64_to_user_ptr(arg.ts); + return 0; } SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, - u32, min_complete, u32, flags, const sigset_t __user *, sig, - size_t, sigsz) + u32, min_complete, u32, flags, const void __user *, argp, + size_t, argsz) { struct io_ring_ctx *ctx; - long ret = -EBADF; int submitted = 0; struct fd f; + long ret; io_run_task_work(); - if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | - IORING_ENTER_SQ_WAIT)) + if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | + IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))) return -EINVAL; f = fdget(fd); - if (!f.file) + if (unlikely(!f.file)) return -EBADF; ret = -EOPNOTSUPP; - if (f.file->f_op != &io_uring_fops) + if (unlikely(f.file->f_op != &io_uring_fops)) goto out_fput; ret = -ENXIO; ctx = f.file->private_data; - if (!percpu_ref_tryget(&ctx->refs)) + if (unlikely(!percpu_ref_tryget(&ctx->refs))) goto out_fput; ret = -EBADFD; - if (ctx->flags & IORING_SETUP_R_DISABLED) + if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED)) goto out; /* @@ -9106,9 +9911,9 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, */ ret = 0; if (ctx->flags & IORING_SETUP_SQPOLL) { - io_cqring_overflow_flush(ctx, false, NULL, NULL); + io_cqring_overflow_flush(ctx); - if (unlikely(ctx->sqo_dead)) { + if (unlikely(ctx->sq_data->thread == NULL)) { ret = -EOWNERDEAD; goto out; } @@ -9121,7 +9926,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, } submitted = to_submit; } else if (to_submit) { - ret = 
io_uring_add_task_file(ctx, f.file); + ret = io_uring_add_tctx_node(ctx); if (unlikely(ret)) goto out; mutex_lock(&ctx->uring_lock); @@ -9132,6 +9937,13 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, goto out; } if (flags & IORING_ENTER_GETEVENTS) { + const sigset_t __user *sig; + struct __kernel_timespec __user *ts; + + ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig); + if (unlikely(ret)) + goto out; + min_complete = min(min_complete, ctx->cq_entries); /* @@ -9144,7 +9956,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, !(ctx->flags & IORING_SETUP_SQPOLL)) { ret = io_iopoll_check(ctx, min_complete); } else { - ret = io_cqring_wait(ctx, min_complete, sig, sigsz); + ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts); } } @@ -9157,9 +9969,8 @@ out_fput: #ifdef CONFIG_PROC_FS static int io_uring_show_cred(struct seq_file *m, unsigned int id, - const struct io_identity *iod) + const struct cred *cred) { - const struct cred *cred = iod->creds; struct user_namespace *uns = seq_user_ns(m); struct group_info *gi; kernel_cap_t cap; @@ -9203,18 +10014,18 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) */ has_lock = mutex_trylock(&ctx->uring_lock); - if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) + if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) { sq = ctx->sq_data; + if (!sq->thread) + sq = NULL; + } seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1); seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1); seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files); for (i = 0; has_lock && i < ctx->nr_user_files; i++) { - struct fixed_file_table *table; - struct file *f; + struct file *f = io_file_from_index(ctx, i); - table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT]; - f = table->files[i & IORING_FILE_TABLE_MASK]; if (f) seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname); else @@ -9222,21 +10033,21 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) } seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs); for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) { - struct io_mapped_ubuf *buf = &ctx->user_bufs[i]; + struct io_mapped_ubuf *buf = ctx->user_bufs[i]; + unsigned int len = buf->ubuf_end - buf->ubuf; - seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, - (unsigned int) buf->len); + seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len); } if (has_lock && !xa_empty(&ctx->personalities)) { unsigned long index; - const struct io_identity *iod; + const struct cred *cred; seq_printf(m, "Personalities:\n"); - xa_for_each(&ctx->personalities, index, iod) - io_uring_show_cred(m, index, iod); + xa_for_each(&ctx->personalities, index, cred) + io_uring_show_cred(m, index, cred); } seq_printf(m, "PollList:\n"); - spin_lock_irq(&ctx->completion_lock); + spin_lock(&ctx->completion_lock); for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { struct hlist_head *list = &ctx->cancel_hash[i]; struct io_kiocb *req; @@ -9245,7 +10056,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) seq_printf(m, " op=%d, task_works=%d\n", req->opcode, req->task->task_works != NULL); } - spin_unlock_irq(&ctx->completion_lock); + spin_unlock(&ctx->completion_lock); if (has_lock) mutex_unlock(&ctx->uring_lock); } @@ -9263,14 +10074,12 @@ static void io_uring_show_fdinfo(struct seq_file *m, struct file *f) static const struct file_operations io_uring_fops = { .release = io_uring_release, - .flush = io_uring_flush, .mmap = io_uring_mmap, 
#ifndef CONFIG_MMU .get_unmapped_area = io_uring_nommu_get_unmapped_area, .mmap_capabilities = io_uring_nommu_mmap_capabilities, #endif .poll = io_uring_poll, - .fasync = io_uring_fasync, #ifdef CONFIG_PROC_FS .show_fdinfo = io_uring_show_fdinfo, #endif @@ -9300,8 +10109,6 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx, rings->cq_ring_mask = p->cq_entries - 1; rings->sq_ring_entries = p->sq_entries; rings->cq_ring_entries = p->cq_entries; - ctx->sq_mask = rings->sq_ring_mask; - ctx->cq_mask = rings->cq_ring_mask; size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); if (size == SIZE_MAX) { @@ -9328,7 +10135,7 @@ static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file) if (fd < 0) return fd; - ret = io_uring_add_task_file(ctx, file); + ret = io_uring_add_tctx_node(ctx); if (ret) { put_unused_fd(fd); return ret; @@ -9371,10 +10178,8 @@ static struct file *io_uring_get_file(struct io_ring_ctx *ctx) static int io_uring_create(unsigned entries, struct io_uring_params *p, struct io_uring_params __user *params) { - struct user_struct *user = NULL; struct io_ring_ctx *ctx; struct file *file; - bool limit_mem; int ret; if (!entries) @@ -9414,34 +10219,12 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, p->cq_entries = 2 * p->sq_entries; } - user = get_uid(current_user()); - limit_mem = !capable(CAP_IPC_LOCK); - - if (limit_mem) { - ret = __io_account_mem(user, - ring_pages(p->sq_entries, p->cq_entries)); - if (ret) { - free_uid(user); - return ret; - } - } - ctx = io_ring_ctx_alloc(p); - if (!ctx) { - if (limit_mem) - __io_unaccount_mem(user, ring_pages(p->sq_entries, - p->cq_entries)); - free_uid(user); + if (!ctx) return -ENOMEM; - } ctx->compat = in_compat_syscall(); - ctx->user = user; - ctx->creds = get_current_cred(); -#ifdef CONFIG_AUDIT - ctx->loginuid = current->loginuid; - ctx->sessionid = current->sessionid; -#endif - ctx->sqo_task = get_task_struct(current); + if (!capable(CAP_IPC_LOCK)) + ctx->user = get_uid(current_user()); /* * This is just grabbed for accounting purposes. When a process exits, @@ -9452,35 +10235,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, mmgrab(current->mm); ctx->mm_account = current->mm; -#ifdef CONFIG_BLK_CGROUP - /* - * The sq thread will belong to the original cgroup it was inited in. - * If the cgroup goes offline (e.g. disabling the io controller), then - * issued bios will be associated with the closest cgroup later in the - * block layer. - */ - rcu_read_lock(); - ctx->sqo_blkcg_css = blkcg_css(); - ret = css_tryget_online(ctx->sqo_blkcg_css); - rcu_read_unlock(); - if (!ret) { - /* don't init against a dying cgroup, have the user try again */ - ctx->sqo_blkcg_css = NULL; - ret = -ENODEV; - goto err; - } -#endif - - /* - * Account memory _before_ installing the file descriptor. Once - * the descriptor is installed, it can get closed at any time. Also - * do this before hitting the general error path, as ring freeing - * will un-account as well. 
- */ - io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries), - ACCT_LOCKED); - ctx->limit_mem = limit_mem; - ret = io_allocate_scq_urings(ctx, p); if (ret) goto err; @@ -9488,9 +10242,11 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, ret = io_sq_offload_create(ctx, p); if (ret) goto err; - - if (!(p->flags & IORING_SETUP_R_DISABLED)) - io_sq_offload_start(ctx); + /* always set a rsrc node */ + ret = io_rsrc_node_switch_start(ctx); + if (ret) + goto err; + io_rsrc_node_switch(ctx, NULL); memset(&p->sq_off, 0, sizeof(p->sq_off)); p->sq_off.head = offsetof(struct io_rings, sq.head); @@ -9513,7 +10269,9 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL | - IORING_FEAT_POLL_32BITS; + IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED | + IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS | + IORING_FEAT_RSRC_TAGS; if (copy_to_user(params, p, sizeof(*p))) { ret = -EFAULT; @@ -9532,7 +10290,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, */ ret = io_uring_install_fd(ctx, file); if (ret < 0) { - io_disable_sqo_submit(ctx); /* fput will clean it up */ fput(file); return ret; @@ -9541,7 +10298,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); return ret; err: - io_disable_sqo_submit(ctx); io_ring_ctx_wait_and_kill(ctx); return ret; } @@ -9619,22 +10375,16 @@ out: static int io_register_personality(struct io_ring_ctx *ctx) { - struct io_identity *iod; + const struct cred *creds; u32 id; int ret; - iod = kmalloc(sizeof(*iod), GFP_KERNEL); - if (unlikely(!iod)) - return -ENOMEM; + creds = get_current_cred(); - io_init_identity(iod); - iod->creds = get_current_cred(); - - ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)iod, + ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds, XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL); if (ret < 0) { - put_cred(iod->creds); - kfree(iod); + put_cred(creds); return ret; } return id; @@ -9718,24 +10468,273 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx) if (ctx->restrictions.registered) ctx->restricted = 1; - io_sq_offload_start(ctx); + ctx->flags &= ~IORING_SETUP_R_DISABLED; + if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait)) + wake_up(&ctx->sq_data->wait); return 0; } +static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type, + struct io_uring_rsrc_update2 *up, + unsigned nr_args) +{ + __u32 tmp; + int err; + + if (check_add_overflow(up->offset, nr_args, &tmp)) + return -EOVERFLOW; + err = io_rsrc_node_switch_start(ctx); + if (err) + return err; + + switch (type) { + case IORING_RSRC_FILE: + return __io_sqe_files_update(ctx, up, nr_args); + case IORING_RSRC_BUFFER: + return __io_sqe_buffers_update(ctx, up, nr_args); + } + return -EINVAL; +} + +static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg, + unsigned nr_args) +{ + struct io_uring_rsrc_update2 up; + + if (!nr_args) + return -EINVAL; + memset(&up, 0, sizeof(up)); + if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update))) + return -EFAULT; + if (up.resv || up.resv2) + return -EINVAL; + return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args); +} + +static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg, + unsigned size, 
unsigned type) +{ + struct io_uring_rsrc_update2 up; + + if (size != sizeof(up)) + return -EINVAL; + if (copy_from_user(&up, arg, sizeof(up))) + return -EFAULT; + if (!up.nr || up.resv || up.resv2) + return -EINVAL; + return __io_register_rsrc_update(ctx, type, &up, up.nr); +} + +static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg, + unsigned int size, unsigned int type) +{ + struct io_uring_rsrc_register rr; + + /* keep it extendible */ + if (size != sizeof(rr)) + return -EINVAL; + + memset(&rr, 0, sizeof(rr)); + if (copy_from_user(&rr, arg, size)) + return -EFAULT; + if (!rr.nr || rr.resv || rr.resv2) + return -EINVAL; + + switch (type) { + case IORING_RSRC_FILE: + return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data), + rr.nr, u64_to_user_ptr(rr.tags)); + case IORING_RSRC_BUFFER: + return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data), + rr.nr, u64_to_user_ptr(rr.tags)); + } + return -EINVAL; +} + +static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg, + unsigned len) +{ + struct io_uring_task *tctx = current->io_uring; + cpumask_var_t new_mask; + int ret; + + if (!tctx || !tctx->io_wq) + return -EINVAL; + + if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) + return -ENOMEM; + + cpumask_clear(new_mask); + if (len > cpumask_size()) + len = cpumask_size(); + +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + ret = compat_get_bitmap(cpumask_bits(new_mask), + (const compat_ulong_t __user *)arg, + len * 8 /* CHAR_BIT */); + } else { + ret = copy_from_user(new_mask, arg, len); + } +#else + ret = copy_from_user(new_mask, arg, len); +#endif + + if (ret) { + free_cpumask_var(new_mask); + return -EFAULT; + } + + ret = io_wq_cpu_affinity(tctx->io_wq, new_mask); + free_cpumask_var(new_mask); + return ret; +} + +static int io_unregister_iowq_aff(struct io_ring_ctx *ctx) +{ + struct io_uring_task *tctx = current->io_uring; + + if (!tctx || !tctx->io_wq) + return -EINVAL; + + return io_wq_cpu_affinity(tctx->io_wq, NULL); +} + +static int io_register_iowq_max_workers(struct io_ring_ctx *ctx, + void __user *arg) + __must_hold(&ctx->uring_lock) +{ + struct io_tctx_node *node; + struct io_uring_task *tctx = NULL; + struct io_sq_data *sqd = NULL; + __u32 new_count[2]; + int i, ret; + + if (copy_from_user(new_count, arg, sizeof(new_count))) + return -EFAULT; + for (i = 0; i < ARRAY_SIZE(new_count); i++) + if (new_count[i] > INT_MAX) + return -EINVAL; + + if (ctx->flags & IORING_SETUP_SQPOLL) { + sqd = ctx->sq_data; + if (sqd) { + /* + * Observe the correct sqd->lock -> ctx->uring_lock + * ordering. Fine to drop uring_lock here, we hold + * a ref to the ctx. 
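 *
 * A minimal sketch of the resulting lock dance, assuming only the two
 * locks named here:
 *
 *	mutex_unlock(&ctx->uring_lock);	/* give up the inner lock */
 *	mutex_lock(&sqd->lock);		/* take the outer lock first */
 *	mutex_lock(&ctx->uring_lock);	/* retake the inner one, in order */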
+ */ + refcount_inc(&sqd->refs); + mutex_unlock(&ctx->uring_lock); + mutex_lock(&sqd->lock); + mutex_lock(&ctx->uring_lock); + if (sqd->thread) + tctx = sqd->thread->io_uring; + } + } else { + tctx = current->io_uring; + } + + BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits)); + + for (i = 0; i < ARRAY_SIZE(new_count); i++) + if (new_count[i]) + ctx->iowq_limits[i] = new_count[i]; + ctx->iowq_limits_set = true; + + ret = -EINVAL; + if (tctx && tctx->io_wq) { + ret = io_wq_max_workers(tctx->io_wq, new_count); + if (ret) + goto err; + } else { + memset(new_count, 0, sizeof(new_count)); + } + + if (sqd) { + mutex_unlock(&sqd->lock); + io_put_sq_data(sqd); + } + + if (copy_to_user(arg, new_count, sizeof(new_count))) + return -EFAULT; + + /* that's it for SQPOLL, only the SQPOLL task creates requests */ + if (sqd) + return 0; + + /* now propagate the restriction to all registered users */ + list_for_each_entry(node, &ctx->tctx_list, ctx_node) { + struct io_uring_task *tctx = node->task->io_uring; + + if (WARN_ON_ONCE(!tctx->io_wq)) + continue; + + for (i = 0; i < ARRAY_SIZE(new_count); i++) + new_count[i] = ctx->iowq_limits[i]; + /* ignore errors, it always returns zero anyway */ + (void)io_wq_max_workers(tctx->io_wq, new_count); + } + return 0; +err: + if (sqd) { + mutex_unlock(&sqd->lock); + io_put_sq_data(sqd); + } + return ret; +} + static bool io_register_op_must_quiesce(int op) { switch (op) { + case IORING_REGISTER_BUFFERS: + case IORING_UNREGISTER_BUFFERS: + case IORING_REGISTER_FILES: case IORING_UNREGISTER_FILES: case IORING_REGISTER_FILES_UPDATE: case IORING_REGISTER_PROBE: case IORING_REGISTER_PERSONALITY: case IORING_UNREGISTER_PERSONALITY: + case IORING_REGISTER_FILES2: + case IORING_REGISTER_FILES_UPDATE2: + case IORING_REGISTER_BUFFERS2: + case IORING_REGISTER_BUFFERS_UPDATE: + case IORING_REGISTER_IOWQ_AFF: + case IORING_UNREGISTER_IOWQ_AFF: + case IORING_REGISTER_IOWQ_MAX_WORKERS: return false; default: return true; } } +static int io_ctx_quiesce(struct io_ring_ctx *ctx) +{ + long ret; + + percpu_ref_kill(&ctx->refs); + + /* + * Drop uring mutex before waiting for references to exit. If another + * thread is currently inside io_uring_enter() it might need to grab the + * uring_lock to make progress. If we hold it here across the drain + * wait, then we can deadlock. It's safe to drop the mutex here, since + * no new references will come in after we've killed the percpu ref. + */ + mutex_unlock(&ctx->uring_lock); + do { + ret = wait_for_completion_interruptible(&ctx->ref_comp); + if (!ret) + break; + ret = io_run_task_work_sig(); + } while (ret >= 0); + mutex_lock(&ctx->uring_lock); + + if (ret) + io_refs_resurrect(&ctx->refs, &ctx->ref_comp); + return ret; +} + static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, void __user *arg, unsigned nr_args) __releases(ctx->uring_lock) @@ -9751,58 +10750,32 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, if (percpu_ref_is_dying(&ctx->refs)) return -ENXIO; - if (io_register_op_must_quiesce(opcode)) { - percpu_ref_kill(&ctx->refs); - - /* - * Drop uring mutex before waiting for references to exit. If - * another thread is currently inside io_uring_enter() it might - * need to grab the uring_lock to make progress. If we hold it - * here across the drain wait, then we can deadlock. It's safe - * to drop the mutex here, since no new references will come in - * after we've killed the percpu ref. 
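 * A signal while waiting aborts the quiesce: io_run_task_work_sig()
 * returns -EINTR, the percpu ref is resurrected, and the error is
 * handed back so userspace can simply retry the registration.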
- */ - mutex_unlock(&ctx->uring_lock); - do { - ret = wait_for_completion_interruptible(&ctx->ref_comp); - if (!ret) - break; - ret = io_run_task_work_sig(); - if (ret < 0) - break; - } while (1); - mutex_lock(&ctx->uring_lock); - - if (ret) { - io_refs_resurrect(&ctx->refs, &ctx->ref_comp); - return ret; - } + if (ctx->restricted) { + if (opcode >= IORING_REGISTER_LAST) + return -EINVAL; + opcode = array_index_nospec(opcode, IORING_REGISTER_LAST); + if (!test_bit(opcode, ctx->restrictions.register_op)) + return -EACCES; } - if (ctx->restricted) { - if (opcode >= IORING_REGISTER_LAST) { - ret = -EINVAL; - goto out; - } - - if (!test_bit(opcode, ctx->restrictions.register_op)) { - ret = -EACCES; - goto out; - } + if (io_register_op_must_quiesce(opcode)) { + ret = io_ctx_quiesce(ctx); + if (ret) + return ret; } switch (opcode) { case IORING_REGISTER_BUFFERS: - ret = io_sqe_buffer_register(ctx, arg, nr_args); + ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL); break; case IORING_UNREGISTER_BUFFERS: ret = -EINVAL; if (arg || nr_args) break; - ret = io_sqe_buffer_unregister(ctx); + ret = io_sqe_buffers_unregister(ctx); break; case IORING_REGISTER_FILES: - ret = io_sqe_files_register(ctx, arg, nr_args); + ret = io_sqe_files_register(ctx, arg, nr_args, NULL); break; case IORING_UNREGISTER_FILES: ret = -EINVAL; @@ -9811,7 +10784,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, ret = io_sqe_files_unregister(ctx); break; case IORING_REGISTER_FILES_UPDATE: - ret = io_sqe_files_update(ctx, arg, nr_args); + ret = io_register_files_update(ctx, arg, nr_args); break; case IORING_REGISTER_EVENTFD: case IORING_REGISTER_EVENTFD_ASYNC: @@ -9859,12 +10832,43 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, case IORING_REGISTER_RESTRICTIONS: ret = io_register_restrictions(ctx, arg, nr_args); break; + case IORING_REGISTER_FILES2: + ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE); + break; + case IORING_REGISTER_FILES_UPDATE2: + ret = io_register_rsrc_update(ctx, arg, nr_args, + IORING_RSRC_FILE); + break; + case IORING_REGISTER_BUFFERS2: + ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER); + break; + case IORING_REGISTER_BUFFERS_UPDATE: + ret = io_register_rsrc_update(ctx, arg, nr_args, + IORING_RSRC_BUFFER); + break; + case IORING_REGISTER_IOWQ_AFF: + ret = -EINVAL; + if (!arg || !nr_args) + break; + ret = io_register_iowq_aff(ctx, arg, nr_args); + break; + case IORING_UNREGISTER_IOWQ_AFF: + ret = -EINVAL; + if (arg || nr_args) + break; + ret = io_unregister_iowq_aff(ctx); + break; + case IORING_REGISTER_IOWQ_MAX_WORKERS: + ret = -EINVAL; + if (!arg || nr_args != 2) + break; + ret = io_register_iowq_max_workers(ctx, arg); + break; default: ret = -EINVAL; break; } -out: if (io_register_op_must_quiesce(opcode)) { /* bring the ctx back to life */ percpu_ref_reinit(&ctx->refs); @@ -9890,6 +10894,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, ctx = f.file->private_data; + io_run_task_work(); + mutex_lock(&ctx->uring_lock); ret = __io_uring_register(ctx, opcode, arg, nr_args); mutex_unlock(&ctx->uring_lock); @@ -9936,12 +10942,27 @@ static int __init io_uring_init(void) BUILD_BUG_SQE_ELEM(28, __u32, splice_flags); BUILD_BUG_SQE_ELEM(32, __u64, user_data); BUILD_BUG_SQE_ELEM(40, __u16, buf_index); + BUILD_BUG_SQE_ELEM(40, __u16, buf_group); BUILD_BUG_SQE_ELEM(42, __u16, personality); BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in); + BUILD_BUG_SQE_ELEM(44, __u32, file_index); + + 
BUILD_BUG_ON(sizeof(struct io_uring_files_update) != + sizeof(struct io_uring_rsrc_update)); + BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) > + sizeof(struct io_uring_rsrc_update2)); + + /* ->buf_index is u16 */ + BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16)); + + /* should fit into one byte */ + BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8)); BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST); - BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int)); - req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int)); + + req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC | + SLAB_ACCOUNT); return 0; }; __initcall(io_uring_init); diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 05d2176cc471..86969de17084 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -45,6 +45,7 @@ struct mqueue_fs_context { struct ipc_namespace *ipc_ns; + bool newns; /* Set if newly created ipc namespace */ }; #define MQUEUE_MAGIC 0x19800202 @@ -424,6 +425,14 @@ static int mqueue_get_tree(struct fs_context *fc) { struct mqueue_fs_context *ctx = fc->fs_private; + /* + * With a newly created ipc namespace, we don't need to do a search + * for an ipc namespace match, but we still need to set s_fs_info. + */ + if (ctx->newns) { + fc->s_fs_info = ctx->ipc_ns; + return get_tree_nodev(fc, mqueue_fill_super); + } return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns); } @@ -451,6 +460,10 @@ static int mqueue_init_fs_context(struct fs_context *fc) return 0; } +/* + * mq_init_ns() is currently the only caller of mq_create_mount(). + * So the ns parameter is always a newly created ipc namespace. + */ static struct vfsmount *mq_create_mount(struct ipc_namespace *ns) { struct mqueue_fs_context *ctx; @@ -462,6 +475,7 @@ static struct vfsmount *mq_create_mount(struct ipc_namespace *ns) return ERR_CAST(fc); ctx = fc->fs_private; + ctx->newns = true; put_ipc_ns(ctx->ipc_ns); ctx->ipc_ns = get_ipc_ns(ns); put_user_ns(fc->user_ns); diff --git a/ipc/msg.c b/ipc/msg.c index 6e6c8e0c9380..8ded6b8f10a2 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -147,7 +147,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) key_t key = params->key; int msgflg = params->flg; - msq = kvmalloc(sizeof(*msq), GFP_KERNEL); + msq = kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT); if (unlikely(!msq)) return -ENOMEM; diff --git a/ipc/sem.c b/ipc/sem.c index 7d9c06b0ad6e..916f7a90be31 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -511,7 +511,7 @@ static struct sem_array *sem_alloc(size_t nsems) if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0])) return NULL; - sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL); + sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT); if (unlikely(!sma)) return NULL; @@ -1852,7 +1852,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp) undo_list = current->sysvsem.undo_list; if (!undo_list) { - undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL); + undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT); if (undo_list == NULL) return -ENOMEM; spin_lock_init(&undo_list->lock); @@ -1937,7 +1937,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) rcu_read_unlock(); /* step 2: allocate new undo structure */ - new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); + new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL_ACCOUNT); if (!new) { ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); return ERR_PTR(-ENOMEM); @@ -2190,14 +2190,15 @@ static 
long do_semtimedop(int semid, struct sembuf __user *tsops, * scenarios where we were awakened externally, during the * window between wake_q_add() and wake_up_q(). */ + rcu_read_lock(); error = READ_ONCE(queue.status); if (error != -EINTR) { /* see SEM_BARRIER_2 for purpose/pairing */ smp_acquire__after_ctrl_dep(); + rcu_read_unlock(); goto out_free; } - rcu_read_lock(); locknum = sem_lock(sma, sops, nsops); if (!ipc_valid_object(&sma->sem_perm)) diff --git a/ipc/shm.c b/ipc/shm.c index 471ac3e7498d..b418731d66e8 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -711,7 +711,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; - shp = kvmalloc(sizeof(*shp), GFP_KERNEL); + shp = kvmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT); if (unlikely(!shp)) return -ENOMEM; diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c index 5b3f01da172b..b2ebacd2f309 100644 --- a/kernel/audit_fsnotify.c +++ b/kernel/audit_fsnotify.c @@ -102,6 +102,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa ret = fsnotify_add_inode_mark(&audit_mark->mark, inode, true); if (ret < 0) { + audit_mark->path = NULL; fsnotify_put_mark(&audit_mark->mark); audit_mark = ERR_PTR(ret); } diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 36c68dcea236..f241bda2679d 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -616,6 +616,11 @@ static int bpf_iter_init_array_map(void *priv_data, seq_info->percpu_value_buf = value_buf; } + /* bpf_iter_attach_map() acquires a map uref, and the uref may be + * released before or in the middle of iterating map elements, so + * acquire an extra map uref for iterator. + */ + bpf_map_inc_with_uref(map); seq_info->map = map; return 0; } @@ -624,6 +629,7 @@ static void bpf_iter_fini_array_map(void *priv_data) { struct bpf_iter_seq_array_map_info *seq_info = priv_data; + bpf_map_put_with_uref(seq_info->map); kfree(seq_info->percpu_value_buf); } diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c index 5d3a7af9ba9b..8aaaaef99f09 100644 --- a/kernel/bpf/bpf_local_storage.c +++ b/kernel/bpf/bpf_local_storage.c @@ -70,7 +70,7 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN); if (selem) { if (value) - memcpy(SDATA(selem)->data, value, smap->map.value_size); + copy_map_value(&smap->map, SDATA(selem)->data, value); return selem; } diff --git a/kernel/bpf/bpf_lru_list.h b/kernel/bpf/bpf_lru_list.h index 6b12f06ee18c..4ea227c9c1ad 100644 --- a/kernel/bpf/bpf_lru_list.h +++ b/kernel/bpf/bpf_lru_list.h @@ -4,6 +4,7 @@ #ifndef __BPF_LRU_LIST_H_ #define __BPF_LRU_LIST_H_ +#include #include #include diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index dc497eaf2266..9232938e3f96 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -2913,7 +2913,7 @@ static int btf_struct_resolve(struct btf_verifier_env *env, if (v->next_member) { const struct btf_type *last_member_type; const struct btf_member *last_member; - u16 last_member_type_id; + u32 last_member_type_id; last_member = btf_type_member(v->t) + v->next_member - 1; last_member_type_id = last_member->type; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 6aa9e10c6335..6d92e393e1bc 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -653,6 +653,62 @@ static struct bpf_prog_list *find_detach_entry(struct list_head *progs, return ERR_PTR(-ENOENT); } +/** + * purge_effective_progs() - After 
compute_effective_progs fails to alloc new
+ *			       cgrp->bpf.inactive table we can recover by
+ *			       recomputing the array in place.
+ *
+ * @cgrp: The cgroup whose descendants to traverse
+ * @prog: A program to detach or NULL
+ * @link: A link to detach or NULL
+ * @type: Type of detach operation
+ */
+static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
+				   struct bpf_cgroup_link *link,
+				   enum bpf_attach_type type)
+{
+	struct cgroup_subsys_state *css;
+	struct bpf_prog_array *progs;
+	struct bpf_prog_list *pl;
+	struct list_head *head;
+	struct cgroup *cg;
+	int pos;
+
+	/* recompute effective prog array in place */
+	css_for_each_descendant_pre(css, &cgrp->self) {
+		struct cgroup *desc = container_of(css, struct cgroup, self);
+
+		if (percpu_ref_is_zero(&desc->bpf.refcnt))
+			continue;
+
+		/* find position of link or prog in effective progs array */
+		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
+			if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
+				continue;
+
+			head = &cg->bpf.progs[type];
+			list_for_each_entry(pl, head, node) {
+				if (!prog_list_prog(pl))
+					continue;
+				if (pl->prog == prog && pl->link == link)
+					goto found;
+				pos++;
+			}
+		}
+
+		/* no link or prog match, skip the cgroup of this layer */
+		continue;
+found:
+		progs = rcu_dereference_protected(
+				desc->bpf.effective[type],
+				lockdep_is_held(&cgroup_mutex));
+
+		/* Remove the program from the array */
+		WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
+			  "Failed to purge a prog from array at index %d", pos);
+	}
+}
+
 /**
  * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
  * propagate the change to descendants
@@ -671,7 +727,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 	u32 flags = cgrp->bpf.flags[type];
 	struct bpf_prog_list *pl;
 	struct bpf_prog *old_prog;
-	int err;
 
 	if (prog && link)
 		/* only one of prog or link can be specified */
@@ -686,9 +741,12 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 	pl->prog = NULL;
 	pl->link = NULL;
 
-	err = update_effective_progs(cgrp, type);
-	if (err)
-		goto cleanup;
+	if (update_effective_progs(cgrp, type)) {
+		/* if updating the effective array failed, replace the prog with a dummy prog */
+		pl->prog = old_prog;
+		pl->link = link;
+		purge_effective_progs(cgrp, old_prog, link, type);
+	}
 
 	/* now can actually delete it from this cgroup list */
 	list_del(&pl->node);
@@ -700,12 +758,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 	bpf_prog_put(old_prog);
 	static_branch_dec(&cgroup_bpf_enabled_key);
 	return 0;
-
-cleanup:
-	/* restore back prog or link */
-	pl->prog = old_prog;
-	pl->link = link;
-	return err;
 }
 
 /* Must be called with cgroup_mutex held to avoid races.
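 * (so is purge_effective_progs() above: it walks the css hierarchy and
 * edits the effective arrays in place, which is only safe while holding
 * that mutex, as the lockdep_is_held() annotation documents)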
*/ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 41fc5020814e..234cf1cd08d0 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -68,11 +68,13 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns { u8 *ptr = NULL; - if (k >= SKF_NET_OFF) + if (k >= SKF_NET_OFF) { ptr = skb_network_header(skb) + k - SKF_NET_OFF; - else if (k >= SKF_LL_OFF) + } else if (k >= SKF_LL_OFF) { + if (unlikely(!skb_mac_header_was_set(skb))) + return NULL; ptr = skb_mac_header(skb) + k - SKF_LL_OFF; - + } if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) return ptr; @@ -1657,6 +1659,11 @@ out: CONT; \ LDX_MEM_##SIZEOP: \ DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ + CONT; \ + LDX_PROBE_MEM_##SIZEOP: \ + bpf_probe_read_kernel(&DST, sizeof(SIZE), \ + (const void *)(long) (SRC + insn->off)); \ + DST = *((SIZE *)&DST); \ CONT; LDST(B, u8) @@ -1664,15 +1671,6 @@ out: LDST(W, u32) LDST(DW, u64) #undef LDST -#define LDX_PROBE(SIZEOP, SIZE) \ - LDX_PROBE_MEM_##SIZEOP: \ - bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \ - CONT; - LDX_PROBE(B, 1) - LDX_PROBE(H, 2) - LDX_PROBE(W, 4) - LDX_PROBE(DW, 8) -#undef LDX_PROBE STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */ atomic_add((u32) SRC, (atomic_t *)(unsigned long) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 6c444e815406..0ce445aadfdf 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1801,6 +1801,7 @@ static int bpf_iter_init_hash_map(void *priv_data, seq_info->percpu_value_buf = value_buf; } + bpf_map_inc_with_uref(map); seq_info->map = map; seq_info->htab = container_of(map, struct bpf_htab, map); return 0; @@ -1810,6 +1811,7 @@ static void bpf_iter_fini_hash_map(void *priv_data) { struct bpf_iter_seq_hash_map_info *seq_info = priv_data; + bpf_map_put_with_uref(seq_info->map); kfree(seq_info->percpu_value_buf); } diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c index 3d897de89061..bbab8bb4b2fd 100644 --- a/kernel/bpf/percpu_freelist.c +++ b/kernel/bpf/percpu_freelist.c @@ -102,22 +102,21 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, u32 nr_elems) { struct pcpu_freelist_head *head; - int i, cpu, pcpu_entries; + unsigned int cpu, cpu_idx, i, j, n, m; - pcpu_entries = nr_elems / num_possible_cpus() + 1; - i = 0; + n = nr_elems / num_possible_cpus(); + m = nr_elems % num_possible_cpus(); + cpu_idx = 0; for_each_possible_cpu(cpu) { -again: head = per_cpu_ptr(s->freelist, cpu); - /* No locking required as this is not visible yet. */ - pcpu_freelist_push_node(head, buf); - i++; - buf += elem_size; - if (i == nr_elems) - break; - if (i % pcpu_entries) - goto again; + j = n + (cpu_idx < m ? 1 : 0); + for (i = 0; i < j; i++) { + /* No locking required as this is not visible yet. 
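 * Distribution example, assuming exactly these semantics: with
 * nr_elems = 10 and 4 possible CPUs, n = 2 and m = 2, so the
 * per-CPU node counts come out as 3, 3, 2, 2.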
*/ + pcpu_freelist_push_node(head, buf); + buf += elem_size; + } + cpu_idx++; } } diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 4575d2d60cb1..0c5bf98d5576 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -121,8 +121,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) return ERR_PTR(-E2BIG); cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap); - cost += n_buckets * (value_size + sizeof(struct stack_map_bucket)); - err = bpf_map_charge_init(&mem, cost); + err = bpf_map_charge_init(&mem, cost + attr->max_entries * + (sizeof(struct stack_map_bucket) + (u64)value_size)); if (err) return ERR_PTR(err); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 0c45e247f5e1..589332936c53 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3917,7 +3917,9 @@ static int bpf_task_fd_query(const union bpf_attr *attr, if (attr->task_fd_query.flags != 0) return -EINVAL; + rcu_read_lock(); task = get_pid_task(find_vpid(pid), PIDTYPE_PID); + rcu_read_unlock(); if (!task) return -ENOENT; diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index ac34532150f7..3988776e1628 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -381,7 +381,7 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) { enum bpf_tramp_prog_type kind; int err = 0; - int cnt; + int cnt = 0, i; kind = bpf_attach_type_to_tramp(prog); mutex_lock(&tr->mutex); @@ -392,7 +392,10 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) err = -EBUSY; goto out; } - cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT]; + + for (i = 0; i < BPF_TRAMP_MAX; i++) + cnt += tr->progs_cnt[i]; + if (kind == BPF_TRAMP_REPLACE) { /* Cannot attach extension if fentry/fexit are in use. */ if (cnt) { @@ -470,16 +473,19 @@ out: void bpf_trampoline_put(struct bpf_trampoline *tr) { + int i; + if (!tr) return; mutex_lock(&trampoline_mutex); if (!refcount_dec_and_test(&tr->refcnt)) goto out; WARN_ON_ONCE(mutex_is_locked(&tr->mutex)); - if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY]))) - goto out; - if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT]))) - goto out; + + for (i = 0; i < BPF_TRAMP_MAX; i++) + if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i]))) + goto out; + /* This code will be executed even when the last bpf_tramp_image * is alive. All progs are detached from the trampoline and the * trampoline image is patched with jmp into epilogue to skip diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index cec01739a47c..a276f3d057f2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1249,6 +1249,21 @@ static void __reg_bound_offset(struct bpf_reg_state *reg) reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); } +static void reg_bounds_sync(struct bpf_reg_state *reg) +{ + /* We might have learned new bounds from the var_off. */ + __update_reg_bounds(reg); + /* We might have learned something about the sign bit. */ + __reg_deduce_bounds(reg); + /* We might have learned some bits from the bounds. */ + __reg_bound_offset(reg); + /* Intersecting with the old var_off might have improved our bounds + * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), + * then new var_off is (0; 0x7f...fc) which improves our umax. 
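 * Concretely, with var_off now (0; 0x7f...fc), the second
 * __update_reg_bounds() pass lowers umax from 0x7f...f to 0x7f...fc,
 * one step tighter than either input alone.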
+ */ + __update_reg_bounds(reg); +} + static bool __reg32_bound_s64(s32 a) { return a >= 0 && a <= S32_MAX; @@ -1290,16 +1305,8 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg) * so they do not impact tnum bounds calculation. */ __mark_reg64_unbounded(reg); - __update_reg_bounds(reg); } - - /* Intersecting with the old var_off might have improved our bounds - * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), - * then new var_off is (0; 0x7f...fc) which improves our umax. - */ - __reg_deduce_bounds(reg); - __reg_bound_offset(reg); - __update_reg_bounds(reg); + reg_bounds_sync(reg); } static bool __reg64_bound_s32(s64 a) @@ -1315,7 +1322,6 @@ static bool __reg64_bound_u32(u64 a) static void __reg_combine_64_into_32(struct bpf_reg_state *reg) { __mark_reg32_unbounded(reg); - if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) { reg->s32_min_value = (s32)reg->smin_value; reg->s32_max_value = (s32)reg->smax_value; @@ -1324,14 +1330,7 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg) reg->u32_min_value = (u32)reg->umin_value; reg->u32_max_value = (u32)reg->umax_value; } - - /* Intersecting with the old var_off might have improved our bounds - * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), - * then new var_off is (0; 0x7f...fc) which improves our umax. - */ - __reg_deduce_bounds(reg); - __reg_bound_offset(reg); - __update_reg_bounds(reg); + reg_bounds_sync(reg); } /* Mark a register as having a completely unknown (scalar) value. */ @@ -2979,7 +2978,9 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, regno); return -EACCES; } - err = __check_mem_access(env, regno, off, size, reg->range, + + err = reg->range < 0 ? -EINVAL : + __check_mem_access(env, regno, off, size, reg->range, zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); @@ -4992,50 +4993,41 @@ static int check_func_proto(const struct bpf_func_proto *fn, int func_id) /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. 
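 *
 * From the program's point of view (an illustrative sketch; len and n
 * are placeholders):
 *
 *	bpf_skb_pull_data(skb, len);		/* may move packet data */
 *	data = (void *)(long)skb->data;		/* reload the pointers */
 *	data_end = (void *)(long)skb->data_end;
 *	if (data + n > data_end)		/* and re-check bounds */
 *		return TC_ACT_SHOT;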
*/ -static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, - struct bpf_func_state *state) -{ - struct bpf_reg_state *regs = state->regs, *reg; - int i; - - for (i = 0; i < MAX_BPF_REG; i++) - if (reg_is_pkt_pointer_any(®s[i])) - mark_reg_unknown(env, regs, i); - - bpf_for_each_spilled_reg(i, state, reg) { - if (!reg) - continue; - if (reg_is_pkt_pointer_any(reg)) - __mark_reg_unknown(env, reg); - } -} - static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { - struct bpf_verifier_state *vstate = env->cur_state; - int i; + struct bpf_func_state *state; + struct bpf_reg_state *reg; - for (i = 0; i <= vstate->curframe; i++) - __clear_all_pkt_pointers(env, vstate->frame[i]); + bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ + if (reg_is_pkt_pointer_any(reg)) + __mark_reg_unknown(env, reg); + })); } -static void release_reg_references(struct bpf_verifier_env *env, - struct bpf_func_state *state, - int ref_obj_id) +enum { + AT_PKT_END = -1, + BEYOND_PKT_END = -2, +}; + +static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open) { - struct bpf_reg_state *regs = state->regs, *reg; - int i; + struct bpf_func_state *state = vstate->frame[vstate->curframe]; + struct bpf_reg_state *reg = &state->regs[regn]; - for (i = 0; i < MAX_BPF_REG; i++) - if (regs[i].ref_obj_id == ref_obj_id) - mark_reg_unknown(env, regs, i); + if (reg->type != PTR_TO_PACKET) + /* PTR_TO_PACKET_META is not supported yet */ + return; - bpf_for_each_spilled_reg(i, state, reg) { - if (!reg) - continue; - if (reg->ref_obj_id == ref_obj_id) - __mark_reg_unknown(env, reg); - } + /* The 'reg' is pkt > pkt_end or pkt >= pkt_end. + * How far beyond pkt_end it goes is unknown. + * if (!range_open) it's the case of pkt >= pkt_end + * if (range_open) it's the case of pkt > pkt_end + * hence this pointer is at least 1 byte bigger than pkt_end + */ + if (range_open) + reg->range = BEYOND_PKT_END; + else + reg->range = AT_PKT_END; } /* The pointer with the specified id has released its reference to kernel @@ -5044,16 +5036,22 @@ static void release_reg_references(struct bpf_verifier_env *env, static int release_reference(struct bpf_verifier_env *env, int ref_obj_id) { - struct bpf_verifier_state *vstate = env->cur_state; + struct bpf_func_state *state; + struct bpf_reg_state *reg; int err; - int i; err = release_reference_state(cur_func(env), ref_obj_id); if (err) return err; - for (i = 0; i <= vstate->curframe; i++) - release_reg_references(env, vstate->frame[i], ref_obj_id); + bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ + if (reg->ref_obj_id == ref_obj_id) { + if (!env->allow_ptr_leaks) + __mark_reg_not_init(env, reg); + else + __mark_reg_unknown(env, reg); + } + })); return 0; } @@ -5230,9 +5228,7 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, ret_reg->s32_max_value = meta->msize_max_value; ret_reg->smin_value = -MAX_ERRNO; ret_reg->s32_min_value = -MAX_ERRNO; - __reg_deduce_bounds(ret_reg); - __reg_bound_offset(ret_reg); - __update_reg_bounds(ret_reg); + reg_bounds_sync(ret_reg); } static int @@ -5285,8 +5281,7 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; struct bpf_reg_state *regs = cur_regs(env), *reg; struct bpf_map *map = meta->map_ptr; - struct tnum range; - u64 val; + u64 val, max; int err; if (func_id != BPF_FUNC_tail_call) @@ -5296,10 +5291,11 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 
return -EINVAL; } - range = tnum_range(0, map->max_entries - 1); reg = ®s[BPF_REG_3]; + val = reg->var_off.value; + max = map->max_entries; - if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) { + if (!(register_is_const(reg) && val < max)) { bpf_map_key_store(aux, BPF_MAP_KEY_POISON); return 0; } @@ -5307,8 +5303,6 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, err = mark_chain_precision(env, BPF_REG_3); if (err) return err; - - val = reg->var_off.value; if (bpf_map_key_unseen(aux)) bpf_map_key_store(aux, val); else if (!bpf_map_key_poisoned(aux) && @@ -6197,11 +6191,7 @@ reject: if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) return -EINVAL; - - __update_reg_bounds(dst_reg); - __reg_deduce_bounds(dst_reg); - __reg_bound_offset(dst_reg); - + reg_bounds_sync(dst_reg); if (sanitize_check_bounds(env, insn, dst_reg) < 0) return -EACCES; if (sanitize_needed(opcode)) { @@ -6939,10 +6929,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, /* ALU32 ops are zero extended into 64bit register */ if (alu32) zext_32_to_64(dst_reg); - - __update_reg_bounds(dst_reg); - __reg_deduce_bounds(dst_reg); - __reg_bound_offset(dst_reg); + reg_bounds_sync(dst_reg); return 0; } @@ -7131,10 +7118,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) insn->dst_reg); } zext_32_to_64(dst_reg); - - __update_reg_bounds(dst_reg); - __reg_deduce_bounds(dst_reg); - __reg_bound_offset(dst_reg); + reg_bounds_sync(dst_reg); } } else { /* case: R = imm @@ -7206,35 +7190,14 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) return 0; } -static void __find_good_pkt_pointers(struct bpf_func_state *state, - struct bpf_reg_state *dst_reg, - enum bpf_reg_type type, u16 new_range) -{ - struct bpf_reg_state *reg; - int i; - - for (i = 0; i < MAX_BPF_REG; i++) { - reg = &state->regs[i]; - if (reg->type == type && reg->id == dst_reg->id) - /* keep the maximum range already checked */ - reg->range = max(reg->range, new_range); - } - - bpf_for_each_spilled_reg(i, state, reg) { - if (!reg) - continue; - if (reg->type == type && reg->id == dst_reg->id) - reg->range = max(reg->range, new_range); - } -} - static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, bool range_right_open) { - u16 new_range; - int i; + struct bpf_func_state *state; + struct bpf_reg_state *reg; + int new_range; if (dst_reg->off < 0 || (dst_reg->off == 0 && range_right_open)) @@ -7299,9 +7262,11 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, * the range won't allow anything. * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. */ - for (i = 0; i <= vstate->curframe; i++) - __find_good_pkt_pointers(vstate->frame[i], dst_reg, type, - new_range); + bpf_for_each_reg_in_vstate(vstate, state, reg, ({ + if (reg->type == type && reg->id == dst_reg->id) + /* keep the maximum range already checked */ + reg->range = max(reg->range, new_range); + })); } static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode) @@ -7485,6 +7450,67 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, return is_branch64_taken(reg, val, opcode); } +static int flip_opcode(u32 opcode) +{ + /* How can we transform "a b" into "b a"? 
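 * e.g. flip_opcode(BPF_JGT) == BPF_JLT, since "r1 > r2" is the same
 * predicate as "r2 < r1"; the signed variants map the same way.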
*/ + static const u8 opcode_flip[16] = { + /* these stay the same */ + [BPF_JEQ >> 4] = BPF_JEQ, + [BPF_JNE >> 4] = BPF_JNE, + [BPF_JSET >> 4] = BPF_JSET, + /* these swap "lesser" and "greater" (L and G in the opcodes) */ + [BPF_JGE >> 4] = BPF_JLE, + [BPF_JGT >> 4] = BPF_JLT, + [BPF_JLE >> 4] = BPF_JGE, + [BPF_JLT >> 4] = BPF_JGT, + [BPF_JSGE >> 4] = BPF_JSLE, + [BPF_JSGT >> 4] = BPF_JSLT, + [BPF_JSLE >> 4] = BPF_JSGE, + [BPF_JSLT >> 4] = BPF_JSGT + }; + return opcode_flip[opcode >> 4]; +} + +static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg, + struct bpf_reg_state *src_reg, + u8 opcode) +{ + struct bpf_reg_state *pkt; + + if (src_reg->type == PTR_TO_PACKET_END) { + pkt = dst_reg; + } else if (dst_reg->type == PTR_TO_PACKET_END) { + pkt = src_reg; + opcode = flip_opcode(opcode); + } else { + return -1; + } + + if (pkt->range >= 0) + return -1; + + switch (opcode) { + case BPF_JLE: + /* pkt <= pkt_end */ + fallthrough; + case BPF_JGT: + /* pkt > pkt_end */ + if (pkt->range == BEYOND_PKT_END) + /* pkt has at least one extra byte beyond pkt_end */ + return opcode == BPF_JGT; + break; + case BPF_JLT: + /* pkt < pkt_end */ + fallthrough; + case BPF_JGE: + /* pkt >= pkt_end */ + if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END) + return opcode == BPF_JGE; + break; + } + return -1; +} + /* Adjusts the register min/max values in the case that the dst_reg is the * variable register that we are working on, and src_reg is a constant or we're * simply doing a BPF_K check. @@ -7512,26 +7538,33 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, return; switch (opcode) { + /* JEQ/JNE comparison doesn't change the register equivalence. + * + * r1 = r2; + * if (r1 == 42) goto label; + * ... + * label: // here both r1 and r2 are known to be 42. + * + * Hence when marking register as known preserve its ID. + */ case BPF_JEQ: - case BPF_JNE: - { - struct bpf_reg_state *reg = - opcode == BPF_JEQ ? true_reg : false_reg; - - /* JEQ/JNE comparison doesn't change the register equivalence. - * r1 = r2; - * if (r1 == 42) goto label; - * ... - * label: // here both r1 and r2 are known to be 42. - * - * Hence when marking register as known preserve it's ID. - */ - if (is_jmp32) - __mark_reg32_known(reg, val32); - else - ___mark_reg_known(reg, val); + if (is_jmp32) { + __mark_reg32_known(true_reg, val32); + true_32off = tnum_subreg(true_reg->var_off); + } else { + ___mark_reg_known(true_reg, val); + true_64off = true_reg->var_off; + } + break; + case BPF_JNE: + if (is_jmp32) { + __mark_reg32_known(false_reg, val32); + false_32off = tnum_subreg(false_reg->var_off); + } else { + ___mark_reg_known(false_reg, val); + false_64off = false_reg->var_off; + } break; - } case BPF_JSET: if (is_jmp32) { false_32off = tnum_and(false_32off, tnum_const(~val32)); @@ -7648,23 +7681,7 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, u64 val, u32 val32, u8 opcode, bool is_jmp32) { - /* How can we transform "a <op> b" into "b <op> a"? 
*/ - static const u8 opcode_flip[16] = { - /* these stay the same */ - [BPF_JEQ >> 4] = BPF_JEQ, - [BPF_JNE >> 4] = BPF_JNE, - [BPF_JSET >> 4] = BPF_JSET, - /* these swap "lesser" and "greater" (L and G in the opcodes) */ - [BPF_JGE >> 4] = BPF_JLE, - [BPF_JGT >> 4] = BPF_JLT, - [BPF_JLE >> 4] = BPF_JGE, - [BPF_JLT >> 4] = BPF_JGT, - [BPF_JSGE >> 4] = BPF_JSLE, - [BPF_JSGT >> 4] = BPF_JSLT, - [BPF_JSLE >> 4] = BPF_JSGE, - [BPF_JSLT >> 4] = BPF_JSGT - }; - opcode = opcode_flip[opcode >> 4]; + opcode = flip_opcode(opcode); /* This uses zero as "not present in table"; luckily the zero opcode, * BPF_JA, can't get here. */ @@ -7686,21 +7703,8 @@ static void __reg_combine_min_max(struct bpf_reg_state *src_reg, dst_reg->smax_value); src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, dst_reg->var_off); - /* We might have learned new bounds from the var_off. */ - __update_reg_bounds(src_reg); - __update_reg_bounds(dst_reg); - /* We might have learned something about the sign bit. */ - __reg_deduce_bounds(src_reg); - __reg_deduce_bounds(dst_reg); - /* We might have learned some bits from the bounds. */ - __reg_bound_offset(src_reg); - __reg_bound_offset(dst_reg); - /* Intersecting with the old var_off might have improved our bounds - * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), - * then new var_off is (0; 0x7f...fc) which improves our umax. - */ - __update_reg_bounds(src_reg); - __update_reg_bounds(dst_reg); + reg_bounds_sync(src_reg); + reg_bounds_sync(dst_reg); } static void reg_combine_min_max(struct bpf_reg_state *true_src, @@ -7775,7 +7779,7 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, reg->ref_obj_id = 0; } else if (!reg_may_point_to_spin_lock(reg)) { /* For not-NULL ptr, reg->ref_obj_id will be reset - * in release_reg_references(). + * in release_reference(). * * reg->id is still used by spin_lock ptr. Other * than spin_lock ptr type, reg->id can be reset. @@ -7785,22 +7789,6 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, } } -static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, - bool is_null) -{ - struct bpf_reg_state *reg; - int i; - - for (i = 0; i < MAX_BPF_REG; i++) - mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); - - bpf_for_each_spilled_reg(i, state, reg) { - if (!reg) - continue; - mark_ptr_or_null_reg(state, reg, id, is_null); - } -} - /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. */ @@ -7808,10 +7796,9 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, bool is_null) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; - struct bpf_reg_state *regs = state->regs; + struct bpf_reg_state *regs = state->regs, *reg; u32 ref_obj_id = regs[regno].ref_obj_id; u32 id = regs[regno].id; - int i; if (ref_obj_id && ref_obj_id == id && is_null) /* regs[regno] is in the " == NULL" branch. 
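
As an illustrative sketch of what mark_pkt_end() and is_pkt_ptr_branch_taken() enable (this program is not part of the patch; SEC() and struct xdp_md are the usual libbpf/uapi definitions), consider a duplicated XDP bounds check:

	/* Illustrative only. On the taken branch of the first test, "p" is
	 * marked range == BEYOND_PKT_END, so the verifier resolves the
	 * repeated "p > data_end" comparison as known-taken and prunes the
	 * dead path instead of rejecting the program.
	 */
	SEC("xdp")
	int xdp_bounds_sketch(struct xdp_md *ctx)
	{
		void *data_end = (void *)(long)ctx->data_end;
		void *data = (void *)(long)ctx->data;
		void *p = data + 16;

		if (p > data_end) {
			if (p > data_end)	/* known taken, so this branch is pruned */
				return XDP_DROP;
			return XDP_DROP;
		}
		/* here p <= data_end: the first 16 bytes can be loaded */
		return XDP_PASS;
	}
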
@@ -7820,8 +7807,9 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, */ WARN_ON_ONCE(release_reference_state(state, id)); - for (i = 0; i <= vstate->curframe; i++) - __mark_ptr_or_null_regs(vstate->frame[i], id, is_null); + bpf_for_each_reg_in_vstate(vstate, state, reg, ({ + mark_ptr_or_null_reg(state, reg, id, is_null); + })); } static bool try_match_pkt_pointers(const struct bpf_insn *insn, @@ -7846,6 +7834,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, false); + mark_pkt_end(other_branch, insn->dst_reg, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && @@ -7853,6 +7842,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_end > pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, true); + mark_pkt_end(this_branch, insn->src_reg, false); } else { return false; } @@ -7865,6 +7855,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, true); + mark_pkt_end(this_branch, insn->dst_reg, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && @@ -7872,6 +7863,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_end < pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, false); + mark_pkt_end(other_branch, insn->src_reg, true); } else { return false; } @@ -7884,6 +7876,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, true); + mark_pkt_end(other_branch, insn->dst_reg, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && @@ -7891,6 +7884,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, false); + mark_pkt_end(this_branch, insn->src_reg, true); } else { return false; } @@ -7903,6 +7897,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, false); + mark_pkt_end(this_branch, insn->dst_reg, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && @@ -7910,6 +7905,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, true); + mark_pkt_end(other_branch, insn->src_reg, false); } else { return false; } @@ -7926,23 +7922,11 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate, { struct bpf_func_state *state; struct bpf_reg_state *reg; - int i, j; - for (i = 0; i <= vstate->curframe; i++) { - state = vstate->frame[i]; - for (j = 0; j < MAX_BPF_REG; j++) { - reg = &state->regs[j]; - if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) - *reg = *known_reg; - } - - bpf_for_each_spilled_reg(j, state, reg) { - if (!reg) - 
continue; - if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) - *reg = *known_reg; - } - } + bpf_for_each_reg_in_vstate(vstate, state, reg, ({ + if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) + *reg = *known_reg; + })); } static int check_cond_jmp_op(struct bpf_verifier_env *env, @@ -8009,6 +7993,10 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, src_reg->var_off.value, opcode, is_jmp32); + } else if (reg_is_pkt_pointer_any(dst_reg) && + reg_is_pkt_pointer_any(src_reg) && + !is_jmp32) { + pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode); } if (pred >= 0) { @@ -8017,7 +8005,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, */ if (!__is_pointer_value(false, dst_reg)) err = mark_chain_precision(env, insn->dst_reg); - if (BPF_SRC(insn->code) == BPF_X && !err) + if (BPF_SRC(insn->code) == BPF_X && !err && + !__is_pointer_value(false, src_reg)) err = mark_chain_precision(env, insn->src_reg); if (err) return err; @@ -11171,6 +11160,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) /* the btf and func_info will be freed only at prog->aux */ func[i]->aux->btf = prog->aux->btf; func[i]->aux->func_info = prog->aux->func_info; + func[i]->aux->func_info_cnt = prog->aux->func_info_cnt; for (j = 0; j < prog->aux->size_poke_tab; j++) { u32 insn_idx = prog->aux->poke_tab[j].insn_idx; @@ -11197,9 +11187,6 @@ static int jit_subprogs(struct bpf_verifier_env *env) } } - /* Use bpf_prog_F_tag to indicate functions in stack traces. - * Long term would need debug info to populate names - */ func[i]->aux->name[0] = 'F'; func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; func[i]->jit_requested = 1; diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index 6aca805a25c4..4307a0904295 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -169,7 +169,6 @@ extern struct mutex cgroup_mutex; extern spinlock_t css_set_lock; extern struct cgroup_subsys *cgroup_subsys[]; extern struct list_head cgroup_roots; -extern struct file_system_type cgroup_fs_type; /* iterate across the hierarchies */ #define for_each_root(root) \ diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 185fbb6a0011..ffcdf33deb92 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -58,6 +58,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) int retval = 0; mutex_lock(&cgroup_mutex); + cpus_read_lock(); percpu_down_write(&cgroup_threadgroup_rwsem); for_each_root(root) { struct cgroup *from_cgrp; @@ -74,6 +75,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) break; } percpu_up_write(&cgroup_threadgroup_rwsem); + cpus_read_unlock(); mutex_unlock(&cgroup_mutex); return retval; @@ -506,10 +508,11 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of, goto out_unlock; /* - * Even if we're attaching all tasks in the thread group, we only - * need to check permissions on one of them. + * Even if we're attaching all tasks in the thread group, we only need + * to check permissions on one of them. Check permissions using the + * credentials from file open to protect against inherited fd attacks. 
*/ - cred = current_cred(); + cred = of->file->f_cred; tcred = get_task_cred(task); if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && !uid_eq(cred->euid, tcred->uid) && diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index c1e1a5c34e77..f08f898ea929 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -746,25 +746,28 @@ EXPORT_SYMBOL_GPL(of_css); * reference-counted, to improve performance when child cgroups * haven't been created. */ -struct css_set init_css_set = { - .refcount = REFCOUNT_INIT(1), - .dom_cset = &init_css_set, - .tasks = LIST_HEAD_INIT(init_css_set.tasks), - .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks), - .dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks), - .task_iters = LIST_HEAD_INIT(init_css_set.task_iters), - .threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets), - .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links), - .mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node), - .mg_node = LIST_HEAD_INIT(init_css_set.mg_node), - - /* - * The following field is re-initialized when this cset gets linked - * in cgroup_init(). However, let's initialize the field - * statically too so that the default cgroup can be accessed safely - * early during boot. - */ - .dfl_cgrp = &cgrp_dfl_root.cgrp, +struct ext_css_set init_ext_css_set = { + .cset = { + .refcount = REFCOUNT_INIT(1), + .dom_cset = &init_css_set, + .tasks = LIST_HEAD_INIT(init_css_set.tasks), + .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks), + .dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks), + .task_iters = LIST_HEAD_INIT(init_css_set.task_iters), + .threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets), + .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links), + .mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node), + .mg_node = LIST_HEAD_INIT(init_css_set.mg_node), + /* + * The following field is re-initialized when this cset gets linked + * in cgroup_init(). However, let's initialize the field + * statically too so that the default cgroup can be accessed safely + * early during boot. 
+ */ + .dfl_cgrp = &cgrp_dfl_root.cgrp, + }, + .mg_src_preload_node = LIST_HEAD_INIT(init_ext_css_set.mg_src_preload_node), + .mg_dst_preload_node = LIST_HEAD_INIT(init_ext_css_set.mg_dst_preload_node), }; static int css_set_count = 1; /* 1 for init_css_set */ @@ -1191,6 +1194,7 @@ static struct css_set *find_css_set(struct css_set *old_cset, struct cgroup *cgrp) { struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { }; + struct ext_css_set *ext_cset; struct css_set *cset; struct list_head tmp_links; struct cgrp_cset_link *link; @@ -1211,9 +1215,10 @@ static struct css_set *find_css_set(struct css_set *old_cset, if (cset) return cset; - cset = kzalloc(sizeof(*cset), GFP_KERNEL); - if (!cset) + ext_cset = kzalloc(sizeof(*ext_cset), GFP_KERNEL); + if (!ext_cset) return NULL; + cset = &ext_cset->cset; /* Allocate all the cgrp_cset_link objects that we'll need */ if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) { @@ -1231,6 +1236,8 @@ static struct css_set *find_css_set(struct css_set *old_cset, INIT_HLIST_NODE(&cset->hlist); INIT_LIST_HEAD(&cset->cgrp_links); INIT_LIST_HEAD(&cset->mg_preload_node); + INIT_LIST_HEAD(&ext_cset->mg_src_preload_node); + INIT_LIST_HEAD(&ext_cset->mg_dst_preload_node); INIT_LIST_HEAD(&cset->mg_node); /* Copy the set of subsystem state objects generated in @@ -2321,6 +2328,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) } EXPORT_SYMBOL_GPL(task_cgroup_path); +/** + * cgroup_attach_lock - Lock for ->attach() + * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem + * + * cgroup migration sometimes needs to stabilize threadgroups against forks and + * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach() + * implementations (e.g. cpuset) also need to disable CPU hotplug. + * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can + * lead to deadlocks. + * + * Bringing up a CPU may involve creating and destroying tasks which requires + * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside + * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while + * write-locking threadgroup_rwsem, the locking order is reversed and we end up + * waiting for an on-going CPU hotplug operation which in turn is waiting for + * the threadgroup_rwsem to be released to create new tasks. For more details: + * + * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu + * + * Resolve the situation by always acquiring cpus_read_lock() before optionally + * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that + * CPU hotplug is disabled on entry. 
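+ *
+ * As a hedged sketch (the callback name is hypothetical, not taken from
+ * this patch), an ->attach() implementation may now simply do:
+ *
+ *	static void foo_attach(struct cgroup_taskset *tset)
+ *	{
+ *		lockdep_assert_cpus_held();	// held by cgroup_attach_lock()
+ *		...
+ *	}
+ *
+ * which is the pattern cpuset_attach() is converted to further below.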
+ */ +static void cgroup_attach_lock(bool lock_threadgroup) +{ + cpus_read_lock(); + if (lock_threadgroup) + percpu_down_write(&cgroup_threadgroup_rwsem); +} + +/** + * cgroup_attach_unlock - Undo cgroup_attach_lock() + * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem + */ +static void cgroup_attach_unlock(bool lock_threadgroup) +{ + if (lock_threadgroup) + percpu_up_write(&cgroup_threadgroup_rwsem); + cpus_read_unlock(); +} + /** * cgroup_migrate_add_task - add a migration target task to a migration context * @task: target task @@ -2578,22 +2626,28 @@ int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp) */ void cgroup_migrate_finish(struct cgroup_mgctx *mgctx) { - LIST_HEAD(preloaded); - struct css_set *cset, *tmp_cset; + struct ext_css_set *cset, *tmp_cset; lockdep_assert_held(&cgroup_mutex); spin_lock_irq(&css_set_lock); - list_splice_tail_init(&mgctx->preloaded_src_csets, &preloaded); - list_splice_tail_init(&mgctx->preloaded_dst_csets, &preloaded); + list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_src_csets, + mg_src_preload_node) { + cset->cset.mg_src_cgrp = NULL; + cset->cset.mg_dst_cgrp = NULL; + cset->cset.mg_dst_cset = NULL; + list_del_init(&cset->mg_src_preload_node); + put_css_set_locked(&cset->cset); + } - list_for_each_entry_safe(cset, tmp_cset, &preloaded, mg_preload_node) { - cset->mg_src_cgrp = NULL; - cset->mg_dst_cgrp = NULL; - cset->mg_dst_cset = NULL; - list_del_init(&cset->mg_preload_node); - put_css_set_locked(cset); + list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_dst_csets, + mg_dst_preload_node) { + cset->cset.mg_src_cgrp = NULL; + cset->cset.mg_dst_cgrp = NULL; + cset->cset.mg_dst_cset = NULL; + list_del_init(&cset->mg_dst_preload_node); + put_css_set_locked(&cset->cset); } spin_unlock_irq(&css_set_lock); @@ -2620,6 +2674,7 @@ void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup_mgctx *mgctx) { struct cgroup *src_cgrp; + struct ext_css_set *ext_src_cset; lockdep_assert_held(&cgroup_mutex); lockdep_assert_held(&css_set_lock); @@ -2633,8 +2688,9 @@ void cgroup_migrate_add_src(struct css_set *src_cset, return; src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root); + ext_src_cset = container_of(src_cset, struct ext_css_set, cset); - if (!list_empty(&src_cset->mg_preload_node)) + if (!list_empty(&ext_src_cset->mg_src_preload_node)) return; WARN_ON(src_cset->mg_src_cgrp); @@ -2645,7 +2701,7 @@ void cgroup_migrate_add_src(struct css_set *src_cset, src_cset->mg_src_cgrp = src_cgrp; src_cset->mg_dst_cgrp = dst_cgrp; get_css_set(src_cset); - list_add_tail(&src_cset->mg_preload_node, &mgctx->preloaded_src_csets); + list_add_tail(&ext_src_cset->mg_src_preload_node, &mgctx->preloaded_src_csets); } /** @@ -2664,20 +2720,23 @@ void cgroup_migrate_add_src(struct css_set *src_cset, */ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) { - struct css_set *src_cset, *tmp_cset; + struct ext_css_set *ext_src_set, *tmp_cset; lockdep_assert_held(&cgroup_mutex); /* look up the dst cset for each src cset and link it to src */ - list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets, - mg_preload_node) { + list_for_each_entry_safe(ext_src_set, tmp_cset, &mgctx->preloaded_src_csets, + mg_src_preload_node) { + struct css_set *src_cset = &ext_src_set->cset; struct css_set *dst_cset; + struct ext_css_set *ext_dst_cset; struct cgroup_subsys *ss; int ssid; dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp); if (!dst_cset) return -ENOMEM; + ext_dst_cset = container_of(dst_cset, struct ext_css_set, cset); 
WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset); @@ -2689,7 +2748,7 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) if (src_cset == dst_cset) { src_cset->mg_src_cgrp = NULL; src_cset->mg_dst_cgrp = NULL; - list_del_init(&src_cset->mg_preload_node); + list_del_init(&ext_src_set->mg_src_preload_node); put_css_set(src_cset); put_css_set(dst_cset); continue; @@ -2697,8 +2756,8 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) src_cset->mg_dst_cset = dst_cset; - if (list_empty(&dst_cset->mg_preload_node)) - list_add_tail(&dst_cset->mg_preload_node, + if (list_empty(&ext_dst_cset->mg_dst_preload_node)) + list_add_tail(&ext_dst_cset->mg_dst_preload_node, &mgctx->preloaded_dst_csets); else put_css_set(dst_cset); @@ -2794,9 +2853,8 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, } struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, - bool *locked, + bool *threadgroup_locked, struct cgroup *dst_cgrp) - __acquires(&cgroup_threadgroup_rwsem) { struct task_struct *tsk; pid_t pid; @@ -2814,12 +2872,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, * Therefore, we can skip the global lock. */ lockdep_assert_held(&cgroup_mutex); - if (pid || threadgroup) { - percpu_down_write(&cgroup_threadgroup_rwsem); - *locked = true; - } else { - *locked = false; - } + *threadgroup_locked = pid || threadgroup; + cgroup_attach_lock(*threadgroup_locked); rcu_read_lock(); if (pid) { @@ -2853,17 +2907,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, goto out_unlock_rcu; out_unlock_threadgroup: - if (*locked) { - percpu_up_write(&cgroup_threadgroup_rwsem); - *locked = false; - } + cgroup_attach_unlock(*threadgroup_locked); + *threadgroup_locked = false; out_unlock_rcu: rcu_read_unlock(); return tsk; } -void cgroup_procs_write_finish(struct task_struct *task, bool locked) - __releases(&cgroup_threadgroup_rwsem) +void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked) { struct cgroup_subsys *ss; int ssid; @@ -2871,8 +2922,8 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked) /* release reference from cgroup_procs_write_start() */ put_task_struct(task); - if (locked) - percpu_up_write(&cgroup_threadgroup_rwsem); + cgroup_attach_unlock(threadgroup_locked); + for_each_subsys(ss, ssid) if (ss->post_attach) ss->post_attach(); @@ -2926,13 +2977,12 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) DEFINE_CGROUP_MGCTX(mgctx); struct cgroup_subsys_state *d_css; struct cgroup *dsct; - struct css_set *src_cset; + struct ext_css_set *ext_src_set; + bool has_tasks; int ret; lockdep_assert_held(&cgroup_mutex); - percpu_down_write(&cgroup_threadgroup_rwsem); - /* look up all csses currently attached to @cgrp's subtree */ spin_lock_irq(&css_set_lock); cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { @@ -2943,17 +2993,27 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) } spin_unlock_irq(&css_set_lock); + /* + * We need to write-lock threadgroup_rwsem while migrating tasks. + * However, if there are no source csets for @cgrp, changing its + * controllers isn't gonna produce any task migrations and the + * write-locking can be skipped safely. 
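+ * For instance (an illustrative case, not from the changelog): enabling
+ * a controller through cgroup.subtree_control on a subtree that holds
+ * no tasks reaches this point with an empty preloaded_src_csets list,
+ * so the percpu rwsem write-lock is skipped entirely.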
+ */ + has_tasks = !list_empty(&mgctx.preloaded_src_csets); + cgroup_attach_lock(has_tasks); + /* NULL dst indicates self on default hierarchy */ ret = cgroup_migrate_prepare_dst(&mgctx); if (ret) goto out_finish; spin_lock_irq(&css_set_lock); - list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, mg_preload_node) { + list_for_each_entry(ext_src_set, &mgctx.preloaded_src_csets, + mg_src_preload_node) { struct task_struct *task, *ntask; /* all tasks in src_csets need to be migrated */ - list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list) + list_for_each_entry_safe(task, ntask, &ext_src_set->cset.tasks, cg_list) cgroup_migrate_add_task(task, &mgctx); } spin_unlock_irq(&css_set_lock); @@ -2961,7 +3021,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) ret = cgroup_migrate_execute(&mgctx); out_finish: cgroup_migrate_finish(&mgctx); - percpu_up_write(&cgroup_threadgroup_rwsem); + cgroup_attach_unlock(has_tasks); return ret; } @@ -4834,14 +4894,15 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of, struct cgroup_file_ctx *ctx = of->priv; struct cgroup *src_cgrp, *dst_cgrp; struct task_struct *task; + const struct cred *saved_cred; ssize_t ret; - bool locked; + bool threadgroup_locked; dst_cgrp = cgroup_kn_lock_live(of->kn, false); if (!dst_cgrp) return -ENODEV; - task = cgroup_procs_write_start(buf, true, &locked, dst_cgrp); + task = cgroup_procs_write_start(buf, true, &threadgroup_locked, dst_cgrp); ret = PTR_ERR_OR_ZERO(task); if (ret) goto out_unlock; @@ -4851,16 +4912,23 @@ static ssize_t cgroup_procs_write(struct kernfs_open_file *of, src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); spin_unlock_irq(&css_set_lock); + /* + * Process and thread migrations follow the same delegation rule. Check + * permissions using the credentials from file open to protect against + * inherited fd attacks. + */ + saved_cred = override_creds(of->file->f_cred); ret = cgroup_attach_permissions(src_cgrp, dst_cgrp, of->file->f_path.dentry->d_sb, true, ctx->ns); + revert_creds(saved_cred); if (ret) goto out_finish; ret = cgroup_attach_task(dst_cgrp, task, true); out_finish: - cgroup_procs_write_finish(task, locked); + cgroup_procs_write_finish(task, threadgroup_locked); out_unlock: cgroup_kn_unlock(of->kn); @@ -4878,8 +4946,9 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of, struct cgroup_file_ctx *ctx = of->priv; struct cgroup *src_cgrp, *dst_cgrp; struct task_struct *task; + const struct cred *saved_cred; ssize_t ret; - bool locked; + bool threadgroup_locked; buf = strstrip(buf); @@ -4887,7 +4956,7 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of, if (!dst_cgrp) return -ENODEV; - task = cgroup_procs_write_start(buf, false, &locked, dst_cgrp); + task = cgroup_procs_write_start(buf, false, &threadgroup_locked, dst_cgrp); ret = PTR_ERR_OR_ZERO(task); if (ret) goto out_unlock; @@ -4897,17 +4966,23 @@ static ssize_t cgroup_threads_write(struct kernfs_open_file *of, src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); spin_unlock_irq(&css_set_lock); - /* thread migrations follow the cgroup.procs delegation rule */ + /* + * Process and thread migrations follow the same delegation rule. Check + * permissions using the credentials from file open to protect against + * inherited fd attacks. 
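+ *
+ * An illustrative attack this guards against (scenario assumed, not
+ * quoted from the changelog): an unprivileged process opens this file
+ * with whatever access it legitimately has, then arranges for a
+ * setuid-root helper to inherit the fd and write a pid to it. With
+ * current_cred() the check would run with the helper's root
+ * credentials; with of->file->f_cred it stays bound to the
+ * unprivileged opener.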
+ */ + saved_cred = override_creds(of->file->f_cred); ret = cgroup_attach_permissions(src_cgrp, dst_cgrp, of->file->f_path.dentry->d_sb, false, ctx->ns); + revert_creds(saved_cred); if (ret) goto out_finish; ret = cgroup_attach_task(dst_cgrp, task, false); out_finish: - cgroup_procs_write_finish(task, locked); + cgroup_procs_write_finish(task, threadgroup_locked); out_unlock: cgroup_kn_unlock(of->kn); diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 354a3a158601..6820a0c57425 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -1083,10 +1084,18 @@ static void update_tasks_cpumask(struct cpuset *cs) { struct css_task_iter it; struct task_struct *task; + bool top_cs = cs == &top_cpuset; css_task_iter_start(&cs->css, 0, &it); - while ((task = css_task_iter_next(&it))) + while ((task = css_task_iter_next(&it))) { + /* + * Percpu kthreads in top_cpuset are ignored + */ + if (top_cs && (task->flags & PF_KTHREAD) && + kthread_is_per_cpu(task)) + continue; update_cpus_allowed(cs, task, cs->effective_cpus); + } css_task_iter_end(&it); } @@ -2042,12 +2051,7 @@ static int update_prstate(struct cpuset *cs, int new_prs) update_flag(CS_CPU_EXCLUSIVE, cs, 0); } - /* - * Update cpumask of parent's tasks except when it is the top - * cpuset as some system daemons cannot be mapped to other CPUs. - */ - if (parent != &top_cpuset) - update_tasks_cpumask(parent); + update_tasks_cpumask(parent); if (parent->child_ecpus_count) update_sibling_cpumasks(parent, cs, &tmpmask); @@ -2188,7 +2192,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset) goto out_unlock; cgroup_taskset_for_each(task, css, tset) { - ret = task_can_attach(task, cs->cpus_allowed); + ret = task_can_attach(task, cs->effective_cpus); if (ret) goto out_unlock; ret = security_task_setscheduler(task); @@ -2238,7 +2242,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) cgroup_taskset_first(tset, &css); cs = css_cs(css); - cpus_read_lock(); + lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */ mutex_lock(&cpuset_mutex); guarantee_online_mems(cs, &cpuset_attach_nodemask_to); @@ -2292,7 +2296,6 @@ static void cpuset_attach(struct cgroup_taskset *tset) wake_up(&cpuset_attach_wq); mutex_unlock(&cpuset_mutex); - cpus_read_unlock(); } /* The various types of files and directories in a cpuset file system */ @@ -3334,8 +3337,11 @@ static struct notifier_block cpuset_track_online_nodes_nb = { */ void __init cpuset_init_smp(void) { - cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); - top_cpuset.mems_allowed = node_states[N_MEMORY]; + /* + * cpus_allowed/mems_allowed set to v2 values in the initial + * cpuset_bind() call will be reset to v1 values in another + * cpuset_bind() call when v1 cpuset is mounted. 
+ */ top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); diff --git a/kernel/cpu.c b/kernel/cpu.c index e107dff94922..d2e4b5632fa8 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -1158,7 +1159,7 @@ int remove_cpu(unsigned int cpu) } EXPORT_SYMBOL_GPL(remove_cpu); -extern bool dl_cpu_busy(unsigned int cpu); +extern int dl_cpu_busy(int cpu, struct task_struct *p); int __pause_drain_rq(struct cpumask *cpus) { @@ -1233,7 +1234,7 @@ int pause_cpus(struct cpumask *cpus) cpumask_and(cpus, cpus, cpu_active_mask); for_each_cpu(cpu, cpus) { - if (!cpu_online(cpu) || dl_cpu_busy(cpu) || + if (!cpu_online(cpu) || dl_cpu_busy(cpu, NULL) || get_cpu_device(cpu)->offline_disabled == true) { err = -EBUSY; goto err_cpu_maps_update; @@ -1861,6 +1862,22 @@ core_initcall(cpu_hotplug_pm_sync_init); int __boot_cpu_id; +/* Horrific hacks because we can't add more to cpuhp_hp_states. */ +static int random_and_perf_prepare_fusion(unsigned int cpu) +{ +#ifdef CONFIG_PERF_EVENTS + perf_event_init_cpu(cpu); +#endif + random_prepare_cpu(cpu); + return 0; +} +static int random_and_workqueue_online_fusion(unsigned int cpu) +{ + workqueue_online_cpu(cpu); + random_online_cpu(cpu); + return 0; +} + #endif /* CONFIG_SMP */ /* Boot processor state steps */ @@ -1879,7 +1896,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { }, [CPUHP_PERF_PREPARE] = { .name = "perf:prepare", - .startup.single = perf_event_init_cpu, + .startup.single = random_and_perf_prepare_fusion, .teardown.single = perf_event_exit_cpu, }, [CPUHP_WORKQUEUE_PREP] = { @@ -1995,7 +2012,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { }, [CPUHP_AP_WORKQUEUE_ONLINE] = { .name = "workqueue:online", - .startup.single = workqueue_online_cpu, + .startup.single = random_and_workqueue_online_fusion, .teardown.single = workqueue_offline_cpu, }, [CPUHP_AP_RCUTREE_ONLINE] = { diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 8661eb2b1771..0f31b22abe8d 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include @@ -756,6 +757,29 @@ cpu_master_loop: continue; kgdb_connected = 0; } else { + /* + * This is a brutal way to interfere with the debugger + * and prevent gdb being used to poke at kernel memory. + * This could cause trouble if lockdown is applied when + * there is already an active gdb session. For now the + * answer is simply "don't do that". Typically lockdown + * *will* be applied before the debug core gets started + * so only developers using kgdb for fairly advanced + * early kernel debug can be bitten by this. Hopefully + * they are sophisticated enough to take care of + * themselves, especially with help from the lockdown + * message printed on the console! + */ + if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) { + if (IS_ENABLED(CONFIG_KGDB_KDB)) { + /* Switch back to kdb if possible... */ + dbg_kdb_mode = 1; + continue; + } else { + /* ... 
otherwise just bail */ + break; + } + } error = gdb_serial_stub(ks); } diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 930ac1b25ec7..4e09fab52faf 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -45,6 +45,7 @@ #include #include #include +#include #include "kdb_private.h" #undef MODULE_PARAM_PREFIX @@ -197,10 +198,62 @@ struct task_struct *kdb_curr_task(int cpu) } /* - * Check whether the flags of the current command and the permissions - * of the kdb console has allow a command to be run. + * Update the permissions flags (kdb_cmd_enabled) to match the + * current lockdown state. + * + * Within this function the calls to security_locked_down() are "lazy". We + * avoid calling them if the current value of kdb_cmd_enabled already excludes + * flags that might be subject to lockdown. Additionally we deliberately check + * the lockdown flags independently (even though read lockdown implies write + * lockdown) since that results in both simpler code and clearer messages to + * the user on first-time debugger entry. + * + * The permission masks during a read+write lockdown permit the following + * flags: INSPECT, SIGNAL, REBOOT (and ALWAYS_SAFE). + * + * The INSPECT commands are not blocked during lockdown because they are + * not arbitrary memory reads. INSPECT covers the backtrace family (sometimes + * forcing them to have no arguments) and lsmod. These commands do expose + * some kernel state but do not allow the developer seated at the console to + * choose what state is reported. SIGNAL and REBOOT should not be controversial, + * given these are allowed for root during lockdown already. */ -static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, +static void kdb_check_for_lockdown(void) +{ + const int write_flags = KDB_ENABLE_MEM_WRITE | + KDB_ENABLE_REG_WRITE | + KDB_ENABLE_FLOW_CTRL; + const int read_flags = KDB_ENABLE_MEM_READ | + KDB_ENABLE_REG_READ; + + bool need_to_lockdown_write = false; + bool need_to_lockdown_read = false; + + if (kdb_cmd_enabled & (KDB_ENABLE_ALL | write_flags)) + need_to_lockdown_write = + security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL); + + if (kdb_cmd_enabled & (KDB_ENABLE_ALL | read_flags)) + need_to_lockdown_read = + security_locked_down(LOCKDOWN_DBG_READ_KERNEL); + + /* De-compose KDB_ENABLE_ALL if required */ + if (need_to_lockdown_write || need_to_lockdown_read) + if (kdb_cmd_enabled & KDB_ENABLE_ALL) + kdb_cmd_enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL; + + if (need_to_lockdown_write) + kdb_cmd_enabled &= ~write_flags; + + if (need_to_lockdown_read) + kdb_cmd_enabled &= ~read_flags; +} + +/* + * Check whether the flags of the current command, the permissions of the kdb + * console and the lockdown state allow a command to be run. + */ +static bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, bool no_args) { /* permissions comes from userspace so needs massaging slightly */ @@ -1194,6 +1247,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, kdb_curr_task(raw_smp_processor_id()); KDB_DEBUG_STATE("kdb_local 1", reason); + + kdb_check_for_lockdown(); + kdb_go_count = 0; if (reason == KDB_REASON_DEBUG) { /* special case below */ diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 6bca5077ee71..4833c6f7d3d0 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -448,7 +448,7 @@ void debug_dma_dump_mappings(struct device *dev) * other hand, consumes a single dma_debug_entry, but inserts 'nents' * entries into the tree. 
*/ -static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT); +static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC); static DEFINE_SPINLOCK(radix_lock); #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT) @@ -564,7 +564,7 @@ static void add_dma_entry(struct dma_debug_entry *entry) rc = active_cacheline_insert(entry); if (rc == -ENOMEM) { - pr_err("cacheline tracking ENOMEM, dma-debug disabled\n"); + pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n"); global_disable = true; } diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 8ca84610d4d4..2be1d337a708 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -43,7 +43,6 @@ u64 dma_direct_get_required_mask(struct device *dev) return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; } -EXPORT_SYMBOL_GPL(dma_direct_get_required_mask); static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, u64 *phys_limit) @@ -191,7 +190,7 @@ void *dma_direct_alloc(struct device *dev, size_t size, goto out_free_pages; if (force_dma_unencrypted(dev)) { err = set_memory_decrypted((unsigned long)ret, - 1 << get_order(size)); + PFN_UP(size)); if (err) goto out_free_pages; } @@ -213,7 +212,7 @@ void *dma_direct_alloc(struct device *dev, size_t size, ret = page_address(page); if (force_dma_unencrypted(dev)) { err = set_memory_decrypted((unsigned long)ret, - 1 << get_order(size)); + PFN_UP(size)); if (err) goto out_free_pages; } @@ -234,7 +233,7 @@ done: out_encrypt_pages: if (force_dma_unencrypted(dev)) { err = set_memory_encrypted((unsigned long)page_address(page), - 1 << get_order(size)); + PFN_UP(size)); /* If memory cannot be re-encrypted, it must be leaked */ if (err) return NULL; @@ -247,8 +246,6 @@ out_free_pages: void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { - unsigned int page_order = get_order(size); - if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) && !force_dma_unencrypted(dev)) { /* cpu_addr is a struct page cookie, not a kernel address */ @@ -269,7 +266,7 @@ void dma_direct_free(struct device *dev, size_t size, return; if (force_dma_unencrypted(dev)) - set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order); + set_memory_encrypted((unsigned long)cpu_addr, PFN_UP(size)); if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) vunmap(cpu_addr); @@ -305,8 +302,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size, ret = page_address(page); if (force_dma_unencrypted(dev)) { - if (set_memory_decrypted((unsigned long)ret, - 1 << get_order(size))) + if (set_memory_decrypted((unsigned long)ret, PFN_UP(size))) goto out_free_pages; } memset(ret, 0, size); @@ -316,13 +312,11 @@ out_free_pages: dma_free_contiguous(dev, page, size); return NULL; } -EXPORT_SYMBOL_GPL(dma_direct_alloc); void dma_direct_free_pages(struct device *dev, size_t size, struct page *page, dma_addr_t dma_addr, enum dma_data_direction dir) { - unsigned int page_order = get_order(size); void *vaddr = page_address(page); /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */ @@ -331,11 +325,10 @@ void dma_direct_free_pages(struct device *dev, size_t size, return; if (force_dma_unencrypted(dev)) - set_memory_encrypted((unsigned long)vaddr, 1 << page_order); + set_memory_encrypted((unsigned long)vaddr, PFN_UP(size)); dma_free_contiguous(dev, page, size); } -EXPORT_SYMBOL_GPL(dma_direct_free); #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ defined(CONFIG_SWIOTLB) diff 
--git a/kernel/dma/direct.h b/kernel/dma/direct.h index b98615578737..c9d380318dd8 100644 --- a/kernel/dma/direct.h +++ b/kernel/dma/direct.h @@ -114,6 +114,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, dma_direct_sync_single_for_cpu(dev, addr, size, dir); if (unlikely(is_swiotlb_buffer(phys))) - swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs); + swiotlb_tbl_unmap_single(dev, phys, size, size, dir, + attrs | DMA_ATTR_SKIP_CPU_SYNC); } #endif /* _KERNEL_DMA_DIRECT_H */ diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c index e28e1e17eaf5..af4a6ef48ce0 100644 --- a/kernel/dma/ops_helpers.c +++ b/kernel/dma/ops_helpers.c @@ -27,7 +27,6 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); return ret; } -EXPORT_SYMBOL_GPL(dma_common_get_sgtable); /* * Create userspace mapping for the DMA-coherent memory. @@ -58,7 +57,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, return -ENXIO; #endif /* CONFIG_MMU */ } -EXPORT_SYMBOL_GPL(dma_common_mmap); struct page *dma_common_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 62b1e5fa8673..d897d161366a 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -452,7 +452,10 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr, } } -#define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT)) +static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx) +{ + return start + (idx << IO_TLB_SHIFT); +} /* * Return the offset into a iotlb slot required to keep the device happy. @@ -597,10 +600,14 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i); tlb_addr = slot_addr(io_tlb_start, index) + offset; - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE || - dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE); + /* + * When dir == DMA_FROM_DEVICE we could omit the copy from the orig + * to the tlb buffer, if we knew for sure the device will + * overwrite the entire current content. But we don't. Thus + * unconditional bounce may prevent leaking swiotlb content (i.e. + * kernel memory) to user-space. + */ + swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE); return tlb_addr; } @@ -727,7 +734,18 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, size_t swiotlb_max_mapping_size(struct device *dev) { - return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE; + int min_align_mask = dma_get_min_align_mask(dev); + int min_align = 0; + + /* + * swiotlb_find_slots() skips slots according to + * min align mask. This affects max mapping size. + * Take it into account here. 
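+ *
+ * Worked example (illustrative numbers, using the defaults in this
+ * file): IO_TLB_SIZE = 2KB and IO_TLB_SEGSIZE = 128 give a 256KB
+ * ceiling; a device with min_align_mask = 4KB - 1 rounds that mask up
+ * to min_align = 4KB, so the reported maximum drops to 252KB.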
+ */ + if (min_align_mask) + min_align = roundup(min_align_mask, IO_TLB_SIZE); + + return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align; } bool is_swiotlb_active(void) diff --git a/kernel/entry/common.c b/kernel/entry/common.c index e289e6773292..09f58853f692 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -135,7 +135,15 @@ static __always_inline void exit_to_user_mode(void) } /* Workaround to allow gradual conversion of architecture code */ -void __weak arch_do_signal(struct pt_regs *regs) { } +void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { } + +static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work) +{ + if (ti_work & _TIF_NOTIFY_SIGNAL) + tracehook_notify_signal(); + + arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING); +} static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work) @@ -157,8 +165,8 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, if (ti_work & _TIF_PATCH_PENDING) klp_update_patch_state(current); - if (ti_work & _TIF_SIGPENDING) - arch_do_signal(regs); + if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + handle_signal_work(regs, ti_work); if (ti_work & _TIF_NOTIFY_RESUME) { tracehook_notify_resume(regs); diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c index 2a3139dab109..7b946847be78 100644 --- a/kernel/entry/kvm.c +++ b/kernel/entry/kvm.c @@ -8,7 +8,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work) do { int ret; - if (ti_work & _TIF_SIGPENDING) { + if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { kvm_handle_signal_exit(vcpu); return -EINTR; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 096799264d77..d3fc75e49792 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6175,17 +6175,17 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) again: mutex_lock(&event->mmap_mutex); if (event->rb) { - if (event->rb->nr_pages != nr_pages) { + if (data_page_nr(event->rb) != nr_pages) { ret = -EINVAL; goto unlock; } if (!atomic_inc_not_zero(&event->rb->mmap_count)) { /* - * Raced against perf_mmap_close() through - * perf_event_set_output(). Try again, hope for better - * luck. + * Raced against perf_mmap_close(); remove the + * event and try again. */ + ring_buffer_attach(event, NULL); mutex_unlock(&event->mmap_mutex); goto again; } @@ -8695,7 +8695,7 @@ static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog, PERF_RECORD_KSYMBOL_TYPE_BPF, (u64)(unsigned long)subprog->bpf_func, subprog->jited_len, unregister, - prog->aux->ksym.name); + subprog->aux->ksym.name); } } } @@ -11542,14 +11542,25 @@ err_size: goto out; } +static void mutex_lock_double(struct mutex *a, struct mutex *b) +{ + if (b < a) + swap(a, b); + + mutex_lock(a); + mutex_lock_nested(b, SINGLE_DEPTH_NESTING); +} + static int perf_event_set_output(struct perf_event *event, struct perf_event *output_event) { struct perf_buffer *rb = NULL; int ret = -EINVAL; - if (!output_event) + if (!output_event) { + mutex_lock(&event->mmap_mutex); goto set; + } /* don't allow circular references */ if (event == output_event) @@ -11587,8 +11598,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) event->pmu != output_event->pmu) goto out; + /* + * Hold both mmap_mutex to serialize against perf_mmap_close(). 
Since + * output_event is already on rb->event_list, and the list iteration + * restarts after every removal, it is guaranteed this new event is + * observed *OR* if output_event is already removed, it's guaranteed we + * observe !rb->mmap_count. + */ + mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex); set: - mutex_lock(&event->mmap_mutex); /* Can't redirect output if we've got an active mmap() */ if (atomic_read(&event->mmap_count)) goto unlock; @@ -11598,6 +11616,12 @@ set: rb = ring_buffer_get(output_event); if (!rb) goto unlock; + + /* did we race against perf_mmap_close() */ + if (!atomic_read(&rb->mmap_count)) { + ring_buffer_put(rb); + goto unlock; + } } ring_buffer_attach(event, rb); @@ -11605,20 +11629,13 @@ set: ret = 0; unlock: mutex_unlock(&event->mmap_mutex); + if (output_event) + mutex_unlock(&output_event->mmap_mutex); out: return ret; } -static void mutex_lock_double(struct mutex *a, struct mutex *b) -{ - if (b < a) - swap(a, b); - - mutex_lock(a); - mutex_lock_nested(b, SINGLE_DEPTH_NESTING); -} - static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) { bool nmi_safe = false; @@ -11901,6 +11918,9 @@ SYSCALL_DEFINE5(perf_event_open, * Do not allow to attach to a group in a different task * or CPU context. If we're moving SW events, we'll fix * this up later, so allow that. + * + * Racy, not holding group_leader->ctx->mutex, see comment with + * perf_event_ctx_lock(). */ if (!move_group && group_leader->ctx != ctx) goto err_context; @@ -11968,6 +11988,7 @@ SYSCALL_DEFINE5(perf_event_open, } else { perf_event_ctx_unlock(group_leader, gctx); move_group = 0; + goto not_move_group; } } @@ -11984,7 +12005,17 @@ SYSCALL_DEFINE5(perf_event_open, } } else { mutex_lock(&ctx->mutex); + + /* + * Now that we hold ctx->lock, (re)validate group_leader->ctx == ctx, + * see the group_leader && !move_group test earlier. 
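+ *
+ * (Illustratively: another thread may have moved group_leader to a
+ * different context between the earlier unlocked test and this point,
+ * which is why that test alone is not sufficient.)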
+ */ + if (group_leader && group_leader->ctx != ctx) { + err = -EINVAL; + goto err_locked; + } } +not_move_group: if (ctx->task == TASK_TOMBSTONE) { err = -ESRCH; diff --git a/kernel/events/internal.h b/kernel/events/internal.h index 228801e20788..aa23ffdaf819 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -116,6 +116,11 @@ static inline int page_order(struct perf_buffer *rb) } #endif +static inline int data_page_nr(struct perf_buffer *rb) +{ + return rb->nr_pages << page_order(rb); +} + static inline unsigned long perf_data_size(struct perf_buffer *rb) { return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index ef91ae75ca56..4032cd475000 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -856,11 +856,6 @@ void rb_free(struct perf_buffer *rb) } #else -static int data_page_nr(struct perf_buffer *rb) -{ - return rb->nr_pages << page_order(rb); -} - static struct page * __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff) { diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index e1bbb3b92921..826a2355da1e 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1973,7 +1973,7 @@ bool uprobe_deny_signal(void) WARN_ON_ONCE(utask->state != UTASK_SSTEP); - if (signal_pending(t)) { + if (task_sigpending(t)) { spin_lock_irq(&t->sighand->siglock); clear_tsk_thread_flag(t, TIF_SIGPENDING); spin_unlock_irq(&t->sighand->siglock); diff --git a/kernel/exit.c b/kernel/exit.c index f61ac326daab..86e403191d47 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -765,7 +765,7 @@ void __noreturn do_exit(long code) schedule(); } - io_uring_files_cancel(tsk->files); + io_uring_files_cancel(); exit_signals(tsk); /* sets PF_EXITING */ /* sync mm's RSS info before statistics gathering */ @@ -784,7 +784,7 @@ void __noreturn do_exit(long code) #ifdef CONFIG_POSIX_TIMERS hrtimer_cancel(&tsk->signal->real_timer); - exit_itimers(tsk->signal); + exit_itimers(tsk); #endif if (tsk->mm) setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); diff --git a/kernel/fork.c b/kernel/fork.c index 3b281326c0e1..f73e3e694a6d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -961,6 +961,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; + tsk->pf_io_worker = NULL; account_kernel_stack(tsk, 1); @@ -1150,8 +1151,10 @@ void mmput(struct mm_struct *mm) { might_sleep(); - if (atomic_dec_and_test(&mm->mm_users)) + if (atomic_dec_and_test(&mm->mm_users)) { + trace_android_vh_mmput(NULL); __mmput(mm); + } } EXPORT_SYMBOL_GPL(mmput); @@ -1171,6 +1174,7 @@ void mmput_async(struct mm_struct *mm) schedule_work(&mm->async_put_work); } } +EXPORT_SYMBOL_GPL(mmput_async); #endif /** @@ -1985,13 +1989,21 @@ static __latent_entropy struct task_struct *copy_process( recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); retval = -ERESTARTNOINTR; - if (signal_pending(current)) + if (task_sigpending(current)) goto fork_out; retval = -ENOMEM; p = dup_task_struct(current, node); if (!p) goto fork_out; + if (args->io_thread) { + /* + * Mark us an IO worker, and block any signal that isn't + * fatal or STOP + */ + p->flags |= PF_IO_WORKER; + siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP)); + } cpufreq_task_times_init(p); @@ -2471,6 +2483,28 @@ struct mm_struct *copy_init_mm(void) return dup_mm(NULL, &init_mm); } +/* + * This is like kernel_clone(), but shaved down and 
tailored to just + * creating io_uring workers. It returns a created task, or an error pointer. + * The returned task is inactive, and the caller must fire it up through + * wake_up_new_task(p). All signals are blocked in the created task. + */ +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) +{ + unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| + CLONE_IO; + struct kernel_clone_args args = { + .flags = ((lower_32_bits(flags) | CLONE_VM | + CLONE_UNTRACED) & ~CSIGNAL), + .exit_signal = (lower_32_bits(flags) & CSIGNAL), + .stack = (unsigned long)fn, + .stack_size = (unsigned long)arg, + .io_thread = 1, + }; + + return copy_process(NULL, 0, node, &args); +} + /* * Ok, this is the main fork-routine. * diff --git a/kernel/futex.c b/kernel/futex.c index 29bd9cd92468..b223cc525032 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1594,6 +1594,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) struct futex_q *this, *next; union futex_key key = FUTEX_KEY_INIT; int ret; + int target_nr; DEFINE_WAKE_Q(wake_q); if (!bitset) @@ -1611,6 +1612,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) spin_lock(&hb->lock); + trace_android_vh_futex_wake_traverse_plist(&hb->chain, &target_nr, key, bitset); plist_for_each_entry_safe(this, next, &hb->chain, list) { if (match_futex (&this->key, &key)) { if (this->pi_state || this->rt_waiter) { @@ -1622,6 +1624,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) if (!(this->bitset & bitset)) continue; + trace_android_vh_futex_wake_this(ret, nr_wake, target_nr, this->task); mark_wake_futex(&wake_q, this); if (++ret >= nr_wake) break; @@ -1630,6 +1633,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) spin_unlock(&hb->lock); wake_up_q(&wake_q); + trace_android_vh_futex_wake_up_q_finish(nr_wake, target_nr); return ret; } @@ -2699,6 +2703,7 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, if (!bitset) return -EINVAL; q.bitset = bitset; + trace_android_vh_futex_wait_start(flags, bitset); to = futex_setup_timer(abs_time, &timeout, flags, current->timer_slack_ns); @@ -2748,6 +2753,7 @@ out: hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } + trace_android_vh_futex_wait_end(flags, bitset); return ret; } @@ -3733,6 +3739,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, return -ENOSYS; } + trace_android_vh_do_futex(cmd, &flags, uaddr2); switch (cmd) { case FUTEX_WAIT: val3 = FUTEX_BITSET_MATCH_ANY; diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c index c466c7fbdece..ea6b45d0fa0d 100644 --- a/kernel/gcov/clang.c +++ b/kernel/gcov/clang.c @@ -327,6 +327,8 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src) for (i = 0; i < sfn_ptr->num_counters; i++) dfn_ptr->counters[i] += sfn_ptr->counters[i]; + + sfn_ptr = list_next_entry(sfn_ptr, head); } } diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index 53c67c87f141..c699feda21ac 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -33,6 +33,13 @@ #define GCOV_TAG_FUNCTION_LENGTH 3 +/* Since GCC 12.1 sizes are in BYTES and not in WORDS (4B). 
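+ * For example, with GCOV_UNIT_SIZE == 4 the function record below is
+ * emitted as GCOV_TAG_FUNCTION_LENGTH * 4 = 12 bytes instead of 3
+ * words, and a counter record covering "num" counters spans
+ * num * 2 * 4 bytes (two 32-bit words per 64-bit counter).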
*/ +#if (__GNUC__ >= 12) +#define GCOV_UNIT_SIZE 4 +#else +#define GCOV_UNIT_SIZE 1 +#endif + static struct gcov_info *gcov_info_head; /** @@ -451,12 +458,18 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info) pos += store_gcov_u32(buffer, pos, info->version); pos += store_gcov_u32(buffer, pos, info->stamp); +#if (__GNUC__ >= 12) + /* Use zero as checksum of the compilation unit. */ + pos += store_gcov_u32(buffer, pos, 0); +#endif + for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) { fi_ptr = info->functions[fi_idx]; /* Function record. */ pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION); - pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION_LENGTH); + pos += store_gcov_u32(buffer, pos, + GCOV_TAG_FUNCTION_LENGTH * GCOV_UNIT_SIZE); pos += store_gcov_u32(buffer, pos, fi_ptr->ident); pos += store_gcov_u32(buffer, pos, fi_ptr->lineno_checksum); pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum); @@ -470,7 +483,8 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info) /* Counter record. */ pos += store_gcov_u32(buffer, pos, GCOV_TAG_FOR_COUNTER(ct_idx)); - pos += store_gcov_u32(buffer, pos, ci_ptr->num * 2); + pos += store_gcov_u32(buffer, pos, + ci_ptr->num * 2 * GCOV_UNIT_SIZE); for (cv_idx = 0; cv_idx < ci_ptr->num; cv_idx++) { pos += store_gcov_u64(buffer, pos, diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index ae9b13d5ee91..1bd144ed251d 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -82,6 +82,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS # Generic IRQ IPI support config GENERIC_IRQ_IPI bool + depends on SMP select IRQ_DOMAIN_HIERARCHY # Generic MSI interrupt support diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c index 4d89ad4fae3b..5fb78addff51 100644 --- a/kernel/irq/affinity.c +++ b/kernel/irq/affinity.c @@ -269,8 +269,9 @@ static int __irq_build_affinity_masks(unsigned int startvec, */ if (numvecs <= nodes) { for_each_node_mask(n, nodemsk) { - cpumask_or(&masks[curvec].mask, &masks[curvec].mask, - node_to_cpumask[n]); + /* Ensure that only CPUs which are in both masks are set */ + cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]); + cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk); if (++curvec == last_affv) curvec = firstvec; } diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 91c48deeb429..520b9fa620c5 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -1566,7 +1566,8 @@ int irq_chip_request_resources_parent(struct irq_data *data) if (data->chip->irq_request_resources) return data->chip->irq_request_resources(data); - return -ENOSYS; + /* no error on missing optional irq_chip::irq_request_resources */ + return 0; } EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 762a928e18f9..8806444a6855 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -195,7 +195,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) retval = __handle_irq_event_percpu(desc, &flags); - add_interrupt_randomness(desc->irq_data.irq, flags); + add_interrupt_randomness(desc->irq_data.irq); if (!noirqdebug) note_interrupt(desc, retval); diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 54363527feea..e58342ace11f 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -29,12 +29,14 @@ extern struct irqaction chained_action; * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed * IRQTF_AFFINITY - irq thread is requested to adjust affinity * IRQTF_FORCED_THREAD - irq action is force 
threaded
+ * IRQTF_READY - signals that irq thread is ready
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
	IRQTF_FORCED_THREAD,
+	IRQTF_READY,
};

/*
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index aa636e19ea59..2f35de34fe03 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -405,6 +405,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	mutex_init(&desc->request_mutex);
 	init_rcu_head(&desc->rcu);
+	init_waitqueue_head(&desc->wait_for_threads);
 
 	desc_set_defaults(irq, desc, node, affinity, owner);
 	irqd_set(&desc->irq_data, flags);
@@ -573,6 +574,7 @@ int __init early_irq_init(void)
 		raw_spin_lock_init(&desc[i].lock);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
 		mutex_init(&desc[i].request_mutex);
+		init_waitqueue_head(&desc[i].wait_for_threads);
 		desc_set_defaults(i, &desc[i], node, NULL, NULL);
 	}
 	return arch_early_irq_init();
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ec1c6e6ed7cf..76da8deea3a1 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -223,11 +223,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 {
 	struct irq_desc *desc = irq_data_to_desc(data);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	const struct cpumask *prog_mask;
 	int ret;
 
+	static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
+	static struct cpumask tmp_mask;
+
 	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;
 
+	raw_spin_lock(&tmp_mask_lock);
 	/*
 	 * If this is a managed interrupt and housekeeping is enabled on
 	 * it check whether the requested affinity mask intersects with
@@ -249,24 +254,34 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	 */
 	if (irqd_affinity_is_managed(data) &&
 	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
-		const struct cpumask *hk_mask, *prog_mask;
-
-		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
-		static struct cpumask tmp_mask;
+		const struct cpumask *hk_mask;
 
 		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
 
-		raw_spin_lock(&tmp_mask_lock);
 		cpumask_and(&tmp_mask, mask, hk_mask);
 		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
 			prog_mask = mask;
 		else
 			prog_mask = &tmp_mask;
-		ret = chip->irq_set_affinity(data, prog_mask, force);
-		raw_spin_unlock(&tmp_mask_lock);
 	} else {
-		ret = chip->irq_set_affinity(data, mask, force);
+		prog_mask = mask;
 	}
+
+	/*
+	 * Make sure we only provide online CPUs to the irqchip,
+	 * unless we are being asked to force the affinity (in which
+	 * case we do as we are told).
+	 */
+	cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
+	if (!force && !cpumask_empty(&tmp_mask))
+		ret = chip->irq_set_affinity(data, &tmp_mask, force);
+	else if (force)
+		ret = chip->irq_set_affinity(data, mask, force);
+	else
+		ret = -EINVAL;
+
+	raw_spin_unlock(&tmp_mask_lock);
+
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
 	case IRQ_SET_MASK_OK_DONE:
@@ -1149,6 +1164,31 @@ static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
 	raw_spin_unlock_irq(&desc->lock);
 }
 
+/*
+ * Internal function to notify that an interrupt thread is ready.
+ */
+static void irq_thread_set_ready(struct irq_desc *desc,
+				 struct irqaction *action)
+{
+	set_bit(IRQTF_READY, &action->thread_flags);
+	wake_up(&desc->wait_for_threads);
+}
+
+/*
+ * Internal function to wake up an interrupt thread and wait until it is
+ * ready.
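
The readiness handshake these two helpers implement can be sketched in userspace with pthreads; thread_ready plays IRQTF_READY and the condition variable plays desc->wait_for_threads (a loose analogy for illustration, not the kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool thread_ready;

static void announce_ready(void)	/* plays irq_thread_set_ready() */
{
	pthread_mutex_lock(&lock);
	thread_ready = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	announce_ready();
	/* ... handler loop would run here ... */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	/* plays wake_up_and_wait_for_irq_thread_ready() */
	pthread_mutex_lock(&lock);
	while (!thread_ready)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	puts("worker is ready; safe to fire the first interrupt");
	pthread_join(t, NULL);
	return 0;
}
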
+ */ +static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc, + struct irqaction *action) +{ + if (!action || !action->thread) + return; + + wake_up_process(action->thread); + wait_event(desc->wait_for_threads, + test_bit(IRQTF_READY, &action->thread_flags)); +} + /* * Interrupt handler thread */ @@ -1160,6 +1200,8 @@ static int irq_thread(void *data) irqreturn_t (*handler_fn)(struct irq_desc *desc, struct irqaction *action); + irq_thread_set_ready(desc, action); + if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, &action->thread_flags)) handler_fn = irq_forced_thread_fn; @@ -1584,8 +1626,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) } if (!shared) { - init_waitqueue_head(&desc->wait_for_threads); - /* Setup the type (level, edge polarity) if configured: */ if (new->flags & IRQF_TRIGGER_MASK) { ret = __irq_set_trigger(desc, @@ -1675,14 +1715,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) irq_setup_timings(desc, new); - /* - * Strictly no need to wake it up, but hung_task complains - * when no hard interrupt wakes the thread up. - */ - if (new->thread) - wake_up_process(new->thread); - if (new->secondary) - wake_up_process(new->secondary->thread); + wake_up_and_wait_for_irq_thread_ready(desc, new); + wake_up_and_wait_for_irq_thread_ready(desc, new->secondary); register_irq_proc(irq, desc); new->dir = NULL; diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index d217acc9f71b..b47d95b68ac1 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -456,6 +456,13 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, irqd_clr_can_reserve(irq_data); if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK) irqd_set_msi_nomask_quirk(irq_data); + if ((info->flags & MSI_FLAG_ACTIVATE_EARLY) && + irqd_affinity_is_managed(irq_data) && + !cpumask_intersects(irq_data_get_affinity_mask(irq_data), + cpu_online_mask)) { + irqd_set_managed_shutdown(irq_data); + continue; + } } ret = irq_domain_activate_irq(irq_data, can_reserve); if (ret) diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index aea9104265f2..fff11916aba3 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -29,6 +29,15 @@ #include #include "kexec_internal.h" +#ifdef CONFIG_KEXEC_SIG +static bool sig_enforce = IS_ENABLED(CONFIG_KEXEC_SIG_FORCE); + +void set_kexec_sig_enforced(void) +{ + sig_enforce = true; +} +#endif + static int kexec_calculate_store_digests(struct kimage *image); /* @@ -108,40 +117,6 @@ int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, } #endif -/* - * arch_kexec_apply_relocations_add - apply relocations of type RELA - * @pi: Purgatory to be relocated. - * @section: Section relocations applying to. - * @relsec: Section containing RELAs. - * @symtab: Corresponding symtab. - * - * Return: 0 on success, negative errno on error. - */ -int __weak -arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section, - const Elf_Shdr *relsec, const Elf_Shdr *symtab) -{ - pr_err("RELA relocation unsupported.\n"); - return -ENOEXEC; -} - -/* - * arch_kexec_apply_relocations - apply relocations of type REL - * @pi: Purgatory to be relocated. - * @section: Section relocations applying to. - * @relsec: Section containing RELs. - * @symtab: Corresponding symtab. - * - * Return: 0 on success, negative errno on error. 
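
The sig_enforce switch introduced at the top of this kexec_file.c diff replaces a compile-time IS_ENABLED() test with a runtime latch that set_kexec_sig_enforced() can flip on. A standalone sketch of that pattern (the Kconfig default value below is an assumption for the example):

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_SIG_FORCE_DEFAULT false	/* assumption: Kconfig option off */

static bool sig_enforce = CONFIG_SIG_FORCE_DEFAULT;

static void set_sig_enforced(void)
{
	sig_enforce = true;	/* one-way latch, like the kernel's */
}

static int validate(bool sig_ok)
{
	if (sig_ok)
		return 0;
	if (sig_enforce) {
		puts("enforced signature verification failed");
		return -1;
	}
	puts("unsigned image allowed (not enforced)");
	return 0;
}

int main(void)
{
	validate(false);	/* allowed */
	set_sig_enforced();
	validate(false);	/* now rejected */
	return 0;
}
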
- */ -int __weak -arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section, - const Elf_Shdr *relsec, const Elf_Shdr *symtab) -{ - pr_err("REL relocation unsupported.\n"); - return -ENOEXEC; -} - /* * Free up memory used by kernel, initrd, and command line. This is temporary * memory allocation which is not needed any more after these buffers have @@ -193,7 +168,7 @@ kimage_validate_signature(struct kimage *image) image->kernel_buf_len); if (ret) { - if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) { + if (sig_enforce) { pr_notice("Enforced kernel signature verification failed (%d).\n", ret); return ret; } diff --git a/kernel/kprobes.c b/kernel/kprobes.c index cdea59acd66b..75150e755518 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1640,7 +1640,9 @@ static int check_kprobe_address_safe(struct kprobe *p, preempt_disable(); /* Ensure it is not in reserved area nor out of text */ - if (!kernel_text_address((unsigned long) p->addr) || + if (!(core_kernel_text((unsigned long) p->addr) || + is_module_text_address((unsigned long) p->addr)) || + in_gate_area_no_mm((unsigned long) p->addr) || within_kprobe_blacklist((unsigned long) p->addr) || jump_label_text_reserved(p->addr, p->addr) || static_call_text_reserved(p->addr, p->addr) || @@ -1785,11 +1787,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p) /* Try to disarm and disable this/parent probe */ if (p == orig_p || aggr_kprobe_disabled(orig_p)) { /* - * If kprobes_all_disarmed is set, orig_p - * should have already been disarmed, so - * skip unneed disarming process. + * Don't be lazy here. Even if 'kprobes_all_disarmed' + * is false, 'orig_p' might not have been armed yet. + * Note arm_all_kprobes() __tries__ to arm all kprobes + * on the best effort basis. */ - if (!kprobes_all_disarmed) { + if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) { ret = disarm_kprobe(orig_p, true); if (ret) { p->flags &= ~KPROBE_FLAG_DISABLED; @@ -1838,7 +1841,13 @@ static int __unregister_kprobe_top(struct kprobe *p) if ((list_p != p) && (list_p->post_handler)) goto noclean; } - ap->post_handler = NULL; + /* + * For the kprobe-on-ftrace case, we keep the + * post_handler setting to identify this aggrprobe + * armed with kprobe_ipmodify_ops. + */ + if (!kprobe_ftrace(ap)) + ap->post_handler = NULL; } noclean: /* @@ -2332,8 +2341,11 @@ int enable_kprobe(struct kprobe *kp) if (!kprobes_all_disarmed && kprobe_disabled(p)) { p->flags &= ~KPROBE_FLAG_DISABLED; ret = arm_kprobe(p); - if (ret) + if (ret) { p->flags |= KPROBE_FLAG_DISABLED; + if (p != kp) + kp->flags |= KPROBE_FLAG_DISABLED; + } } out: mutex_unlock(&kprobe_mutex); diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index f6310f848f34..b04b87a4e0a7 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c @@ -611,9 +611,23 @@ void klp_reverse_transition(void) /* Called from copy_process() during fork */ void klp_copy_process(struct task_struct *child) { - child->patch_state = current->patch_state; - /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */ + /* + * The parent process may have gone through a KLP transition since + * the thread flag was copied in setup_thread_stack earlier. Bring + * the task flag up to date with the parent here. + * + * The operation is serialized against all klp_*_transition() + * operations by the tasklist_lock. The only exception is + * klp_update_patch_state(current), but we cannot race with + * that because we are current. 
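
A compact model of the rule this klp_copy_process() comment establishes (plain C with stand-in types; TIF_PATCH_PENDING is reduced to a bool for the sketch): the child's pending flag is re-derived from the parent at copy time instead of trusting the snapshot taken earlier.

#include <stdbool.h>

struct task {
	bool patch_pending;	/* stands in for TIF_PATCH_PENDING */
	int  patch_state;
};

static void klp_copy_flags(const struct task *parent, struct task *child)
{
	/* Re-derive from the parent *now*, not from an earlier snapshot. */
	child->patch_pending = parent->patch_pending;
	child->patch_state   = parent->patch_state;
}

int main(void)
{
	struct task parent = { true, 1 };
	struct task child  = { false, 0 };	/* stale snapshot */

	klp_copy_flags(&parent, &child);
	return child.patch_pending ? 0 : 1;
}
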
+ */ + if (test_tsk_thread_flag(current, TIF_PATCH_PENDING)) + set_tsk_thread_flag(child, TIF_PATCH_PENDING); + else + clear_tsk_thread_flag(child, TIF_PATCH_PENDING); + + child->patch_state = current->patch_state; } /* diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index b6683cefe19a..6cbd2b444476 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -1397,7 +1397,7 @@ static int add_lock_to_list(struct lock_class *this, /* * For good efficiency of modular, we use power of 2 */ -#define MAX_CIRCULAR_QUEUE_SIZE 4096UL +#define MAX_CIRCULAR_QUEUE_SIZE (1UL << CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS) #define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1) /* @@ -5139,9 +5139,10 @@ __lock_set_class(struct lockdep_map *lock, const char *name, return 0; } - lockdep_init_map_waits(lock, name, key, 0, - lock->wait_type_inner, - lock->wait_type_outer); + lockdep_init_map_type(lock, name, key, 0, + lock->wait_type_inner, + lock->wait_type_outer, + lock->lock_type); class = register_lock_class(lock, subclass, 0); hlock->class_idx = class - lock_classes; diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index a19b01635347..bbe9000260d0 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h @@ -99,16 +99,16 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ = #define MAX_STACK_TRACE_ENTRIES 262144UL #define STACK_TRACE_HASH_SIZE 8192 #else -#define MAX_LOCKDEP_ENTRIES 32768UL +#define MAX_LOCKDEP_ENTRIES (1UL << CONFIG_LOCKDEP_BITS) -#define MAX_LOCKDEP_CHAINS_BITS 16 +#define MAX_LOCKDEP_CHAINS_BITS CONFIG_LOCKDEP_CHAINS_BITS /* * Stack-trace: tightly packed array of stack backtrace * addresses. Protected by the hash_lock. */ -#define MAX_STACK_TRACE_ENTRIES 524288UL -#define STACK_TRACE_HASH_SIZE 16384 +#define MAX_STACK_TRACE_ENTRIES (1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS) +#define STACK_TRACE_HASH_SIZE (1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS) #endif /* diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 6d4f413264d2..93020a888b09 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -170,8 +170,10 @@ static __always_inline bool __mutex_trylock_fast(struct mutex *lock) unsigned long curr = (unsigned long)current; unsigned long zero = 0UL; - if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) + if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) { + trace_android_vh_record_mutex_lock_starttime(current, jiffies); return true; + } return false; } @@ -748,6 +750,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne */ void __sched mutex_unlock(struct mutex *lock) { + trace_android_vh_record_mutex_lock_starttime(current, 0); #ifndef CONFIG_DEBUG_LOCK_ALLOC if (__mutex_unlock_fast(lock)) return; @@ -978,6 +981,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, lock_acquired(&lock->dep_map, ip); if (ww_ctx) ww_mutex_set_context_fastpath(ww, ww_ctx); + trace_android_vh_record_mutex_lock_starttime(current, jiffies); preempt_enable(); return 0; } @@ -1049,7 +1053,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, goto err; } - trace_android_vh_mutex_start_check_new_owner(lock); spin_unlock(&lock->wait_lock); schedule_preempt_disabled(); @@ -1097,6 +1100,7 @@ skip_wait: spin_unlock(&lock->wait_lock); preempt_enable(); + trace_android_vh_record_mutex_lock_starttime(current, jiffies); return 0; err: @@ -1433,8 +1437,10 @@ int __sched mutex_trylock(struct mutex *lock) #endif 
 	locked = __mutex_trylock(lock);
-	if (locked)
+	if (locked) {
+		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	}
 
 	return locked;
 }
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index b88eda4eb624..c8a474aa1b3b 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -10,6 +10,21 @@
 #include
 #include
+#include
+
+/*
+ * trace_android_vh_record_pcpu_rwsem_starttime cannot be called directly
+ * from include/linux/percpu-rwsem.h, because including include/hooks/dtask.h
+ * there would break the build. Provide the wrapper
+ * _trace_android_vh_record_pcpu_rwsem_starttime() for percpu-rwsem.h to
+ * call instead.
+ */
+void _trace_android_vh_record_pcpu_rwsem_starttime(struct task_struct *tsk,
+		unsigned long settime)
+{
+	trace_android_vh_record_pcpu_rwsem_starttime(tsk, settime);
+}
+EXPORT_SYMBOL_GPL(_trace_android_vh_record_pcpu_rwsem_starttime);
+
 int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
 			const char *name, struct lock_class_key *key)
 {
@@ -237,11 +252,13 @@ void percpu_down_write(struct percpu_rw_semaphore *sem)
 	/* Wait for all active readers to complete. */
 	rcuwait_wait_event(&sem->writer, readers_active_check(sem),
 			   TASK_UNINTERRUPTIBLE);
+	trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies);
 }
 EXPORT_SYMBOL_GPL(percpu_down_write);
 
 void percpu_up_write(struct percpu_rw_semaphore *sem)
 {
+	trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
 	rwsem_release(&sem->dep_map, _RET_IP_);
 
 	/*
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 49c0fa173bde..419cc6639894 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1471,6 +1471,7 @@ static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
 	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
 	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1519,6 +1520,8 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
 	if (ret)
 		mutex_release(&lock->dep_map, _RET_IP_);
+	else
+		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
 
 	return ret;
 }
@@ -1563,6 +1566,8 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 				       rt_mutex_slowlock);
 	if (ret)
 		mutex_release(&lock->dep_map, _RET_IP_);
+	else
+		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
 
 	return ret;
 }
@@ -1589,6 +1594,8 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
 	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
 	if (ret)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	else
+		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
 
 	return ret;
 }
@@ -1603,6 +1610,7 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
 	mutex_release(&lock->dep_map, _RET_IP_);
 	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+	trace_android_vh_record_rtmutex_lock_starttime(current, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index a33e6a36dae9..d368d1a1af49 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -279,6 +279,10 @@ static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
 	long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
 	if (WARN_ON_ONCE(cnt < 0))
 		rwsem_set_nonspinnable(sem);
+
+	if ((cnt & RWSEM_READ_FAILED_MASK) == 0)
+
trace_android_vh_record_rwsem_lock_starttime(current, jiffies); + return !(cnt & RWSEM_READ_FAILED_MASK); } @@ -1021,9 +1025,11 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state) raw_spin_unlock_irq(&sem->wait_lock); wake_up_q(&wake_q); } + trace_android_vh_record_rwsem_lock_starttime(current, jiffies); return sem; } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) { /* rwsem_reader_phase_trylock() implies ACQUIRE on success */ + trace_android_vh_record_rwsem_lock_starttime(current, jiffies); return sem; } @@ -1104,6 +1110,7 @@ queue: __set_current_state(TASK_RUNNING); trace_android_vh_rwsem_read_wait_finish(sem); lockevent_inc(rwsem_rlock); + trace_android_vh_record_rwsem_lock_starttime(current, jiffies); return sem; out_nolock: @@ -1150,6 +1157,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) && rwsem_optimistic_spin(sem, true)) { /* rwsem_optimistic_spin() implies ACQUIRE on success */ + trace_android_vh_record_rwsem_lock_starttime(current, jiffies); return sem; } @@ -1280,7 +1288,7 @@ trylock_again: rwsem_disable_reader_optspin(sem, disable_rspin); raw_spin_unlock_irq(&sem->wait_lock); lockevent_inc(rwsem_wlock); - + trace_android_vh_record_rwsem_lock_starttime(current, jiffies); return ret; out_nolock: @@ -1396,6 +1404,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, tmp + RWSEM_READER_BIAS)) { rwsem_set_reader_owned(sem); + trace_android_vh_record_rwsem_lock_starttime(current, jiffies); return 1; } } while (!(tmp & RWSEM_READ_FAILED_MASK)); @@ -1410,10 +1419,12 @@ static inline void __down_write(struct rw_semaphore *sem) long tmp = RWSEM_UNLOCKED_VALUE; if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, - RWSEM_WRITER_LOCKED))) + RWSEM_WRITER_LOCKED))) { rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE); - else + } else { + trace_android_vh_record_rwsem_lock_starttime(current, jiffies); rwsem_set_owner(sem); + } } static inline int __down_write_killable(struct rw_semaphore *sem) @@ -1425,6 +1436,7 @@ static inline int __down_write_killable(struct rw_semaphore *sem) if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE))) return -EINTR; } else { + trace_android_vh_record_rwsem_lock_starttime(current, jiffies); rwsem_set_owner(sem); } return 0; @@ -1440,6 +1452,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) { rwsem_set_owner(sem); + trace_android_vh_record_rwsem_lock_starttime(current, jiffies); return true; } return false; @@ -1455,6 +1468,7 @@ static inline void __up_read(struct rw_semaphore *sem) DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem); + trace_android_vh_record_rwsem_lock_starttime(current, 0); rwsem_clear_reader_owned(sem); tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count); DEBUG_RWSEMS_WARN_ON(tmp < 0, sem); @@ -1481,6 +1495,7 @@ static inline void __up_write(struct rw_semaphore *sem) DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) && !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem); + trace_android_vh_record_rwsem_lock_starttime(current, 0); rwsem_clear_owner(sem); tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count); if (unlikely(tmp & RWSEM_FLAG_WAITERS)) diff --git a/kernel/module.c b/kernel/module.c index 7db4c615143a..91b50848b699 100644 --- a/kernel/module.c +++ b/kernel/module.c 
@@ -2297,6 +2297,15 @@ void *__symbol_get(const char *symbol) } EXPORT_SYMBOL_GPL(__symbol_get); +static bool module_init_layout_section(const char *sname) +{ +#ifndef CONFIG_MODULE_UNLOAD + if (module_exit_section(sname)) + return true; +#endif + return module_init_section(sname); +} + /* * Ensure that an exported symbol [global namespace] does not already exist * in the kernel or in some other module's exported symbol table. @@ -2506,7 +2515,7 @@ static void layout_sections(struct module *mod, struct load_info *info) if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL - || module_init_section(sname)) + || module_init_layout_section(sname)) continue; s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i); pr_debug("\t%s\n", sname); @@ -2539,7 +2548,7 @@ static void layout_sections(struct module *mod, struct load_info *info) if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL - || !module_init_section(sname)) + || !module_init_layout_section(sname)) continue; s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i) | INIT_OFFSET_MASK); @@ -3188,11 +3197,6 @@ static int rewrite_section_headers(struct load_info *info, int flags) temporary image. */ shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset; -#ifndef CONFIG_MODULE_UNLOAD - /* Don't load .exit sections */ - if (module_exit_section(info->secstrings+shdr->sh_name)) - shdr->sh_flags &= ~(unsigned long)SHF_ALLOC; -#endif } /* Track but don't keep modinfo and version sections. */ diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 731aca37637d..b6875ebd184d 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -637,7 +637,7 @@ static void power_down(void) int error; if (hibernation_mode == HIBERNATION_SUSPEND) { - error = suspend_devices_and_enter(PM_SUSPEND_MEM); + error = suspend_devices_and_enter(mem_sleep_current); if (error) { hibernation_mode = hibernation_ops ? HIBERNATION_PLATFORM : diff --git a/kernel/power/user.c b/kernel/power/user.c index 740723bb3885..13cca2e2c2bc 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -26,6 +26,7 @@ #include "power.h" +static bool need_wait; static struct snapshot_data { struct snapshot_handle handle; @@ -78,7 +79,7 @@ static int snapshot_open(struct inode *inode, struct file *filp) * Resuming. We may need to wait for the image device to * appear. 
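
The need_wait logic in this kernel/power/user.c diff amounts to deferring one expensive wait from open() to the first data-path call, paid at most once. A userspace sketch of that shape (slow_probe_wait() is a stand-in for wait_for_device_probe()):

#include <stdbool.h>
#include <stdio.h>

static bool need_wait;

static void slow_probe_wait(void)	/* plays wait_for_device_probe() */
{
	puts("waiting for image device...");
}

static void snapshot_open(void)
{
	need_wait = true;		/* only record that a wait is owed */
}

static void snapshot_write(void)
{
	if (need_wait) {		/* first caller pays, exactly once */
		slow_probe_wait();
		need_wait = false;
	}
	puts("write proceeds");
}

int main(void)
{
	snapshot_open();
	snapshot_write();		/* waits */
	snapshot_write();		/* does not wait again */
	return 0;
}
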
*/ - wait_for_device_probe(); + need_wait = true; data->swap = -1; data->mode = O_WRONLY; @@ -168,6 +169,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf, ssize_t res; loff_t pg_offp = *offp & ~PAGE_MASK; + if (need_wait) { + wait_for_device_probe(); + need_wait = false; + } + lock_system_sleep(); data = filp->private_data; @@ -244,6 +250,11 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, loff_t size; sector_t offset; + if (need_wait) { + wait_for_device_probe(); + need_wait = false; + } + if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC) return -ENOTTY; if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR) diff --git a/kernel/profile.c b/kernel/profile.c index b47fe52f0ade..737b1c704aa8 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -109,6 +109,13 @@ int __ref profile_init(void) /* only text is profiled */ prof_len = (_etext - _stext) >> prof_shift; + + if (!prof_len) { + pr_warn("profiling shift: %u too large\n", prof_shift); + prof_on = 0; + return -EINVAL; + } + buffer_bytes = prof_len*sizeof(atomic_t); if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL)) diff --git a/kernel/ptrace.c b/kernel/ptrace.c index d99f73f83bf5..aab480e24bd6 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -1219,9 +1219,8 @@ int ptrace_request(struct task_struct *child, long request, return ptrace_resume(child, request, data); case PTRACE_KILL: - if (child->exit_state) /* already dead */ - return 0; - return ptrace_resume(child, request, SIGKILL); + send_sig_info(SIGKILL, SEND_SIG_NOINFO, child); + return 0; #ifdef CONFIG_HAVE_ARCH_TRACEHOOK case PTRACE_GETREGSET: diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig index b71e21f73c40..cd6e11403f1b 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -86,6 +86,7 @@ config TASKS_RCU config TASKS_RUDE_RCU def_bool 0 + select IRQ_WORK help This option enables a task-based RCU implementation that uses only context switch (including preemption) and user-mode diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 7c05c5ab7865..8b51e6a5b386 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -171,7 +171,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) { /* Complain if the scheduler has not started. */ - RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, + WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, "synchronize_rcu_tasks called too soon"); /* Wait for the grace period. */ @@ -620,6 +620,9 @@ static void rcu_tasks_be_rude(struct work_struct *work) // Wait for one rude RCU-tasks grace period. static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) { + if (num_online_cpus() <= 1) + return; // Fastpath for only one CPU. + rtp->n_ipis += cpumask_weight(cpu_online_mask); schedule_on_each_cpu(rcu_tasks_be_rude); } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 844c35803739..b10d6bcea77d 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2456,7 +2456,7 @@ static void rcu_do_batch(struct rcu_data *rdp) div = READ_ONCE(rcu_divisor); div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div; bl = max(rdp->blimit, pending >> div); - if (unlikely(bl > 100)) { + if (in_serving_softirq() && unlikely(bl > 100)) { long rrn = READ_ONCE(rcu_resched_ns); rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn; @@ -2490,19 +2490,23 @@ static void rcu_do_batch(struct rcu_data *rdp) * Stop only if limit reached and CPU has something to do. 
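
A userspace model of the batching policy rcu_do_batch() applies after this change (helper names are invented for the sketch): in softirq context the loop yields once the batch limit or the time budget is hit, while a callback kthread may simply keep draining.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void process_callbacks(int pending, int batch_limit,
			      long long budget_ns, bool in_softirq)
{
	long long deadline = now_ns() + budget_ns;
	int done = 0;

	while (done < pending) {
		done++;			/* invoke one callback */

		if (!in_softirq)
			continue;	/* kthread context: no hard limits */

		if (done >= batch_limit)
			break;		/* batch limit reached */
		/* check the clock only every 32 callbacks, as the hunk does */
		if ((done & 31) == 0 && now_ns() >= deadline)
			break;		/* time budget exhausted */
	}
	printf("processed %d of %d callbacks\n", done, pending);
}

int main(void)
{
	process_callbacks(1000, 100, 500000, true);	/* softirq: stops early */
	process_callbacks(1000, 100, 500000, false);	/* kthread: drains all */
	return 0;
}
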
* Note: The rcl structure counts down from zero. */ - if (-rcl.len >= bl && !offloaded && - (need_resched() || - (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) - break; - if (unlikely(tlimit)) { - /* only call local_clock() every 32 callbacks */ - if (likely((-rcl.len & 31) || local_clock() < tlimit)) - continue; - /* Exceeded the time limit, so leave. */ - break; - } - if (offloaded) { - WARN_ON_ONCE(in_serving_softirq()); + if (in_serving_softirq()) { + if (-rcl.len >= bl && (need_resched() || + (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) + break; + + /* + * Make sure we don't spend too much time here and deprive other + * softirq vectors of CPU cycles. + */ + if (unlikely(tlimit)) { + /* only call local_clock() every 32 callbacks */ + if (likely((-rcl.len & 31) || local_clock() < tlimit)) + continue; + /* Exceeded the time limit, so leave. */ + break; + } + } else { local_bh_enable(); lockdep_assert_irqs_enabled(); cond_resched_tasks_rcu_qs(); @@ -3389,15 +3393,16 @@ static void fill_page_cache_func(struct work_struct *work) bnode = (struct kvfree_rcu_bulk_data *) __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); - if (bnode) { - raw_spin_lock_irqsave(&krcp->lock, flags); - pushed = put_cached_bnode(krcp, bnode); - raw_spin_unlock_irqrestore(&krcp->lock, flags); + if (!bnode) + break; - if (!pushed) { - free_page((unsigned long) bnode); - break; - } + raw_spin_lock_irqsave(&krcp->lock, flags); + pushed = put_cached_bnode(krcp, bnode); + raw_spin_unlock_irqrestore(&krcp->lock, flags); + + if (!pushed) { + free_page((unsigned long) bnode); + break; } } diff --git a/kernel/scftorture.c b/kernel/scftorture.c index 554a521ee235..060ee0b1569a 100644 --- a/kernel/scftorture.c +++ b/kernel/scftorture.c @@ -253,9 +253,10 @@ static void scf_handler(void *scfc_in) } this_cpu_inc(scf_invoked_count); if (longwait <= 0) { - if (!(r & 0xffc0)) + if (!(r & 0xffc0)) { udelay(r & 0x3f); - goto out; + goto out; + } } if (r & 0xfff) goto out; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6ea0e3ceabea..73593759ddff 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -21,7 +21,7 @@ #include #include "../workqueue_internal.h" -#include "../../fs/io-wq.h" +#include "../../io_uring/io-wq.h" #include "../smpboot.h" #include "pelt.h" @@ -881,8 +881,9 @@ int tg_nop(struct task_group *tg, void *data) } #endif -static void set_load_weight(struct task_struct *p, bool update_load) +static void set_load_weight(struct task_struct *p) { + bool update_load = !(READ_ONCE(p->state) & TASK_NEW); int prio = p->static_prio - MAX_RT_PRIO; struct load_weight *load = &p->se.load; @@ -2872,8 +2873,12 @@ static inline bool ttwu_queue_cond(int cpu, int wake_flags) * CPU then use the wakelist to offload the task activation to * the soon-to-be-idle CPU as the current CPU is likely busy. * nr_running is checked to avoid unnecessary task stacking. + * + * Note that we can only get here with (wakee) p->on_rq=0, + * p->on_cpu can be whatever, we've done the dequeue, so + * the wakee has been accounted out of ->nr_running. */ - if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1) + if ((wake_flags & WF_ON_CPU) && !cpu_rq(cpu)->nr_running) return true; return false; @@ -3493,7 +3498,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) p->static_prio = NICE_TO_PRIO(0); p->prio = p->normal_prio = p->static_prio; - set_load_weight(p, false); + set_load_weight(p); /* * We don't need the reset flag anymore after the fork. 
It has @@ -5271,7 +5276,7 @@ void set_user_nice(struct task_struct *p, long nice) put_prev_task(rq, p); p->static_prio = NICE_TO_PRIO(nice); - set_load_weight(p, true); + set_load_weight(p); old_prio = p->prio; p->prio = effective_prio(p); @@ -5445,7 +5450,7 @@ static void __setscheduler_params(struct task_struct *p, */ p->rt_priority = attr->sched_priority; p->normal_prio = normal_prio(p); - set_load_weight(p, true); + set_load_weight(p); } /* @@ -6868,7 +6873,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur, } int task_can_attach(struct task_struct *p, - const struct cpumask *cs_cpus_allowed) + const struct cpumask *cs_effective_cpus) { int ret = 0; @@ -6887,8 +6892,13 @@ int task_can_attach(struct task_struct *p, } if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, - cs_cpus_allowed)) - ret = dl_task_can_attach(p, cs_cpus_allowed); + cs_effective_cpus)) { + int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus); + + if (unlikely(cpu >= nr_cpu_ids)) + return -EINVAL; + ret = dl_cpu_busy(cpu, p); + } out: return ret; @@ -7218,8 +7228,10 @@ static void cpuset_cpu_active(void) static int cpuset_cpu_inactive(unsigned int cpu) { if (!cpuhp_tasks_frozen) { - if (dl_cpu_busy(cpu)) - return -EBUSY; + int ret = dl_cpu_busy(cpu, NULL); + + if (ret) + return ret; cpuset_update_active_cpus(); } else { num_cpus_frozen++; @@ -7596,7 +7608,7 @@ void __init sched_init(void) atomic_set(&rq->nr_iowait, 0); } - set_load_weight(&init_task, false); + set_load_weight(&init_task); /* * The boot idle thread does lazy MMU switching as well: diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 16230441b47a..fa92656122d1 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1563,7 +1563,10 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) * the throttle. */ p->dl.dl_throttled = 0; - BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH); + if (!(flags & ENQUEUE_REPLENISH)) + printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n", + task_pid_nr(p)); + return; } @@ -2825,41 +2828,6 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) } #ifdef CONFIG_SMP -int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed) -{ - unsigned long flags, cap; - unsigned int dest_cpu; - struct dl_bw *dl_b; - bool overflow; - int ret; - - dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed); - - rcu_read_lock_sched(); - dl_b = dl_bw_of(dest_cpu); - raw_spin_lock_irqsave(&dl_b->lock, flags); - cap = dl_bw_capacity(dest_cpu); - overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw); - if (overflow) { - ret = -EBUSY; - } else { - /* - * We reserve space for this task in the destination - * root_domain, as we can't fail after this point. - * We will free resources in the source root_domain - * later on (see set_cpus_allowed_dl()). 
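
The consolidation in this deadline.c diff folds the removed dl_task_can_attach() into dl_cpu_busy(cpu, p): with p == NULL it only checks for bandwidth overflow, and with a task it checks and reserves under the same lock, so the reservation cannot fail afterwards. A standalone sketch of that single-helper shape (struct bw_pool and the request convention are invented for the example):

#include <errno.h>
#include <stdio.h>

struct bw_pool {
	long capacity;
	long used;
};

/* request == NULL: check only; otherwise check and reserve atomically. */
static int bw_busy_or_reserve(struct bw_pool *pool, const long *request)
{
	long need = request ? *request : 0;

	if (pool->used + need > pool->capacity)
		return -EBUSY;		/* overflow: reject */
	if (request)
		pool->used += need;	/* reserve; cannot fail later */
	return 0;
}

int main(void)
{
	struct bw_pool pool = { .capacity = 100, .used = 90 };
	long req = 5;

	printf("check only: %d\n", bw_busy_or_reserve(&pool, NULL));
	printf("reserve 5:  %d\n", bw_busy_or_reserve(&pool, &req));
	printf("reserve 5:  %d\n", bw_busy_or_reserve(&pool, &req));
	printf("reserve 5:  %d\n", bw_busy_or_reserve(&pool, &req)); /* -EBUSY */
	return 0;
}
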
- */ - int cpus = dl_bw_cpus(dest_cpu); - - __dl_add(dl_b, p->dl.dl_bw, cpus); - ret = 0; - } - raw_spin_unlock_irqrestore(&dl_b->lock, flags); - rcu_read_unlock_sched(); - - return ret; -} - int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial) { @@ -2881,7 +2849,7 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, return ret; } -bool dl_cpu_busy(unsigned int cpu) +int dl_cpu_busy(int cpu, struct task_struct *p) { unsigned long flags, cap; struct dl_bw *dl_b; @@ -2891,11 +2859,22 @@ bool dl_cpu_busy(unsigned int cpu) dl_b = dl_bw_of(cpu); raw_spin_lock_irqsave(&dl_b->lock, flags); cap = dl_bw_capacity(cpu); - overflow = __dl_overflow(dl_b, cap, 0, 0); + overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0); + + if (!overflow && p) { + /* + * We reserve space for this task in the destination + * root_domain, as we can't fail after this point. + * We will free resources in the source root_domain + * later on (see set_cpus_allowed_dl()). + */ + __dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu)); + } + raw_spin_unlock_irqrestore(&dl_b->lock, flags); rcu_read_unlock_sched(); - return overflow; + return overflow ? -EBUSY : 0; } #endif diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 193adbff97f7..8ef2f8488d8b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3759,11 +3759,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s se->avg.runnable_sum = se->avg.runnable_avg * divider; - se->avg.load_sum = divider; - if (se_weight(se)) { - se->avg.load_sum = - div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se)); - } + se->avg.load_sum = se->avg.load_avg * divider; + if (se_weight(se) < se->avg.load_sum) + se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); + else + se->avg.load_sum = 1; enqueue_load_avg(cfs_rq, se); cfs_rq->avg.util_avg += se->avg.util_avg; @@ -4788,8 +4788,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data) cfs_rq->throttle_count--; if (!cfs_rq->throttle_count) { - cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - - cfs_rq->throttled_clock_task; + cfs_rq->throttled_clock_pelt_time += rq_clock_task_mult(rq) - + cfs_rq->throttled_clock_pelt; /* Add cfs_rq with already running entity in the list */ if (cfs_rq->nr_running >= 1) @@ -4806,7 +4806,7 @@ static int tg_throttle_down(struct task_group *tg, void *data) /* group is entering throttled state, stop time */ if (!cfs_rq->throttle_count) { - cfs_rq->throttled_clock_task = rq_clock_task(rq); + cfs_rq->throttled_clock_pelt = rq_clock_task_mult(rq); list_del_leaf_cfs_rq(cfs_rq); } cfs_rq->throttle_count++; @@ -5224,7 +5224,7 @@ static void sync_throttle(struct task_group *tg, int cpu) pcfs_rq = tg->parent->cfs_rq[cpu]; cfs_rq->throttle_count = pcfs_rq->throttle_count; - cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); + cfs_rq->throttled_clock_pelt = rq_clock_task_mult(cpu_rq(cpu)); } /* conditionally throttle active cfs_rq's from put_prev_entity() */ diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c index ca5f99126e0c..acc544c43f10 100644 --- a/kernel/sched/pelt.c +++ b/kernel/sched/pelt.c @@ -547,3 +547,50 @@ int update_irq_load_avg(struct rq *rq, u64 running) return ret; } #endif + +#include +DEFINE_PER_CPU(u64, clock_task_mult); + +unsigned int sysctl_sched_pelt_multiplier = 1; +__read_mostly unsigned int sched_pelt_lshift; + +int sched_pelt_multiplier(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos) +{ + static DEFINE_MUTEX(mutex); + unsigned int 
old; + int ret; + + mutex_lock(&mutex); + + old = sysctl_sched_pelt_multiplier; + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (ret) + goto undo; + if (!write) + goto done; + + trace_android_vh_sched_pelt_multiplier(old, sysctl_sched_pelt_multiplier, &ret); + if (ret) + goto undo; + + switch (sysctl_sched_pelt_multiplier) { + case 1: + fallthrough; + case 2: + fallthrough; + case 4: + WRITE_ONCE(sched_pelt_lshift, + sysctl_sched_pelt_multiplier >> 1); + goto done; + default: + ret = -EINVAL; + } + +undo: + sysctl_sched_pelt_multiplier = old; +done: + mutex_unlock(&mutex); + + return ret; +} diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h index 45bf08e22207..0245e3e21f93 100644 --- a/kernel/sched/pelt.h +++ b/kernel/sched/pelt.h @@ -61,6 +61,8 @@ static inline void cfs_se_util_change(struct sched_avg *avg) WRITE_ONCE(avg->util_est.enqueued, enqueued); } +extern unsigned int sched_pelt_lshift; + /* * The clock_pelt scales the time to reflect the effective amount of * computation done during the running delta time but then sync back to @@ -75,9 +77,13 @@ static inline void cfs_se_util_change(struct sched_avg *avg) */ static inline void update_rq_clock_pelt(struct rq *rq, s64 delta) { + delta <<= READ_ONCE(sched_pelt_lshift); + + per_cpu(clock_task_mult, rq->cpu) += delta; + if (unlikely(is_idle_task(rq->curr))) { /* The rq is idle, we can sync to clock_task */ - rq->clock_pelt = rq_clock_task(rq); + rq->clock_pelt = rq_clock_task_mult(rq); return; } @@ -129,7 +135,8 @@ static inline void update_idle_rq_clock_pelt(struct rq *rq) * rq's clock_task. */ if (util_sum >= divider) - rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt; + rq->lost_idle_time += rq_clock_task_mult(rq) - + rq->clock_pelt; } static inline u64 rq_clock_pelt(struct rq *rq) @@ -145,9 +152,9 @@ static inline u64 rq_clock_pelt(struct rq *rq) static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { if (unlikely(cfs_rq->throttle_count)) - return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time; + return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time; - return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time; + return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time; } #else static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index f46ac0f39777..e14917cd2a1d 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -1020,7 +1020,7 @@ void psi_cgroup_free(struct cgroup *cgroup) */ void cgroup_move_task(struct task_struct *task, struct css_set *to) { - unsigned int task_flags = 0; + unsigned int task_flags; struct rq_flags rf; struct rq *rq; @@ -1035,15 +1035,31 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to) rq = task_rq_lock(task, &rf); - if (task_on_rq_queued(task)) { - task_flags = TSK_RUNNING; - if (task_current(rq, task)) - task_flags |= TSK_ONCPU; - } else if (task->in_iowait) - task_flags = TSK_IOWAIT; - - if (task->in_memstall) - task_flags |= TSK_MEMSTALL; + /* + * We may race with schedule() dropping the rq lock between + * deactivating prev and switching to next. 
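
Returning to the sched_pelt_multiplier handler added in the pelt.c hunk above: it accepts only multipliers 1, 2 and 4 and stores the corresponding left shift applied to the PELT clock delta (delta <<= shift). A minimal userspace rendering of that policy:

#include <stdio.h>

static int pelt_shift_from_multiplier(unsigned int mult, unsigned int *shift)
{
	switch (mult) {
	case 1:
	case 2:
	case 4:
		*shift = mult >> 1;	/* 1 -> 0, 2 -> 1, 4 -> 2 */
		return 0;
	default:
		return -1;		/* reject; keep the old value */
	}
}

int main(void)
{
	unsigned int shift;
	unsigned long long delta = 1000;

	if (!pelt_shift_from_multiplier(4, &shift))
		printf("delta %llu -> %llu\n", delta, delta << shift);
	return 0;
}
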
Because the psi + * updates from the deactivation are deferred to the switch + * callback to save cgroup tree updates, the task's scheduling + * state here is not coherent with its psi state: + * + * schedule() cgroup_move_task() + * rq_lock() + * deactivate_task() + * p->on_rq = 0 + * psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates + * pick_next_task() + * rq_unlock() + * rq_lock() + * psi_task_change() // old cgroup + * task->cgroups = to + * psi_task_change() // new cgroup + * rq_unlock() + * rq_lock() + * psi_sched_switch() // does deferred updates in new cgroup + * + * Don't rely on the scheduling state. Use psi_flags instead. + */ + task_flags = task->psi_flags; if (task_flags) psi_task_change(task, task_flags, 0); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index f918bd866cc3..651f5782ed4d 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -439,7 +439,7 @@ static inline void rt_queue_push_tasks(struct rq *rq) #endif /* CONFIG_SMP */ static void enqueue_top_rt_rq(struct rt_rq *rt_rq); -static void dequeue_top_rt_rq(struct rt_rq *rt_rq); +static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count); static inline int on_rt_rq(struct sched_rt_entity *rt_se) { @@ -560,7 +560,7 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) rt_se = rt_rq->tg->rt_se[cpu]; if (!rt_se) { - dequeue_top_rt_rq(rt_rq); + dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running); /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ cpufreq_update_util(rq_of_rt_rq(rt_rq), 0); } @@ -646,7 +646,7 @@ static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) { - dequeue_top_rt_rq(rt_rq); + dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running); } static inline int rt_rq_throttled(struct rt_rq *rt_rq) @@ -1054,7 +1054,7 @@ static void update_curr_rt(struct rq *rq) } static void -dequeue_top_rt_rq(struct rt_rq *rt_rq) +dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count) { struct rq *rq = rq_of_rt_rq(rt_rq); @@ -1065,7 +1065,7 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq) BUG_ON(!rq->nr_running); - sub_nr_running(rq, rt_rq->rt_nr_running); + sub_nr_running(rq, count); rt_rq->rt_queued = 0; } @@ -1344,18 +1344,21 @@ static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flag static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags) { struct sched_rt_entity *back = NULL; + unsigned int rt_nr_running; for_each_sched_rt_entity(rt_se) { rt_se->back = back; back = rt_se; } - dequeue_top_rt_rq(rt_rq_of_se(back)); + rt_nr_running = rt_rq_of_se(back)->rt_nr_running; for (rt_se = back; rt_se; rt_se = rt_se->back) { if (on_rt_rq(rt_se)) __dequeue_rt_entity(rt_se, flags); } + + dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running); } static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index af46fcbae927..9e798c586486 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -350,9 +350,8 @@ extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); extern bool __checkparam_dl(const struct sched_attr *attr); extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); -extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); -extern bool dl_cpu_busy(unsigned 
int cpu); +extern int dl_cpu_busy(int cpu, struct task_struct *p); #ifdef CONFIG_CGROUP_SCHED @@ -610,8 +609,8 @@ struct cfs_rq { s64 runtime_remaining; u64 throttled_clock; - u64 throttled_clock_task; - u64 throttled_clock_task_time; + u64 throttled_clock_pelt; + u64 throttled_clock_pelt_time; int throttled; int throttle_count; struct list_head throttled_list; @@ -1194,6 +1193,23 @@ static inline u64 rq_clock_task(struct rq *rq) return rq->clock_task; } +#ifdef CONFIG_SMP +DECLARE_PER_CPU(u64, clock_task_mult); + +static inline u64 rq_clock_task_mult(struct rq *rq) +{ + lockdep_assert_held(&rq->lock); + assert_clock_updated(rq); + + return per_cpu(clock_task_mult, cpu_of(rq)); +} +#else +static inline u64 rq_clock_task_mult(struct rq *rq) +{ + return rq_clock_task(rq); +} +#endif + /** * By default the decay is the default pelt decay period. * The decay shift can change the decay period in diff --git a/kernel/signal.c b/kernel/signal.c index 6fff4a9788ac..f6ecd01311d6 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -987,7 +987,7 @@ static inline bool wants_signal(int sig, struct task_struct *p) if (task_is_stopped_or_traced(p)) return false; - return task_curr(p) || !signal_pending(p); + return task_curr(p) || !task_sigpending(p); } static void complete_signal(int sig, struct task_struct *p, enum pid_type type) @@ -1400,7 +1400,6 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, return sighand; } -EXPORT_SYMBOL_GPL(__lock_task_sighand); /* * send signal info to all the members of a group @@ -1925,12 +1924,12 @@ bool do_notify_parent(struct task_struct *tsk, int sig) bool autoreap = false; u64 utime, stime; - BUG_ON(sig == -1); + WARN_ON_ONCE(sig == -1); - /* do_notify_parent_cldstop should have been called instead. */ - BUG_ON(task_is_stopped_or_traced(tsk)); + /* do_notify_parent_cldstop should have been called instead. */ + WARN_ON_ONCE(task_is_stopped_or_traced(tsk)); - BUG_ON(!tsk->ptrace && + WARN_ON_ONCE(!tsk->ptrace && (tsk->group_leader != tsk || !thread_group_empty(tsk))); /* Wake up all pidfd waiters */ @@ -2553,6 +2552,21 @@ bool get_signal(struct ksignal *ksig) struct signal_struct *signal = current->signal; int signr; + if (unlikely(current->task_works)) + task_work_run(); + + /* + * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so + * that the arch handlers don't all have to do it. If we get here + * without TIF_SIGPENDING, just exit after running signal work. + */ + if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) { + if (test_thread_flag(TIF_NOTIFY_SIGNAL)) + tracehook_notify_signal(); + if (!task_sigpending(current)) + return false; + } + if (unlikely(uprobe_deny_signal())) return false; @@ -2565,26 +2579,6 @@ bool get_signal(struct ksignal *ksig) relock: spin_lock_irq(&sighand->siglock); - /* - * Make sure we can safely read ->jobctl() in task_work add. As Oleg - * states: - * - * It pairs with mb (implied by cmpxchg) before READ_ONCE. So we - * roughly have - * - * task_work_add: get_signal: - * STORE(task->task_works, new_work); STORE(task->jobctl); - * mb(); mb(); - * LOAD(task->jobctl); LOAD(task->task_works); - * - * and we can rely on STORE-MB-LOAD [ in task_work_add]. - */ - smp_store_mb(current->jobctl, current->jobctl & ~JOBCTL_TASK_WORK); - if (unlikely(current->task_works)) { - spin_unlock_irq(&sighand->siglock); - task_work_run(); - goto relock; - } /* * Every stopped thread goes here after wakeup. 
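
The early-exit block added to get_signal() above can be modeled with two flag bits: notify-signal work always runs first, and if no real signal is pending afterwards the function bails out before ever taking the siglock. A sketch (flag bit values are arbitrary, chosen for the example):

#include <stdbool.h>
#include <stdio.h>

#define TIF_SIGPENDING    (1u << 0)
#define TIF_NOTIFY_SIGNAL (1u << 1)

static bool get_signal_model(unsigned int *flags)
{
	if (*flags & TIF_NOTIFY_SIGNAL) {
		*flags &= ~TIF_NOTIFY_SIGNAL;	/* run deferred signal work */
		puts("ran notify-signal work");
	}
	if (!(*flags & TIF_SIGPENDING))
		return false;			/* nothing to deliver */
	puts("delivering a real signal");
	return true;
}

int main(void)
{
	unsigned int only_work = TIF_NOTIFY_SIGNAL;
	unsigned int with_sig  = TIF_NOTIFY_SIGNAL | TIF_SIGPENDING;

	printf("-> %d\n", get_signal_model(&only_work));
	printf("-> %d\n", get_signal_model(&with_sig));
	return 0;
}
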
Check to see if @@ -2775,6 +2769,14 @@ relock: do_coredump(&ksig->info); } + /* + * PF_IO_WORKER threads will catch and exit on fatal signals + * themselves. They have cleanup that must be performed, so + * we cannot call do_exit() on their behalf. + */ + if (current->flags & PF_IO_WORKER) + goto out; + /* * Death signals, no core dump. */ @@ -2782,7 +2784,7 @@ relock: /* NOTREACHED */ } spin_unlock_irq(&sighand->siglock); - +out: ksig->sig = signr; if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS)) @@ -2850,7 +2852,7 @@ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) /* Remove the signals this thread can handle. */ sigandsets(&retarget, &retarget, &t->blocked); - if (!signal_pending(t)) + if (!task_sigpending(t)) signal_wake_up(t, 0); if (sigisemptyset(&retarget)) @@ -2884,7 +2886,7 @@ void exit_signals(struct task_struct *tsk) cgroup_threadgroup_change_end(tsk); - if (!signal_pending(tsk)) + if (!task_sigpending(tsk)) goto out; unblocked = tsk->blocked; @@ -2928,7 +2930,7 @@ long do_no_restart_syscall(struct restart_block *param) static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) { - if (signal_pending(tsk) && !thread_group_empty(tsk)) { + if (task_sigpending(tsk) && !thread_group_empty(tsk)) { sigset_t newblocked; /* A set of now blocked but previously unblocked signals. */ sigandnsets(&newblocked, newset, ¤t->blocked); diff --git a/kernel/smp.c b/kernel/smp.c index d7033091bdfb..d5b26978940f 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -347,7 +347,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline) /* There shouldn't be any pending callbacks on an offline CPU. */ if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) && - !warned && !llist_empty(head))) { + !warned && entry != NULL)) { warned = true; WARN(1, "IPI on offline CPU %d\n", smp_processor_id()); diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 6b8203edf531..762218cf3a2a 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -268,6 +268,7 @@ COND_SYSCALL_COMPAT(keyctl); /* mm/fadvise.c */ COND_SYSCALL(fadvise64_64); +COND_SYSCALL_COMPAT(fadvise64_64); /* mm/, CONFIG_MMU only */ COND_SYSCALL(swapon); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 8e8e352824e4..5c8a7d6a07b9 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -394,13 +394,14 @@ int proc_dostring(struct ctl_table *table, int write, ppos); } -static size_t proc_skip_spaces(char **buf) +static void proc_skip_spaces(char **buf, size_t *size) { - size_t ret; - char *tmp = skip_spaces(*buf); - ret = tmp - *buf; - *buf = tmp; - return ret; + while (*size) { + if (!isspace(**buf)) + break; + (*size)--; + (*buf)++; + } } static void proc_skip_char(char **buf, size_t *size, const char v) @@ -469,13 +470,12 @@ static int proc_get_long(char **buf, size_t *size, unsigned long *val, bool *neg, const char *perm_tr, unsigned perm_tr_len, char *tr) { - int len; char *p, tmp[TMPBUFLEN]; + ssize_t len = *size; - if (!*size) + if (len <= 0) return -EINVAL; - len = *size; if (len > TMPBUFLEN - 1) len = TMPBUFLEN - 1; @@ -560,14 +560,14 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp, if (*negp) { if (*lvalp > (unsigned long) INT_MAX + 1) return -EINVAL; - *valp = -*lvalp; + WRITE_ONCE(*valp, -*lvalp); } else { if (*lvalp > (unsigned long) INT_MAX) return -EINVAL; - *valp = *lvalp; + WRITE_ONCE(*valp, *lvalp); } } else { - int val = *valp; + int val = READ_ONCE(*valp); if (val < 0) { *negp = true; *lvalp = -(unsigned long)val; @@ -586,9 +586,9 @@ static int 
do_proc_douintvec_conv(unsigned long *lvalp, if (write) { if (*lvalp > UINT_MAX) return -EINVAL; - *valp = *lvalp; + WRITE_ONCE(*valp, *lvalp); } else { - unsigned int val = *valp; + unsigned int val = READ_ONCE(*valp); *lvalp = (unsigned long)val; } return 0; @@ -633,7 +633,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, bool neg; if (write) { - left -= proc_skip_spaces(&p); + proc_skip_spaces(&p, &left); if (!left) break; @@ -660,7 +660,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, if (!write && !first && left && !err) proc_put_char(&buffer, &left, '\n'); if (write && !err && left) - left -= proc_skip_spaces(&p); + proc_skip_spaces(&p, &left); if (write && first) return err ? : -EINVAL; *lenp -= left; @@ -702,7 +702,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data, if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; - left -= proc_skip_spaces(&p); + proc_skip_spaces(&p, &left); if (!left) { err = -EINVAL; goto out_free; @@ -722,7 +722,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data, } if (!err && left) - left -= proc_skip_spaces(&p); + proc_skip_spaces(&p, &left); out_free: if (err) @@ -962,7 +962,7 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp, if ((param->min && *param->min > tmp) || (param->max && *param->max < tmp)) return -EINVAL; - *valp = tmp; + WRITE_ONCE(*valp, tmp); } return 0; @@ -1028,7 +1028,7 @@ static int do_proc_douintvec_minmax_conv(unsigned long *lvalp, (param->max && *param->max < tmp)) return -ERANGE; - *valp = tmp; + WRITE_ONCE(*valp, tmp); } return 0; @@ -1180,7 +1180,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, if (write) { bool neg; - left -= proc_skip_spaces(&p); + proc_skip_spaces(&p, &left); if (!left) break; @@ -1196,9 +1196,9 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, err = -EINVAL; break; } - *i = val; + WRITE_ONCE(*i, val); } else { - val = convdiv * (*i) / convmul; + val = convdiv * READ_ONCE(*i) / convmul; if (!first) proc_put_char(&buffer, &left, '\t'); proc_put_long(&buffer, &left, val, false); @@ -1208,7 +1208,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, if (!write && !first && left && !err) proc_put_char(&buffer, &left, '\n'); if (write && !err) - left -= proc_skip_spaces(&p); + proc_skip_spaces(&p, &left); if (write && first) return err ? : -EINVAL; *lenp -= left; @@ -1279,9 +1279,12 @@ static int do_proc_dointvec_jiffies_conv(bool *negp, unsigned long *lvalp, if (write) { if (*lvalp > INT_MAX / HZ) return 1; - *valp = *negp ? -(*lvalp*HZ) : (*lvalp*HZ); + if (*negp) + WRITE_ONCE(*valp, -*lvalp * HZ); + else + WRITE_ONCE(*valp, *lvalp * HZ); } else { - int val = *valp; + int val = READ_ONCE(*valp); unsigned long lval; if (val < 0) { *negp = true; @@ -1327,9 +1330,9 @@ static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp, if (jif > INT_MAX) return 1; - *valp = (int)jif; + WRITE_ONCE(*valp, (int)jif); } else { - int val = *valp; + int val = READ_ONCE(*valp); unsigned long lval; if (val < 0) { *negp = true; @@ -1397,8 +1400,8 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, * @ppos: the current position in the file * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer - * values from/to the user buffer, treated as an ASCII string. - * The values read are assumed to be in 1/1000 seconds, and + * values from/to the user buffer, treated as an ASCII string. 
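
The rewritten proc_skip_spaces() earlier in this sysctl.c diff can never advance past the caller's remaining byte count, unlike the old skip_spaces()-based version. The same logic as a standalone, testable function (the name differs from the kernel's):

#include <ctype.h>
#include <stdio.h>

static void skip_spaces_bounded(const char **buf, size_t *size)
{
	while (*size) {
		if (!isspace((unsigned char)**buf))
			break;
		(*size)--;
		(*buf)++;
	}
}

int main(void)
{
	const char *p = "   42";
	size_t left = 4;	/* pretend only 4 bytes of the buffer are valid */

	skip_spaces_bounded(&p, &left);
	printf("left=%zu rest=\"%.*s\"\n", left, (int)left, p);
	return 0;
}
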
+ * The values read are assumed to be in 1/1000 seconds, and * are converted into jiffies. * * Returns 0 on success. @@ -1836,6 +1839,15 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = sched_rr_handler, }, +#ifdef CONFIG_SMP + { + .procname = "sched_pelt_multiplier", + .data = &sysctl_sched_pelt_multiplier, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_pelt_multiplier, + }, +#endif #ifdef CONFIG_UCLAMP_TASK { .procname = "sched_util_clamp_min", @@ -2821,6 +2833,17 @@ static struct ctl_table vm_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = &two_hundred, }, +#ifdef CONFIG_NUMA + { + .procname = "numa_stat", + .data = &sysctl_vm_numa_stat, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = sysctl_vm_numa_stat_handler, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif #ifdef CONFIG_HUGETLB_PAGE { .procname = "nr_hugepages", @@ -2837,15 +2860,6 @@ static struct ctl_table vm_table[] = { .mode = 0644, .proc_handler = &hugetlb_mempolicy_sysctl_handler, }, - { - .procname = "numa_stat", - .data = &sysctl_vm_numa_stat, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = sysctl_vm_numa_stat_handler, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, - }, #endif { .procname = "hugetlb_shm_group", diff --git a/kernel/task_work.c b/kernel/task_work.c index e3a8e5c66ae5..1698fbe6f0e1 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c @@ -33,7 +33,6 @@ int task_work_add(struct task_struct *task, struct callback_head *work, enum task_work_notify_mode notify) { struct callback_head *head; - unsigned long flags; /* record the work call stack in order to print it in KASAN reports */ kasan_record_aux_stack(work); @@ -52,17 +51,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work, set_notify_resume(task); break; case TWA_SIGNAL: - /* - * Only grab the sighand lock if we don't already have some - * task_work pending. This pairs with the smp_store_mb() - * in get_signal(), see comment there. - */ - if (!(READ_ONCE(task->jobctl) & JOBCTL_TASK_WORK) && - lock_task_sighand(task, &flags)) { - task->jobctl |= JOBCTL_TASK_WORK; - signal_wake_up(task, 0); - unlock_task_sighand(task, &flags); - } + set_notify_signal(task); break; default: WARN_ON_ONCE(1); @@ -72,6 +61,48 @@ int task_work_add(struct task_struct *task, struct callback_head *work, return 0; } +/** + * task_work_cancel_match - cancel a pending work added by task_work_add() + * @task: the task which should execute the work + * @match: match function to call + * + * RETURNS: + * The found work or NULL if not found. + */ +struct callback_head * +task_work_cancel_match(struct task_struct *task, + bool (*match)(struct callback_head *, void *data), + void *data) +{ + struct callback_head **pprev = &task->task_works; + struct callback_head *work; + unsigned long flags; + + if (likely(!task->task_works)) + return NULL; + /* + * If cmpxchg() fails we continue without updating pprev. + * Either we raced with task_work_add() which added the + * new entry before this work, we will find it again. Or + * we raced with task_work_run(), *pprev == NULL/exited. 
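
With this factoring, task_work_cancel() becomes a thin wrapper around the generic matcher. The unlink-by-predicate walk, minus the kernel's pi_lock and cmpxchg() handling, looks like this in plain C (names mirror the patch; the locking is deliberately elided):

#include <stdbool.h>
#include <stdio.h>

struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *);
};

typedef void (*work_fn)(struct callback_head *);

static struct callback_head *
cancel_match(struct callback_head **pprev,
	     bool (*match)(struct callback_head *, void *), void *data)
{
	struct callback_head *work;

	while ((work = *pprev)) {
		if (!match(work, data)) {
			pprev = &work->next;
		} else {
			*pprev = work->next;	/* unlink the matched entry */
			return work;
		}
	}
	return NULL;
}

/* data points at the function pointer we are looking for */
static bool func_match(struct callback_head *cb, void *data)
{
	return cb->func == *(work_fn *)data;
}

static void work_a(struct callback_head *cb) { (void)cb; }
static void work_b(struct callback_head *cb) { (void)cb; }

int main(void)
{
	struct callback_head b = { NULL, work_b };
	struct callback_head a = { &b, work_a };
	struct callback_head *head = &a;
	work_fn victim = work_b;
	struct callback_head *got = cancel_match(&head, func_match, &victim);

	printf("cancelled %s\n", got == &b ? "work_b" : "nothing");
	return 0;
}
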
+/**
+ * task_work_cancel_match - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @match: match function to call
+ * @data: data to be passed to the @match function
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
+struct callback_head *
+task_work_cancel_match(struct task_struct *task,
+		       bool (*match)(struct callback_head *, void *data),
+		       void *data)
+{
+	struct callback_head **pprev = &task->task_works;
+	struct callback_head *work;
+	unsigned long flags;
+
+	if (likely(!task->task_works))
+		return NULL;
+	/*
+	 * If cmpxchg() fails we continue without updating pprev.
+	 * Either we raced with task_work_add() which added the
+	 * new entry before this work, and we will find it again. Or
+	 * we raced with task_work_run(), *pprev == NULL/exited.
+	 */
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	while ((work = READ_ONCE(*pprev))) {
+		if (!match(work, data))
+			pprev = &work->next;
+		else if (cmpxchg(pprev, work, work->next) == work)
+			break;
+	}
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+	return work;
+}
+
+static bool task_work_func_match(struct callback_head *cb, void *data)
+{
+	return cb->func == data;
+}
+
 /**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
@@ -86,28 +117,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
 struct callback_head *
 task_work_cancel(struct task_struct *task, task_work_func_t func)
 {
-	struct callback_head **pprev = &task->task_works;
-	struct callback_head *work;
-	unsigned long flags;
-
-	if (likely(!task->task_works))
-		return NULL;
-	/*
-	 * If cmpxchg() fails we continue without updating pprev.
-	 * Either we raced with task_work_add() which added the
-	 * new entry before this work, we will find it again. Or
-	 * we raced with task_work_run(), *pprev == NULL/exited.
-	 */
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	while ((work = READ_ONCE(*pprev))) {
-		if (work->func != func)
-			pprev = &work->next;
-		else if (cmpxchg(pprev, work, work->next) == work)
-			break;
-	}
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
-	return work;
+	return task_work_cancel_match(task, task_work_func_match, func);
 }
 
 /**
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 4ef90718c114..544ce87ba38a 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -2209,6 +2209,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
 
 	return !t.task ? 0 : -EINTR;
 }
+EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock);
 
 /**
 * schedule_hrtimeout_range - sleep until timeout
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index dd5697d7347b..b624788023d8 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -1051,15 +1051,24 @@ retry_delete:
 }
 
 /*
- * This is called by do_exit or de_thread, only when there are no more
- * references to the shared signal_struct.
+ * This is called by do_exit or de_thread, only when nobody else can
+ * modify the signal->posix_timers list. Yet we need sighand->siglock
+ * to prevent the race with /proc/pid/timers.
*/ -void exit_itimers(struct signal_struct *sig) +void exit_itimers(struct task_struct *tsk) { + struct list_head timers; struct k_itimer *tmr; - while (!list_empty(&sig->posix_timers)) { - tmr = list_entry(sig->posix_timers.next, struct k_itimer, list); + if (list_empty(&tsk->signal->posix_timers)) + return; + + spin_lock_irq(&tsk->sighand->siglock); + list_replace_init(&tsk->signal->posix_timers, &timers); + spin_unlock_irq(&tsk->sighand->siglock); + + while (!list_empty(&timers)) { + tmr = list_first_entry(&timers, struct k_itimer, list); itimer_delete(tmr); } } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 9895cee4f797..07969fa0ab2e 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -137,7 +137,7 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) */ if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) { #ifdef CONFIG_NO_HZ_FULL - WARN_ON(tick_nohz_full_running); + WARN_ON_ONCE(tick_nohz_full_running); #endif tick_do_timer_cpu = cpu; } @@ -428,7 +428,6 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask) cpumask_copy(tick_nohz_full_mask, cpumask); tick_nohz_full_running = true; } -EXPORT_SYMBOL_GPL(tick_nohz_full_setup); static int tick_nohz_cpu_down(unsigned int cpu) { diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index cc4dc2857a87..d9b48f7a35e0 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -17,11 +17,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include "tick-internal.h" #include "ntp_internal.h" @@ -1329,8 +1331,10 @@ out: /* signal hrtimers about time change */ clock_was_set(); - if (!ret) + if (!ret) { audit_tk_injoffset(ts_delta); + add_device_randomness(ts, sizeof(*ts)); + } return ret; } @@ -2378,6 +2382,20 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc) return 0; } +/** + * random_get_entropy_fallback - Returns the raw clock source value, + * used by random.c for platforms with no valid random_get_entropy(). + */ +unsigned long random_get_entropy_fallback(void) +{ + struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono; + struct clocksource *clock = READ_ONCE(tkr->clock); + + if (unlikely(timekeeping_suspended || !clock)) + return 0; + return clock->read(clock); +} +EXPORT_SYMBOL_GPL(random_get_entropy_fallback); /** * do_adjtimex() - Accessor function to NTP __do_adjtimex function @@ -2395,6 +2413,7 @@ int do_adjtimex(struct __kernel_timex *txc) ret = timekeeping_validate_timex(txc); if (ret) return ret; + add_device_randomness(txc, sizeof(*txc)); if (txc->modes & ADJ_SETOFFSET) { struct timespec64 delta; @@ -2412,6 +2431,7 @@ int do_adjtimex(struct __kernel_timex *txc) audit_ntp_init(&ad); ktime_get_real_ts64(&ts); + add_device_randomness(&ts, sizeof(ts)); raw_spin_lock_irqsave(&timekeeper_lock, flags); write_seqcount_begin(&tk_core.seq); diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 1535065a7d76..f5147bde3ed0 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1744,11 +1744,14 @@ static inline void __run_timers(struct timer_base *base) time_after_eq(jiffies, base->next_expiry)) { levels = collect_expired_timers(base, heads); /* - * The only possible reason for not finding any expired - * timer at this clk is that all matching timers have been - * dequeued. 
+	 * The two possible reasons for not finding any expired
+	 * timer at this clk are that all matching timers have been
+	 * dequeued or no timer has been queued since
+	 * base::next_expiry was set to base::clk +
+	 * NEXT_TIMER_MAX_DELTA.
 		 */
-		WARN_ON_ONCE(!levels && !base->next_expiry_recalc);
+		WARN_ON_ONCE(!levels && !base->next_expiry_recalc
+			     && base->timers_pending);
 		base->clk++;
 		base->next_expiry = __next_timer_interrupt(base);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4a5d35dc490b..d97c189695cb 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1295,6 +1295,7 @@ static int ftrace_add_mod(struct trace_array *tr,
 	if (!ftrace_mod)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&ftrace_mod->list);
 	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
 	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
 	ftrace_mod->enable = enable;
@@ -2899,6 +2900,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
 
 	ftrace_startup_enable(command);
 
+	/*
+	 * If ftrace is in an undefined state, we just remove the ops from the
+	 * list to prevent a NULL pointer dereference, instead of totally
+	 * rolling it back and freeing the trampoline, because those actions
+	 * could cause further damage.
+	 */
+	if (unlikely(ftrace_disabled)) {
+		__unregister_ftrace_function(ops);
+		return -ENODEV;
+	}
+
 	ops->flags &= ~FTRACE_OPS_FL_ADDING;
 
 	return 0;
@@ -2936,18 +2947,8 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		command |= FTRACE_UPDATE_TRACE_FUNC;
 	}
 
-	if (!command || !ftrace_enabled) {
-		/*
-		 * If these are dynamic or per_cpu ops, they still
-		 * need their data freed. Since, function tracing is
-		 * not currently active, we can just free them
-		 * without synchronizing all CPUs.
-		 */
-		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-			goto free_ops;
-
-		return 0;
-	}
+	if (!command || !ftrace_enabled)
+		goto out;
 
 	/*
 	 * If the ops uses a trampoline, then it needs to be
@@ -2984,6 +2985,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	removed_ops = NULL;
 	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
 
+out:
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
@@ -3011,7 +3013,6 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		if (IS_ENABLED(CONFIG_PREEMPTION))
 			synchronize_rcu_tasks();
 
- free_ops:
 		ftrace_trampoline_free(ops);
 	}
 
@@ -3178,7 +3179,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
 		/* if we can't allocate this size, try something smaller */
 		if (!order)
 			return -ENOMEM;
-		order >>= 1;
+		order--;
 		goto again;
 	}
 
@@ -4427,7 +4428,7 @@ int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
 * @ip: The instruction pointer address to remove the data from
 *
 * Returns the data if it is found, otherwise NULL.
- * Note, if the data pointer is used as the data itself, (see 
+ * Note, if the data pointer is used as the data itself (see
 * ftrace_func_mapper_find_ip()), then the return value may be meaningless,
 * if the data pointer was set to zero.
*/ @@ -5153,8 +5154,6 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr) __add_hash_entry(direct_functions, entry); ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0); - if (ret) - remove_hash_entry(direct_functions, entry); if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) { ret = register_ftrace_function(&direct_ops); @@ -5163,6 +5162,7 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr) } if (ret) { + remove_hash_entry(direct_functions, entry); kfree(entry); if (!direct->count) { list_del_rcu(&direct->next); @@ -5653,8 +5653,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file) if (filter_hash) { orig_hash = &iter->ops->func_hash->filter_hash; - if (iter->tr && !list_empty(&iter->tr->mod_trace)) - iter->hash->flags |= FTRACE_HASH_FL_MOD; + if (iter->tr) { + if (list_empty(&iter->tr->mod_trace)) + iter->hash->flags &= ~FTRACE_HASH_FL_MOD; + else + iter->hash->flags |= FTRACE_HASH_FL_MOD; + } } else orig_hash = &iter->ops->func_hash->notrace_hash; @@ -6874,7 +6878,7 @@ void __init ftrace_init(void) } pr_info("ftrace: allocating %ld entries in %ld pages\n", - count, count / ENTRIES_PER_PAGE + 1); + count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); last_ftrace_enabled = ftrace_enabled = 1; diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c index 18b0f1cbb947..c736487fc0e4 100644 --- a/kernel/trace/kprobe_event_gen_test.c +++ b/kernel/trace/kprobe_event_gen_test.c @@ -35,6 +35,49 @@ static struct trace_event_file *gen_kprobe_test; static struct trace_event_file *gen_kretprobe_test; +#define KPROBE_GEN_TEST_FUNC "do_sys_open" + +/* X86 */ +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_32) +#define KPROBE_GEN_TEST_ARG0 "dfd=%ax" +#define KPROBE_GEN_TEST_ARG1 "filename=%dx" +#define KPROBE_GEN_TEST_ARG2 "flags=%cx" +#define KPROBE_GEN_TEST_ARG3 "mode=+4($stack)" + +/* ARM64 */ +#elif defined(CONFIG_ARM64) +#define KPROBE_GEN_TEST_ARG0 "dfd=%x0" +#define KPROBE_GEN_TEST_ARG1 "filename=%x1" +#define KPROBE_GEN_TEST_ARG2 "flags=%x2" +#define KPROBE_GEN_TEST_ARG3 "mode=%x3" + +/* ARM */ +#elif defined(CONFIG_ARM) +#define KPROBE_GEN_TEST_ARG0 "dfd=%r0" +#define KPROBE_GEN_TEST_ARG1 "filename=%r1" +#define KPROBE_GEN_TEST_ARG2 "flags=%r2" +#define KPROBE_GEN_TEST_ARG3 "mode=%r3" + +/* RISCV */ +#elif defined(CONFIG_RISCV) +#define KPROBE_GEN_TEST_ARG0 "dfd=%a0" +#define KPROBE_GEN_TEST_ARG1 "filename=%a1" +#define KPROBE_GEN_TEST_ARG2 "flags=%a2" +#define KPROBE_GEN_TEST_ARG3 "mode=%a3" + +/* others */ +#else +#define KPROBE_GEN_TEST_ARG0 NULL +#define KPROBE_GEN_TEST_ARG1 NULL +#define KPROBE_GEN_TEST_ARG2 NULL +#define KPROBE_GEN_TEST_ARG3 NULL +#endif + +static bool trace_event_file_is_valid(struct trace_event_file *input) +{ + return input && !IS_ERR(input); +} + /* * Test to make sure we can create a kprobe event, then add more * fields. @@ -58,23 +101,23 @@ static int __init test_gen_kprobe_cmd(void) * fields. */ ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test", - "do_sys_open", - "dfd=%ax", "filename=%dx"); + KPROBE_GEN_TEST_FUNC, + KPROBE_GEN_TEST_ARG0, KPROBE_GEN_TEST_ARG1); if (ret) - goto free; + goto out; /* Use kprobe_event_add_fields to add the rest of the fields */ - ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)"); + ret = kprobe_event_add_fields(&cmd, KPROBE_GEN_TEST_ARG2, KPROBE_GEN_TEST_ARG3); if (ret) - goto free; + goto out; /* * This actually creates the event. 
*/ ret = kprobe_event_gen_cmd_end(&cmd); if (ret) - goto free; + goto out; /* * Now get the gen_kprobe_test event file. We need to prevent @@ -97,13 +140,13 @@ static int __init test_gen_kprobe_cmd(void) goto delete; } out: + kfree(buf); return ret; delete: + if (trace_event_file_is_valid(gen_kprobe_test)) + gen_kprobe_test = NULL; /* We got an error after creating the event, delete it */ ret = kprobe_event_delete("gen_kprobe_test"); - free: - kfree(buf); - goto out; } @@ -128,17 +171,17 @@ static int __init test_gen_kretprobe_cmd(void) * Define the kretprobe event. */ ret = kretprobe_event_gen_cmd_start(&cmd, "gen_kretprobe_test", - "do_sys_open", + KPROBE_GEN_TEST_FUNC, "$retval"); if (ret) - goto free; + goto out; /* * This actually creates the event. */ ret = kretprobe_event_gen_cmd_end(&cmd); if (ret) - goto free; + goto out; /* * Now get the gen_kretprobe_test event file. We need to @@ -162,13 +205,13 @@ static int __init test_gen_kretprobe_cmd(void) goto delete; } out: + kfree(buf); return ret; delete: + if (trace_event_file_is_valid(gen_kretprobe_test)) + gen_kretprobe_test = NULL; /* We got an error after creating the event, delete it */ ret = kprobe_event_delete("gen_kretprobe_test"); - free: - kfree(buf); - goto out; } @@ -182,10 +225,12 @@ static int __init kprobe_event_gen_test_init(void) ret = test_gen_kretprobe_cmd(); if (ret) { - WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr, - "kprobes", - "gen_kretprobe_test", false)); - trace_put_event_file(gen_kretprobe_test); + if (trace_event_file_is_valid(gen_kretprobe_test)) { + WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr, + "kprobes", + "gen_kretprobe_test", false)); + trace_put_event_file(gen_kretprobe_test); + } WARN_ON(kprobe_event_delete("gen_kretprobe_test")); } @@ -194,24 +239,30 @@ static int __init kprobe_event_gen_test_init(void) static void __exit kprobe_event_gen_test_exit(void) { - /* Disable the event or you can't remove it */ - WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr, - "kprobes", - "gen_kprobe_test", false)); + if (trace_event_file_is_valid(gen_kprobe_test)) { + /* Disable the event or you can't remove it */ + WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr, + "kprobes", + "gen_kprobe_test", false)); + + /* Now give the file and instance back */ + trace_put_event_file(gen_kprobe_test); + } - /* Now give the file and instance back */ - trace_put_event_file(gen_kprobe_test); /* Now unregister and free the event */ WARN_ON(kprobe_event_delete("gen_kprobe_test")); - /* Disable the event or you can't remove it */ - WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr, - "kprobes", - "gen_kretprobe_test", false)); + if (trace_event_file_is_valid(gen_kretprobe_test)) { + /* Disable the event or you can't remove it */ + WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr, + "kprobes", + "gen_kretprobe_test", false)); + + /* Now give the file and instance back */ + trace_put_event_file(gen_kretprobe_test); + } - /* Now give the file and instance back */ - trace_put_event_file(gen_kretprobe_test); /* Now unregister and free the event */ WARN_ON(kprobe_event_delete("gen_kretprobe_test")); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 6deac666ba3e..49ebb8c66268 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -414,6 +414,7 @@ struct rb_irq_work { struct irq_work work; wait_queue_head_t waiters; wait_queue_head_t full_waiters; + long wait_index; bool waiters_pending; bool full_waiters_pending; bool wakeup_full; @@ -516,6 
+517,7 @@ struct ring_buffer_per_cpu {
 	local_t				committing;
 	local_t				commits;
 	local_t				pages_touched;
+	local_t				pages_lost;
 	local_t				pages_read;
 	long				last_pages_touch;
 	size_t				shortest_full;
@@ -770,10 +772,18 @@ size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 {
 	size_t read;
+	size_t lost;
 	size_t cnt;
 
 	read = local_read(&buffer->buffers[cpu]->pages_read);
+	lost = local_read(&buffer->buffers[cpu]->pages_lost);
 	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
+
+	if (WARN_ON_ONCE(cnt < lost))
+		return 0;
+
+	cnt -= lost;
+
 	/* The reader can read an empty page, but not more than that */
 	if (cnt < read) {
 		WARN_ON_ONCE(read > cnt + 1);
@@ -783,6 +793,21 @@ size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 	return cnt - read;
 }
 
+static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	size_t nr_pages;
+	size_t dirty;
+
+	nr_pages = cpu_buffer->nr_pages;
+	if (!nr_pages || !full)
+		return true;
+
+	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+
+	return (dirty * 100) > (full * nr_pages);
+}
+
 /*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
@@ -794,12 +819,44 @@ static void rb_wake_up_waiters(struct irq_work *work)
 	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 
 	wake_up_all(&rbwork->waiters);
-	if (rbwork->wakeup_full) {
+	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
 		rbwork->wakeup_full = false;
+		rbwork->full_waiters_pending = false;
 		wake_up_all(&rbwork->full_waiters);
 	}
 }
 
+/**
+ * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
+ * @buffer: The ring buffer to wake waiters on
+ * @cpu: The CPU buffer to wake waiters on, or RING_BUFFER_ALL_CPUS
+ *
+ * When a file that represents a ring buffer is closing,
+ * it is prudent to wake up any waiters that are on it.
+ */
+void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct rb_irq_work *rbwork;
+
+	if (cpu == RING_BUFFER_ALL_CPUS) {
+
+		/* Wake up individual ones too.
One level recursion */ + for_each_buffer_cpu(buffer, cpu) + ring_buffer_wake_waiters(buffer, cpu); + + rbwork = &buffer->irq_work; + } else { + cpu_buffer = buffer->buffers[cpu]; + rbwork = &cpu_buffer->irq_work; + } + + rbwork->wait_index++; + /* make sure the waiters see the new index */ + smp_wmb(); + + rb_wake_up_waiters(&rbwork->work); +} + /** * ring_buffer_wait - wait for input to the ring buffer * @buffer: buffer to wait on @@ -815,6 +872,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) struct ring_buffer_per_cpu *cpu_buffer; DEFINE_WAIT(wait); struct rb_irq_work *work; + long wait_index; int ret = 0; /* @@ -833,6 +891,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) work = &cpu_buffer->irq_work; } + wait_index = READ_ONCE(work->wait_index); while (true) { if (full) @@ -877,26 +936,29 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) !ring_buffer_empty_cpu(buffer, cpu)) { unsigned long flags; bool pagebusy; - size_t nr_pages; - size_t dirty; + bool done; if (!full) break; raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; - nr_pages = cpu_buffer->nr_pages; - dirty = ring_buffer_nr_dirty_pages(buffer, cpu); + done = !pagebusy && full_hit(buffer, cpu, full); + if (!cpu_buffer->shortest_full || - cpu_buffer->shortest_full < full) + cpu_buffer->shortest_full > full) cpu_buffer->shortest_full = full; raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); - if (!pagebusy && - (!nr_pages || (dirty * 100) > full * nr_pages)) + if (done) break; } schedule(); + + /* Make sure to see the new wait index */ + smp_rmb(); + if (wait_index != work->wait_index) + break; } if (full) @@ -913,6 +975,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) * @cpu: the cpu buffer to wait on * @filp: the file descriptor * @poll_table: The poll descriptor + * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS * * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon * as data is added to any of the @buffer's cpu buffers. Otherwise @@ -922,14 +985,15 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) * zero otherwise. */ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, - struct file *filp, poll_table *poll_table) + struct file *filp, poll_table *poll_table, int full) { struct ring_buffer_per_cpu *cpu_buffer; struct rb_irq_work *work; - if (cpu == RING_BUFFER_ALL_CPUS) + if (cpu == RING_BUFFER_ALL_CPUS) { work = &buffer->irq_work; - else { + full = 0; + } else { if (!cpumask_test_cpu(cpu, buffer->cpumask)) return -EINVAL; @@ -937,8 +1001,14 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, work = &cpu_buffer->irq_work; } - poll_wait(filp, &work->waiters, poll_table); - work->waiters_pending = true; + if (full) { + poll_wait(filp, &work->full_waiters, poll_table); + work->full_waiters_pending = true; + } else { + poll_wait(filp, &work->waiters, poll_table); + work->waiters_pending = true; + } + /* * There's a tight race between setting the waiters_pending and * checking if the ring buffer is empty. Once the waiters_pending bit @@ -954,6 +1024,9 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, */ smp_mb(); + if (full) + return full_hit(buffer, cpu, full) ? 
EPOLLIN | EPOLLRDNORM : 0;
+
 	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
 		return EPOLLIN | EPOLLRDNORM;
@@ -1595,9 +1668,9 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 
 	free_buffer_page(cpu_buffer->reader_page);
 
-	rb_head_page_deactivate(cpu_buffer);
-
 	if (head) {
+		rb_head_page_deactivate(cpu_buffer);
+
 		list_for_each_entry_safe(bpage, tmp, head, list) {
 			list_del_init(&bpage->list);
 			free_buffer_page(bpage);
@@ -1833,6 +1906,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 			 */
 			local_add(page_entries, &cpu_buffer->overrun);
 			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+			local_inc(&cpu_buffer->pages_lost);
 		}
 
 	/*
@@ -2323,6 +2397,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 		 */
 		local_add(entries, &cpu_buffer->overrun);
 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+		local_inc(&cpu_buffer->pages_lost);
 
 		/*
 		 * The entries will be zeroed out when we move the
@@ -2491,6 +2566,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		/* Mark the rest of the page with padding */
 		rb_event_set_padding(event);
 
+		/* Make sure the padding is visible before the write update */
+		smp_wmb();
+
 		/* Set the write back to the previous setting */
 		local_sub(length, &tail_page->write);
 		return;
@@ -2502,6 +2580,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	/* time delta must be non zero */
 	event->time_delta = 1;
 
+	/* Make sure the padding is visible before the tail_page->write update */
+	smp_wmb();
+
 	/* Set write to end of buffer */
 	length = (tail + length) - BUF_PAGE_SIZE;
 	local_sub(length, &tail_page->write);
@@ -2987,10 +3068,6 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 static __always_inline void
 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
-	size_t nr_pages;
-	size_t dirty;
-	size_t full;
-
 	if (buffer->irq_work.waiters_pending) {
 		buffer->irq_work.waiters_pending = false;
 		/* irq_work_queue() supplies its own memory barriers */
@@ -3014,10 +3091,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 
 	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
 
-	full = cpu_buffer->shortest_full;
-	nr_pages = cpu_buffer->nr_pages;
-	dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
-	if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+	if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
 		return;
 
 	cpu_buffer->irq_work.wakeup_full = true;
@@ -4316,6 +4390,33 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
+	/*
+	 * The writer has preemption disabled; wait for it, but not forever.
+	 * Although, 1 second is pretty much "forever".
+	 */
+#define USECS_WAIT	1000000
+	for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
+		/* If the write is past the end of page, a writer is still updating it */
+		if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE))
+			break;
+
+		udelay(1);
+
+		/* Get the latest version of the reader write value */
+		smp_rmb();
+	}
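The smp_wmb() calls added to rb_reset_tail() above pair with the smp_rmb() on this reader path: the writer must publish the padding before it updates the write index, and the reader must read the write index before it looks at the page contents. A sketch of the ordering contract, with fill_padding() and read_events() as hypothetical stand-ins for the real event accessors:

/* writer side, cf. rb_reset_tail() */
static void writer_publish(struct buffer_page *page, unsigned long length)
{
	fill_padding(page);			/* A: write the padding events */
	smp_wmb();				/* order A before B */
	local_sub(length, &page->write);	/* B: update the write index */
}

/* reader side, cf. rb_get_reader_page() */
static void reader_consume(struct buffer_page *reader)
{
	unsigned long w = rb_page_write(reader);	/* B': observe the index */
	smp_rmb();				/* order B' before A' */
	read_events(reader, w);			/* A': sees the padding from A */
}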
+
+	/* The writer is not moving forward? Something is wrong */
+	if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
+		reader = NULL;
+
+	/*
+	 * Make sure we see any padding after the write update
+	 * (see rb_reset_tail())
+	 */
+	smp_rmb();
+
 	return reader;
 }
 
@@ -4891,6 +4992,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
 	local_set(&cpu_buffer->pages_touched, 0);
+	local_set(&cpu_buffer->pages_lost, 0);
 	local_set(&cpu_buffer->pages_read, 0);
 	cpu_buffer->last_pages_touch = 0;
 	cpu_buffer->shortest_full = 0;
@@ -5341,7 +5443,15 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
 		unsigned int pos = 0;
 		unsigned int size;
 
-		if (full)
+		/*
+		 * If a full page is expected, this can still be returned
+		 * if there's been a previous partial read and the
+		 * rest of the page can be read and the commit page is off
+		 * the reader page.
+		 */
+		if (full &&
+		    (!read || (len < (commit - read)) ||
+		     cpu_buffer->reader_page == cpu_buffer->commit_page))
 			goto out_unlock;
 
 		if (len > (commit - read))
diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
index edd912cd14aa..a6a2813afb87 100644
--- a/kernel/trace/synth_event_gen_test.c
+++ b/kernel/trace/synth_event_gen_test.c
@@ -120,15 +120,13 @@ static int __init test_gen_synth_cmd(void)
 	/* Now generate a gen_synth_test event */
 	ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals));
- out:
+ free:
+	kfree(buf);
 	return ret;
  delete:
 	/* We got an error after creating the event, delete it */
 	synth_event_delete("gen_synth_test");
- free:
-	kfree(buf);
-
-	goto out;
+	goto free;
 }
 
 /*
@@ -227,15 +225,13 @@ static int __init test_empty_synth_event(void)
 	/* Now trace an empty_synth_test event */
 	ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals));
- out:
+ free:
+	kfree(buf);
 	return ret;
  delete:
 	/* We got an error after creating the event, delete it */
 	synth_event_delete("empty_synth_test");
- free:
-	kfree(buf);
-
-	goto out;
+	goto free;
 }
 
 static struct synth_field_desc create_synth_test_fields[] = {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2509e3a3f083..8b1f74eb985c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1198,12 +1198,14 @@ void *tracing_cond_snapshot_data(struct trace_array *tr)
 {
 	void *cond_data = NULL;
 
+	local_irq_disable();
 	arch_spin_lock(&tr->max_lock);
 
 	if (tr->cond_snapshot)
 		cond_data = tr->cond_snapshot->cond_data;
 
 	arch_spin_unlock(&tr->max_lock);
+	local_irq_enable();
 
 	return cond_data;
 }
@@ -1339,9 +1341,11 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
 		goto fail_unlock;
 	}
 
+	local_irq_disable();
 	arch_spin_lock(&tr->max_lock);
 	tr->cond_snapshot = cond_snapshot;
 	arch_spin_unlock(&tr->max_lock);
+	local_irq_enable();
 
 	mutex_unlock(&trace_types_lock);
 
@@ -1368,6 +1372,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
 {
 	int ret = 0;
 
+	local_irq_disable();
 	arch_spin_lock(&tr->max_lock);
 
 	if (!tr->cond_snapshot)
@@ -1378,6 +1383,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
 	}
 
 	arch_spin_unlock(&tr->max_lock);
+	local_irq_enable();
 
 	return ret;
 }
@@ -2199,6 +2205,11 @@ static size_t tgid_map_max;
 #define SAVED_CMDLINES_DEFAULT 128
 #define NO_CMDLINE_MAP UINT_MAX
+/*
+ * Preemption must be disabled before acquiring trace_cmdline_lock.
+ * The various trace_arrays' max_lock must be acquired in a context
+ * where interrupts are disabled.
+ */
 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 struct saved_cmdlines_buffer {
 	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
@@ -2411,7 +2422,11 @@ static int trace_save_cmdline(struct task_struct *tsk)
 	 * the lock, but we also don't want to spin
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
+	 *
+	 * This is called from within the scheduler and wakeup paths, so
+	 * interrupts had better be disabled and the run queue lock held.
 	 */
+	lockdep_assert_preemption_disabled();
 	if (!arch_spin_trylock(&trace_cmdline_lock))
 		return 0;
 
@@ -2785,7 +2800,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
 
-static DEFINE_SPINLOCK(tracepoint_iter_lock);
+static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
 static DEFINE_MUTEX(tracepoint_printk_mutex);
 
 static void output_printk(struct trace_event_buffer *fbuffer)
@@ -2813,14 +2828,14 @@ static void output_printk(struct trace_event_buffer *fbuffer)
 
 	event = &fbuffer->trace_file->event_call->event;
 
-	spin_lock_irqsave(&tracepoint_iter_lock, flags);
+	raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
 	trace_seq_init(&iter->seq);
 	iter->ent = fbuffer->entry;
 	event_call->event.funcs->trace(iter, 0, event);
 	trace_seq_putc(&iter->seq, 0);
 	printk("%s", iter->seq.buffer);
 
-	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
+	raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
 }
 
 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
@@ -5471,9 +5486,11 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
 	char buf[64];
 	int r;
 
+	preempt_disable();
 	arch_spin_lock(&trace_cmdline_lock);
 	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
 	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
@@ -5498,10 +5515,12 @@ static int tracing_resize_saved_cmdlines(unsigned int val)
 		return -ENOMEM;
 	}
 
+	preempt_disable();
 	arch_spin_lock(&trace_cmdline_lock);
 	savedcmd_temp = savedcmd;
 	savedcmd = s;
 	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
 	free_saved_cmdlines_buffer(savedcmd_temp);
 
 	return 0;
@@ -5908,12 +5927,18 @@ static void tracing_set_nop(struct trace_array *tr)
 	tr->current_trace = &nop_trace;
 }
 
+static bool tracer_options_updated;
+
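The rule in the comment above trace_cmdline_lock is enforced at each call site rather than by a wrapper: arch_spin_lock() is a raw arch-level spinlock that neither disables preemption nor checks for it. Every critical section in this patch therefore takes the shape below (a sketch of the pattern, not a helper added here):

static void example_cmdline_section(void)
{
	preempt_disable();		/* arch_spin_lock() won't do this for us */
	arch_spin_lock(&trace_cmdline_lock);

	/* ... read or resize savedcmd under the lock ... */

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}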
 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
 {
 	/* Only enable if the directory has been created already. */
 	if (!tr->dir)
 		return;
 
+	/* Only create trace option files after update_tracer_options finishes */
+	if (!tracer_options_updated)
+		return;
+
 	create_trace_option_files(tr, t);
 }
 
@@ -5948,10 +5973,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
 
 #ifdef CONFIG_TRACER_SNAPSHOT
 	if (t->use_max_tr) {
+		local_irq_disable();
 		arch_spin_lock(&tr->max_lock);
 		if (tr->cond_snapshot)
 			ret = -EBUSY;
 		arch_spin_unlock(&tr->max_lock);
+		local_irq_enable();
 		if (ret)
 			goto out;
 	}
@@ -5982,12 +6009,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
 	if (tr->current_trace->reset)
 		tr->current_trace->reset(tr);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+	had_max_tr = tr->current_trace->use_max_tr;
+
 	/* Current trace needs to be nop_trace before synchronize_rcu */
 	tr->current_trace = &nop_trace;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	had_max_tr = tr->allocated_snapshot;
-
 	if (had_max_tr && !t->use_max_tr) {
 		/*
 		 * We need to make sure that the update_max_tr sees that
@@ -5999,14 +6026,14 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
 		synchronize_rcu();
 		free_snapshot(tr);
 	}
-#endif
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	if (t->use_max_tr && !had_max_tr) {
+	if (t->use_max_tr && !tr->allocated_snapshot) {
 		ret = tracing_alloc_snapshot_instance(tr);
 		if (ret < 0)
 			goto out;
 	}
+#else
+	tr->current_trace = &nop_trace;
 #endif
 
 	if (t->init) {
@@ -6237,7 +6264,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
 		return EPOLLIN | EPOLLRDNORM;
 	else
 		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
-					     filp, poll_table);
+					     filp, poll_table, iter->tr->buffer_percent);
 }
 
 static __poll_t
@@ -7025,10 +7052,12 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 		goto out;
 	}
 
+	local_irq_disable();
 	arch_spin_lock(&tr->max_lock);
 	if (tr->cond_snapshot)
 		ret = -EBUSY;
 	arch_spin_unlock(&tr->max_lock);
+	local_irq_enable();
 	if (ret)
 		goto out;
 
@@ -8650,6 +8679,7 @@ static void __update_tracer_options(struct trace_array *tr)
 static void update_tracer_options(struct trace_array *tr)
 {
 	mutex_lock(&trace_types_lock);
+	tracer_options_updated = true;
 	__update_tracer_options(tr);
 	mutex_unlock(&trace_types_lock);
 }
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index 5fa49cfd2bb6..d312a52a10a5 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -70,6 +70,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
 		if (ret)
 			break;
 	}
+	tracing_reset_all_online_cpus();
 	mutex_unlock(&event_mutex);
 
 	return ret;
@@ -165,6 +166,7 @@ int dyn_events_release_all(struct dyn_event_operations *type)
 			break;
 	}
  out:
+	tracing_reset_all_online_cpus();
 	mutex_unlock(&event_mutex);
 
 	return ret;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7cc5f0a77c3c..bac13f24a96e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -168,6 +168,7 @@ static int trace_define_generic_fields(void)
 	__generic_field(int, CPU, FILTER_CPU);
 	__generic_field(int, cpu, FILTER_CPU);
+	__generic_field(int, common_cpu, FILTER_CPU);
 	__generic_field(char *, COMM, FILTER_COMM);
 	__generic_field(char *, comm, FILTER_COMM);
 
@@ -2571,7 +2572,10 @@ static int probe_remove_event_call(struct trace_event_call *call)
 		 * TRACE_REG_UNREGISTER.
 		 */
 		if (file->flags & EVENT_FILE_FL_ENABLED)
-			return -EBUSY;
+			goto busy;
+
+		if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
+			tr->clear_trace = true;
 		/*
 		 * The do_for_each_event_file_safe() is
 		 * a double loop.
After finding the call for this @@ -2584,6 +2588,12 @@ static int probe_remove_event_call(struct trace_event_call *call) __trace_remove_event_call(call); return 0; + busy: + /* No need to clear the trace now */ + list_for_each_entry(tr, &ftrace_trace_arrays, list) { + tr->clear_trace = false; + } + return -EBUSY; } /* Remove an event_call */ diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index eb7200699cf6..fd5416829445 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1789,8 +1789,11 @@ static int init_var_ref(struct hist_field *ref_field, return err; free: kfree(ref_field->system); + ref_field->system = NULL; kfree(ref_field->event_name); + ref_field->event_name = NULL; kfree(ref_field->name); + ref_field->name = NULL; goto out; } @@ -3940,6 +3943,8 @@ static int parse_var_defs(struct hist_trigger_data *hist_data) s = kstrdup(field_str, GFP_KERNEL); if (!s) { + kfree(hist_data->attrs->var_defs.name[n_vars]); + hist_data->attrs->var_defs.name[n_vars] = NULL; ret = -ENOMEM; goto free; } diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 881df991742a..18291ab35657 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -791,10 +791,9 @@ static int register_synth_event(struct synth_event *event) } ret = set_synth_event_print_fmt(call); - if (ret < 0) { + /* unregister_trace_event() will be called inside */ + if (ret < 0) trace_remove_event_call(call); - goto err; - } out: return ret; err: diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index d0309de2f84f..4bc90965abb2 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -1219,7 +1219,14 @@ static void stacktrace_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { - trace_dump_stack(STACK_SKIP); + struct trace_event_file *file = data->private_data; + unsigned long flags; + + if (file) { + local_save_flags(flags); + __trace_stack(file->tr, flags, STACK_SKIP, preempt_count()); + } else + trace_dump_stack(STACK_SKIP); } static void diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c index c881e5a5efb8..4593f16da973 100644 --- a/kernel/trace/trace_preemptirq.c +++ b/kernel/trace/trace_preemptirq.c @@ -111,15 +111,15 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr) this_cpu_write(tracing_irq_cpu, 0); } - lockdep_hardirqs_on_prepare(CALLER_ADDR0); - lockdep_hardirqs_on(CALLER_ADDR0); + lockdep_hardirqs_on_prepare(caller_addr); + lockdep_hardirqs_on(caller_addr); } EXPORT_SYMBOL(trace_hardirqs_on_caller); NOKPROBE_SYMBOL(trace_hardirqs_on_caller); __visible void trace_hardirqs_off_caller(unsigned long caller_addr) { - lockdep_hardirqs_off(CALLER_ADDR0); + lockdep_hardirqs_off(caller_addr); if (!this_cpu_read(tracing_irq_cpu)) { this_cpu_write(tracing_irq_cpu, 1); diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 1d31bc4acf7a..073abbe3866b 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -300,7 +300,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, } } else goto inval_var; - } else if (strcmp(arg, "comm") == 0) { + } else if (strcmp(arg, "comm") == 0 || strcmp(arg, "COMM") == 0) { code->op = FETCH_OP_COMM; #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API } else if (((flags & TPARG_FL_MASK) == @@ -595,7 +595,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, * 
 * Since $comm and immediate string cannot be dereferenced,
 * we can find those by strcmp.
 */
-	if (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0) {
+	if (strcmp(arg, "$comm") == 0 || strcmp(arg, "$COMM") == 0 ||
+	    strncmp(arg, "\\\"", 2) == 0) {
 		/* The type of $comm must be "string", and not an array. */
 		if (parg->count || (t && strcmp(t, "string")))
 			return -EINVAL;
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
index 249ed3259144..d29731a30b8e 100644
--- a/kernel/watch_queue.c
+++ b/kernel/watch_queue.c
@@ -34,6 +34,27 @@ MODULE_LICENSE("GPL");
 #define WATCH_QUEUE_NOTE_SIZE 128
 #define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
 
+/*
+ * This must be called under the RCU read-lock, which makes
+ * sure that the wqueue still exists. It can then take the lock,
+ * and check that the wqueue hasn't been destroyed, which in
+ * turn makes sure that the notification pipe still exists.
+ */
+static inline bool lock_wqueue(struct watch_queue *wqueue)
+{
+	spin_lock_bh(&wqueue->lock);
+	if (unlikely(wqueue->defunct)) {
+		spin_unlock_bh(&wqueue->lock);
+		return false;
+	}
+	return true;
+}
+
+static inline void unlock_wqueue(struct watch_queue *wqueue)
+{
+	spin_unlock_bh(&wqueue->lock);
+}
+
 static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
 					 struct pipe_buffer *buf)
 {
@@ -69,6 +90,10 @@ static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
 
 /*
 * Post a notification to a watch queue.
+ *
+ * Must be called with the RCU lock for reading, and the
+ * watch_queue lock held, which guarantees that the pipe
+ * hasn't been released.
 */
 static bool post_one_notification(struct watch_queue *wqueue,
 				  struct watch_notification *n)
@@ -85,9 +110,6 @@ static bool post_one_notification(struct watch_queue *wqueue,
 
 	spin_lock_irq(&pipe->rd_wait.lock);
 
-	if (wqueue->defunct)
-		goto out;
-
 	mask = pipe->ring_size - 1;
 	head = pipe->head;
 	tail = pipe->tail;
@@ -203,7 +225,10 @@ void __post_watch_notification(struct watch_list *wlist,
 		if (security_post_notification(watch->cred, cred, n) < 0)
 			continue;
 
-		post_one_notification(wqueue, n);
+		if (lock_wqueue(wqueue)) {
+			post_one_notification(wqueue, n);
+			unlock_wqueue(wqueue);
+		}
 	}
 
 	rcu_read_unlock();
@@ -432,6 +457,33 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue)
 	rcu_assign_pointer(watch->queue, wqueue);
 }
 
+static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue)
+{
+	const struct cred *cred;
+	struct watch *w;
+
+	hlist_for_each_entry(w, &wlist->watchers, list_node) {
+		struct watch_queue *wq = rcu_access_pointer(w->queue);
+		if (wqueue == wq && watch->id == w->id)
+			return -EBUSY;
+	}
+
+	cred = current_cred();
+	if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
+		atomic_dec(&cred->user->nr_watches);
+		return -EAGAIN;
+	}
+
+	watch->cred = get_cred(cred);
+	rcu_assign_pointer(watch->watch_list, wlist);
+
+	kref_get(&wqueue->usage);
+	kref_get(&watch->usage);
+	hlist_add_head(&watch->queue_node, &wqueue->watches);
+	hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
+	return 0;
+}
+
 /**
 * add_watch_to_object - Add a watch on an object to a watch list
 * @watch: The watch to add
@@ -446,33 +498,21 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue)
 */
 int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
 {
-	struct watch_queue *wqueue = rcu_access_pointer(watch->queue);
-	struct watch *w;
+	struct watch_queue *wqueue;
+	int ret = -ENOENT;
 
-	hlist_for_each_entry(w,
&wlist->watchers, list_node) { - struct watch_queue *wq = rcu_access_pointer(w->queue); - if (wqueue == wq && watch->id == w->id) - return -EBUSY; + rcu_read_lock(); + + wqueue = rcu_access_pointer(watch->queue); + if (lock_wqueue(wqueue)) { + spin_lock(&wlist->lock); + ret = add_one_watch(watch, wlist, wqueue); + spin_unlock(&wlist->lock); + unlock_wqueue(wqueue); } - watch->cred = get_current_cred(); - rcu_assign_pointer(watch->watch_list, wlist); - - if (atomic_inc_return(&watch->cred->user->nr_watches) > - task_rlimit(current, RLIMIT_NOFILE)) { - atomic_dec(&watch->cred->user->nr_watches); - put_cred(watch->cred); - return -EAGAIN; - } - - spin_lock_bh(&wqueue->lock); - kref_get(&wqueue->usage); - kref_get(&watch->usage); - hlist_add_head(&watch->queue_node, &wqueue->watches); - spin_unlock_bh(&wqueue->lock); - - hlist_add_head(&watch->list_node, &wlist->watchers); - return 0; + rcu_read_unlock(); + return ret; } EXPORT_SYMBOL(add_watch_to_object); @@ -523,20 +563,15 @@ found: wqueue = rcu_dereference(watch->queue); - /* We don't need the watch list lock for the next bit as RCU is - * protecting *wqueue from deallocation. - */ - if (wqueue) { + if (lock_wqueue(wqueue)) { post_one_notification(wqueue, &n.watch); - spin_lock_bh(&wqueue->lock); - if (!hlist_unhashed(&watch->queue_node)) { hlist_del_init_rcu(&watch->queue_node); put_watch(watch); } - spin_unlock_bh(&wqueue->lock); + unlock_wqueue(wqueue); } if (wlist->release_watch) { diff --git a/kernel/watchdog.c b/kernel/watchdog.c index af6597523802..a94b71e130cf 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -640,7 +640,7 @@ int lockup_detector_offline_cpu(unsigned int cpu) return 0; } -static void lockup_detector_reconfigure(void) +static void __lockup_detector_reconfigure(void) { cpus_read_lock(); watchdog_nmi_stop(); @@ -660,6 +660,13 @@ static void lockup_detector_reconfigure(void) __lockup_detector_cleanup(); } +void lockup_detector_reconfigure(void) +{ + mutex_lock(&watchdog_mutex); + __lockup_detector_reconfigure(); + mutex_unlock(&watchdog_mutex); +} + /* * Create the watchdog thread infrastructure and configure the detector(s). * @@ -680,13 +687,13 @@ static __init void lockup_detector_setup(void) return; mutex_lock(&watchdog_mutex); - lockup_detector_reconfigure(); + __lockup_detector_reconfigure(); softlockup_initialized = true; mutex_unlock(&watchdog_mutex); } #else /* CONFIG_SOFTLOCKUP_DETECTOR */ -static void lockup_detector_reconfigure(void) +static void __lockup_detector_reconfigure(void) { cpus_read_lock(); watchdog_nmi_stop(); @@ -694,9 +701,13 @@ static void lockup_detector_reconfigure(void) watchdog_nmi_start(); cpus_read_unlock(); } +void lockup_detector_reconfigure(void) +{ + __lockup_detector_reconfigure(); +} static inline void lockup_detector_setup(void) { - lockup_detector_reconfigure(); + __lockup_detector_reconfigure(); } #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */ @@ -736,7 +747,7 @@ static void proc_watchdog_update(void) { /* Remove impossible cpus to keep sysctl output clean. 
 */
 	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
 
-	lockup_detector_reconfigure();
+	__lockup_detector_reconfigure();
 }
 
 /*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 50ab893aeaf7..cb057e333187 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3074,10 +3074,8 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
 	if (WARN_ON(!work->func))
 		return false;
 
-	if (!from_cancel) {
-		lock_map_acquire(&work->lockdep_map);
-		lock_map_release(&work->lockdep_map);
-	}
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);
 
 	if (start_flush_work(work, &barr, from_cancel)) {
 		wait_for_completion(&barr.done);
diff --git a/lib/Kconfig b/lib/Kconfig
index 0abb7975586c..f9660d0332c6 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -101,6 +101,11 @@ config INDIRECT_PIO
 
 	  When in doubt, say N.
 
+source "lib/crypto/Kconfig"
+
+config LIB_MEMNEQ
+	bool
+
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f712a262e36e..8608332e0ecc 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -302,8 +302,10 @@ config FRAME_WARN
 	int "Warn for stack frames larger than"
 	range 0 8192
 	default 2048 if GCC_PLUGIN_LATENT_ENTROPY
-	default 1280 if (!64BIT && PARISC)
-	default 1024 if (!64BIT && !PARISC)
+	default 2048 if PARISC
+	default 1536 if (!64BIT && XTENSA)
+	default 1280 if KASAN && !64BIT
+	default 1024 if !64BIT
 	default 2048 if 64BIT
 	help
 	  Tell gcc to warn at build time for stack frames larger than this.
@@ -1323,6 +1325,46 @@ config LOCKDEP
 config LOCKDEP_SMALL
 	bool
 
+config LOCKDEP_BITS
+	int "Bitsize for MAX_LOCKDEP_ENTRIES"
+	depends on LOCKDEP && !LOCKDEP_SMALL
+	range 10 30
+	default 15
+	help
+	  Try increasing this value if you hit the "BUG: MAX_LOCKDEP_ENTRIES too low!" message.
+
+config LOCKDEP_CHAINS_BITS
+	int "Bitsize for MAX_LOCKDEP_CHAINS"
+	depends on LOCKDEP && !LOCKDEP_SMALL
+	range 10 30
+	default 16
+	help
+	  Try increasing this value if you hit the "BUG: MAX_LOCKDEP_CHAINS too low!" message.
+
+config LOCKDEP_STACK_TRACE_BITS
+	int "Bitsize for MAX_STACK_TRACE_ENTRIES"
+	depends on LOCKDEP && !LOCKDEP_SMALL
+	range 10 30
+	default 19
+	help
+	  Try increasing this value if you hit the "BUG: MAX_STACK_TRACE_ENTRIES too low!" message.
+
+config LOCKDEP_STACK_TRACE_HASH_BITS
+	int "Bitsize for STACK_TRACE_HASH_SIZE"
+	depends on LOCKDEP && !LOCKDEP_SMALL
+	range 10 30
+	default 14
+	help
+	  Try increasing this value if you need a large MAX_STACK_TRACE_ENTRIES.
+
+config LOCKDEP_CIRCULAR_QUEUE_BITS
+	int "Bitsize for elements in circular_queue struct"
+	depends on LOCKDEP
+	range 10 30
+	default 12
+	help
+	  Try increasing this value if you hit the "lockdep bfs error:-1" warning due to __cq_enqueue() failure.
+
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
@@ -1442,8 +1484,7 @@ config WARN_ALL_UNSEEDED_RANDOM
 	  so architecture maintainers really need to do what they can
 	  to get the CRNG seeded sooner after the system is booted.
 	  However, since users cannot do anything actionable to
-	  address this, by default the kernel will issue only a single
-	  warning for the first use of unseeded randomness.
+	  address this, by default this option is disabled.
 
 	  Say Y here if you want to receive warnings for all uses of
 	  unseeded randomness.  This will be of use primarily for
@@ -1778,8 +1819,14 @@ config NETDEV_NOTIFIER_ERROR_INJECT
 
 	  If unsure, say N.
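The FUNCTION_ERROR_INJECTION entry below gates the ALLOW_ERROR_INJECTION() annotation that the fail_function injector and BPF use to override return values. A minimal sketch of how a function opts in (example_prepare() is an illustrative name, not from this patch):

#include <linux/error-injection.h>

static int example_prepare(void)
{
	/* real work elided */
	return 0;
}
/* allow fail_function/BPF to force an errno return from this function */
ALLOW_ERROR_INJECTION(example_prepare, ERRNO);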
 config FUNCTION_ERROR_INJECTION
-	def_bool y
+	bool "Fault-injection of functions"
 	depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+	help
+	  Add fault injection into various functions that are annotated with
+	  ALLOW_ERROR_INJECTION() in the kernel. BPF may also modify the return
+	  value of these functions. This is useful for testing error paths in code.
+
+	  If unsure, say N.
 
 config FAULT_INJECTION
 	bool "Fault-injection framework"
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index a5ba06a86358..4e999807b6c7 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -59,6 +59,7 @@ choice
 config KASAN_GENERIC
 	bool "Generic mode"
 	depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
+	depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
 	select SLUB_DEBUG if SLUB
 	select CONSTRUCTORS
 	help
@@ -79,6 +80,7 @@ config KASAN_SW_TAGS
 	bool "Software tag-based mode"
 	depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
+	depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
 	select SLUB_DEBUG if SLUB
 	select CONSTRUCTORS
 	help
diff --git a/lib/Makefile b/lib/Makefile
index 5550543bfa80..1864a5e89741 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -248,6 +248,7 @@ obj-$(CONFIG_DIMLIB) += dim/
 obj-$(CONFIG_SIGNATURE) += digsig.o
 lib-$(CONFIG_CLZ_TAB) += clz_tab.o
 
+lib-$(CONFIG_LIB_MEMNEQ) += memneq.o
 obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
 obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
 
@@ -274,7 +275,7 @@ $(foreach file, $(libfdt_files), \
 	$(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
 lib-$(CONFIG_LIBFDT) += $(libfdt_files)
 
-lib-$(CONFIG_BOOT_CONFIG) += bootconfig.o
+obj-$(CONFIG_BOOT_CONFIG) += bootconfig.o
 
 obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
 obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 6f4bcf524554..b537a83678e1 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1462,6 +1462,7 @@ int assoc_array_gc(struct assoc_array *array,
 	struct assoc_array_ptr *cursor, *ptr;
 	struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
 	unsigned long nr_leaves_on_tree;
+	bool retained;
 	int keylen, slot, nr_free, next_slot, i;
 
 	pr_devel("-->%s()\n", __func__);
@@ -1538,6 +1539,7 @@ continue_node:
 		goto descend;
 	}
 
+retry_compress:
 	pr_devel("-- compress node %p --\n", new_n);
 
 	/* Count up the number of empty slots in this node and work out the
@@ -1555,6 +1557,7 @@ continue_node:
 	pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
 
 	/* See what we can fold in */
+	retained = false;
 	next_slot = 0;
 	for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
 		struct assoc_array_shortcut *s;
@@ -1604,9 +1607,14 @@ continue_node:
 			pr_devel("[%d] retain node %lu/%d [nx %d]\n",
 				 slot, child->nr_leaves_on_branch, nr_free + 1,
 				 next_slot);
+			retained = true;
 		}
 	}
 
+	if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
+		pr_devel("internal nodes remain despite enough space, retrying\n");
+		goto retry_compress;
+	}
 	pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
 
 	nr_leaves_on_tree = new_n->nr_leaves_on_branch;
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 75006c4036e9..27e08c0e547e 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -3,17 +3,19 @@
 * lib/bitmap.c
 * Helper functions for bitmap.h.
*/ -#include -#include -#include -#include + #include #include #include +#include +#include +#include +#include #include #include #include #include +#include #include #include @@ -1262,6 +1264,38 @@ void bitmap_free(const unsigned long *bitmap) } EXPORT_SYMBOL(bitmap_free); +static void devm_bitmap_free(void *data) +{ + unsigned long *bitmap = data; + + bitmap_free(bitmap); +} + +unsigned long *devm_bitmap_alloc(struct device *dev, + unsigned int nbits, gfp_t flags) +{ + unsigned long *bitmap; + int ret; + + bitmap = bitmap_alloc(nbits, flags); + if (!bitmap) + return NULL; + + ret = devm_add_action_or_reset(dev, devm_bitmap_free, bitmap); + if (ret) + return NULL; + + return bitmap; +} +EXPORT_SYMBOL_GPL(devm_bitmap_alloc); + +unsigned long *devm_bitmap_zalloc(struct device *dev, + unsigned int nbits, gfp_t flags) +{ + return devm_bitmap_alloc(dev, nbits, flags | __GFP_ZERO); +} +EXPORT_SYMBOL_GPL(devm_bitmap_zalloc); + #if BITS_PER_LONG == 64 /** * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 14c032de276e..0717a0dcefed 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -comment "Crypto library routines" +menu "Crypto library routines" config CRYPTO_LIB_AES tristate @@ -9,14 +9,14 @@ config CRYPTO_LIB_ARC4 tristate config CRYPTO_ARCH_HAVE_LIB_BLAKE2S - tristate + bool help Declares whether the architecture provides an arch-specific accelerated implementation of the Blake2s library interface, either builtin or as a module. config CRYPTO_LIB_BLAKE2S_GENERIC - tristate + def_bool !CRYPTO_ARCH_HAVE_LIB_BLAKE2S help This symbol can be depended upon by arch implementations of the Blake2s library interface that require the generic code as a @@ -24,15 +24,6 @@ config CRYPTO_LIB_BLAKE2S_GENERIC implementation is enabled, this implementation serves the users of CRYPTO_LIB_BLAKE2S. -config CRYPTO_LIB_BLAKE2S - tristate "BLAKE2s hash function library" - depends on CRYPTO_ARCH_HAVE_LIB_BLAKE2S || !CRYPTO_ARCH_HAVE_LIB_BLAKE2S - select CRYPTO_LIB_BLAKE2S_GENERIC if CRYPTO_ARCH_HAVE_LIB_BLAKE2S=n - help - Enable the Blake2s library interface. This interface may be fulfilled - by either the generic implementation or an arch-specific one, if one - is available and enabled. - config CRYPTO_ARCH_HAVE_LIB_CHACHA tristate help @@ -42,7 +33,6 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA config CRYPTO_LIB_CHACHA_GENERIC tristate - select CRYPTO_ALGAPI help This symbol can be depended upon by arch implementations of the ChaCha library interface that require the generic code as a @@ -52,6 +42,7 @@ config CRYPTO_LIB_CHACHA_GENERIC config CRYPTO_LIB_CHACHA tristate "ChaCha library interface" + depends on CRYPTO depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n help @@ -79,6 +70,7 @@ config CRYPTO_LIB_CURVE25519 tristate "Curve25519 scalar multiplication library" depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n + select LIB_MEMNEQ help Enable the Curve25519 library interface. 
This interface may be fulfilled by either the generic implementation or an arch-specific @@ -123,8 +115,12 @@ config CRYPTO_LIB_CHACHA20POLY1305 tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)" depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 + depends on CRYPTO select CRYPTO_LIB_CHACHA select CRYPTO_LIB_POLY1305 + select CRYPTO_ALGAPI config CRYPTO_LIB_SHA256 tristate + +endmenu diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 3a435629d9ce..26be2bbe09c5 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -10,11 +10,10 @@ libaes-y := aes.o obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o libarc4-y := arc4.o -obj-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += libblake2s-generic.o -libblake2s-generic-y += blake2s-generic.o - -obj-$(CONFIG_CRYPTO_LIB_BLAKE2S) += libblake2s.o -libblake2s-y += blake2s.o +# blake2s is used by the /dev/random driver which is always builtin +obj-y += libblake2s.o +libblake2s-y := blake2s.o +libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += blake2s-generic.o obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o libchacha20poly1305-y += chacha20poly1305.o diff --git a/lib/crypto/blake2s-generic.c b/lib/crypto/blake2s-generic.c index 04ff8df24513..75ccb3e633e6 100644 --- a/lib/crypto/blake2s-generic.c +++ b/lib/crypto/blake2s-generic.c @@ -37,7 +37,11 @@ static inline void blake2s_increment_counter(struct blake2s_state *state, state->t[1] += (state->t[0] < inc); } -void blake2s_compress_generic(struct blake2s_state *state,const u8 *block, +void blake2s_compress(struct blake2s_state *state, const u8 *block, + size_t nblocks, const u32 inc) + __weak __alias(blake2s_compress_generic); + +void blake2s_compress_generic(struct blake2s_state *state, const u8 *block, size_t nblocks, const u32 inc) { u32 m[16]; diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c index 5d9ea53be973..409e4b728770 100644 --- a/lib/crypto/blake2s-selftest.c +++ b/lib/crypto/blake2s-selftest.c @@ -15,7 +15,6 @@ * #include * * #include - * #include * * #define BLAKE2S_TESTVEC_COUNT 256 * @@ -58,16 +57,6 @@ * } * printf("};\n\n"); * - * printf("static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n"); - * - * HMAC(EVP_blake2s256(), key, sizeof(key), buf, sizeof(buf), hash, NULL); - * print_vec(hash, BLAKE2S_OUTBYTES); - * - * HMAC(EVP_blake2s256(), buf, sizeof(buf), key, sizeof(key), hash, NULL); - * print_vec(hash, BLAKE2S_OUTBYTES); - * - * printf("};\n"); - * * return 0; *} */ @@ -554,15 +543,6 @@ static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = { 0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, }, }; -static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = { - { 0xce, 0xe1, 0x57, 0x69, 0x82, 0xdc, 0xbf, 0x43, 0xad, 0x56, 0x4c, 0x70, - 0xed, 0x68, 0x16, 0x96, 0xcf, 0xa4, 0x73, 0xe8, 0xe8, 0xfc, 0x32, 0x79, - 0x08, 0x0a, 0x75, 0x82, 0xda, 0x3f, 0x05, 0x11, }, - { 0x77, 0x2f, 0x0c, 0x71, 0x41, 0xf4, 0x4b, 0x2b, 0xb3, 0xc6, 0xb6, 0xf9, - 0x60, 0xde, 0xe4, 0x52, 0x38, 0x66, 0xe8, 0xbf, 0x9b, 0x96, 0xc4, 0x9f, - 0x60, 0xd9, 0x24, 0x37, 0x99, 0xd6, 0xec, 0x31, }, -}; - bool __init blake2s_selftest(void) { u8 key[BLAKE2S_KEY_SIZE]; @@ -607,16 +587,5 @@ bool __init blake2s_selftest(void) } } - if (success) { - blake2s256_hmac(hash, buf, key, sizeof(buf), sizeof(key)); - success &= !memcmp(hash, blake2s_hmac_testvecs[0], BLAKE2S_HASH_SIZE); - - blake2s256_hmac(hash, key, buf, 
sizeof(key), sizeof(buf)); - success &= !memcmp(hash, blake2s_hmac_testvecs[1], BLAKE2S_HASH_SIZE); - - if (!success) - pr_err("blake2s256_hmac self-test: FAIL\n"); - } - return success; } diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index c64ac8bfb6a9..80b194f9a0a0 100644 --- a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -16,63 +16,20 @@ #include #include -#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S) -# define blake2s_compress blake2s_compress_arch -#else -# define blake2s_compress blake2s_compress_generic -#endif - void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen) { - __blake2s_update(state, in, inlen, blake2s_compress); + __blake2s_update(state, in, inlen, false); } EXPORT_SYMBOL(blake2s_update); void blake2s_final(struct blake2s_state *state, u8 *out) { WARN_ON(IS_ENABLED(DEBUG) && !out); - __blake2s_final(state, out, blake2s_compress); + __blake2s_final(state, out, false); memzero_explicit(state, sizeof(*state)); } EXPORT_SYMBOL(blake2s_final); -void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, - const size_t keylen) -{ - struct blake2s_state state; - u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 }; - u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32)); - int i; - - if (keylen > BLAKE2S_BLOCK_SIZE) { - blake2s_init(&state, BLAKE2S_HASH_SIZE); - blake2s_update(&state, key, keylen); - blake2s_final(&state, x_key); - } else - memcpy(x_key, key, keylen); - - for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) - x_key[i] ^= 0x36; - - blake2s_init(&state, BLAKE2S_HASH_SIZE); - blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); - blake2s_update(&state, in, inlen); - blake2s_final(&state, i_hash); - - for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) - x_key[i] ^= 0x5c ^ 0x36; - - blake2s_init(&state, BLAKE2S_HASH_SIZE); - blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); - blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE); - blake2s_final(&state, i_hash); - - memcpy(out, i_hash, BLAKE2S_HASH_SIZE); - memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE); - memzero_explicit(i_hash, BLAKE2S_HASH_SIZE); -} -EXPORT_SYMBOL(blake2s256_hmac); - static int __init mod_init(void) { if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c index a4db51c21266..dae3b51ac3d9 100644 --- a/lib/dim/net_dim.c +++ b/lib/dim/net_dim.c @@ -12,41 +12,41 @@ * Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES */ #define NET_DIM_PARAMS_NUM_PROFILES 5 -#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 -#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128 +#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256 +#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128 #define NET_DIM_DEF_PROFILE_CQE 1 #define NET_DIM_DEF_PROFILE_EQE 1 #define NET_DIM_RX_EQE_PROFILES { \ - {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {.usec = 1, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \ + {.usec = 8, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \ + {.usec = 64, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \ + {.usec = 128, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \ + {.usec = 256, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,} \ } #define NET_DIM_RX_CQE_PROFILES { \ - {2, 256}, \ - {8, 128}, \ - {16, 64}, \ - {32, 64}, \ - {64, 64} \ + {.usec = 2, .pkts 
= 256,}, \ + {.usec = 8, .pkts = 128,}, \ + {.usec = 16, .pkts = 64,}, \ + {.usec = 32, .pkts = 64,}, \ + {.usec = 64, .pkts = 64,} \ } #define NET_DIM_TX_EQE_PROFILES { \ - {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \ + {.usec = 1, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \ + {.usec = 8, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \ + {.usec = 32, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \ + {.usec = 64, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \ + {.usec = 128, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,} \ } #define NET_DIM_TX_CQE_PROFILES { \ - {5, 128}, \ - {8, 64}, \ - {16, 32}, \ - {32, 32}, \ - {64, 32} \ + {.usec = 5, .pkts = 128,}, \ + {.usec = 8, .pkts = 64,}, \ + {.usec = 16, .pkts = 32,}, \ + {.usec = 32, .pkts = 32,}, \ + {.usec = 64, .pkts = 32,} \ } static const struct dim_cq_moder diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 921d0a654243..10a50c03074e 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -207,10 +207,11 @@ static int ddebug_change(const struct ddebug_query *query, continue; #ifdef CONFIG_JUMP_LABEL if (dp->flags & _DPRINTK_FLAGS_PRINT) { - if (!(modifiers->flags & _DPRINTK_FLAGS_PRINT)) + if (!(newflags & _DPRINTK_FLAGS_PRINT)) static_branch_disable(&dp->key.dd_key_true); - } else if (modifiers->flags & _DPRINTK_FLAGS_PRINT) + } else if (newflags & _DPRINTK_FLAGS_PRINT) { static_branch_enable(&dp->key.dd_key_true); + } #endif dp->flags = newflags; v2pr_info("changed %s:%d [%s]%s =%s\n", @@ -379,10 +380,6 @@ static int ddebug_parse_query(char *words[], int nwords, return -EINVAL; } - if (modname) - /* support $modname.dyndbg= */ - query->module = modname; - for (i = 0; i < nwords; i += 2) { char *keyword = words[i]; char *arg = words[i+1]; @@ -423,6 +420,13 @@ static int ddebug_parse_query(char *words[], int nwords, if (rc) return rc; } + if (!query->module && modname) + /* + * support $modname.dyndbg=, when + * not given in the query itself + */ + query->module = modname; + vpr_info_dq(query, "parsed"); return 0; } @@ -548,35 +552,6 @@ static int ddebug_exec_queries(char *query, const char *modname) return nfound; } -/** - * dynamic_debug_exec_queries - select and change dynamic-debug prints - * @query: query-string described in admin-guide/dynamic-debug-howto - * @modname: string containing module name, usually &module.mod_name - * - * This uses the >/proc/dynamic_debug/control reader, allowing module - * authors to modify their dynamic-debug callsites. The modname is - * canonically struct module.mod_name, but can also be null or a - * module-wildcard, for example: "drm*". 
- */ -int dynamic_debug_exec_queries(const char *query, const char *modname) -{ - int rc; - char *qry; /* writable copy of query */ - - if (!query) { - pr_err("non-null query/command string expected\n"); - return -EINVAL; - } - qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL); - if (!qry) - return -ENOMEM; - - rc = ddebug_exec_queries(qry, modname); - kfree(qry); - return rc; -} -EXPORT_SYMBOL_GPL(dynamic_debug_exec_queries); - #define PREFIX_SIZE 64 static int remaining(int wrote) diff --git a/lib/hexdump.c b/lib/hexdump.c index 9301578f98e8..07cda1a13e09 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c @@ -22,15 +22,41 @@ EXPORT_SYMBOL(hex_asc_upper); * * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad * input. + * + * This function is used to load cryptographic keys, so it is coded in such a + * way that there are no conditions or memory accesses that depend on data. + * + * Explanation of the logic: + * (ch - '9' - 1) is negative if ch <= '9' + * ('0' - 1 - ch) is negative if ch >= '0' + * we "and" these two values, so the result is negative if ch is in the range + * '0' ... '9' + * we are only interested in the sign, so we do a shift ">> 8"; note that right + * shift of a negative value is implementation-defined, so we cast the + * value to (unsigned) before the shift --- we have 0xffffff if ch is in + * the range '0' ... '9', 0 otherwise + * we "and" this value with (ch - '0' + 1) --- we have a value 1 ... 10 if ch is + * in the range '0' ... '9', 0 otherwise + * we add this value to -1 --- we have a value 0 ... 9 if ch is in the range '0' + * ... '9', -1 otherwise + * the next line is similar to the previous one, but we need to decode both + * uppercase and lowercase letters, so we use (ch & 0xdf), which converts + * lowercase to uppercase */ +/* + * preserve ABI due to 15b78a8e38e8 ("hex2bin: make the function hex_to_bin + * constant-time") + */ +#ifdef __GENKSYMS__ int hex_to_bin(char ch) +#else +int hex_to_bin(unsigned char ch) +#endif { - if ((ch >= '0') && (ch <= '9')) - return ch - '0'; - ch = tolower(ch); - if ((ch >= 'a') && (ch <= 'f')) - return ch - 'a' + 10; - return -1; + unsigned char cu = ch & 0xdf; + return -1 + + ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) + + ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8); } EXPORT_SYMBOL(hex_to_bin); @@ -45,10 +71,13 @@ EXPORT_SYMBOL(hex_to_bin); int hex2bin(u8 *dst, const char *src, size_t count) { while (count--) { - int hi = hex_to_bin(*src++); - int lo = hex_to_bin(*src++); + int hi, lo; - if ((hi < 0) || (lo < 0)) + hi = hex_to_bin(*src++); + if (unlikely(hi < 0)) + return -EINVAL; + lo = hex_to_bin(*src++); + if (unlikely(lo < 0)) return -EINVAL; *dst++ = (hi << 4) | lo;
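The branch-free decoder above is easy to sanity-check outside the kernel. The following harness is not part of the patch: it copies the new expression verbatim into userspace and compares it against the old branching logic for every possible input byte (hex_to_bin_ct and hex_to_bin_ref are names invented for this sketch):

#include <stdio.h>

/* Verbatim copy of the constant-time decoder added to lib/hexdump.c above. */
static int hex_to_bin_ct(unsigned char ch)
{
	unsigned char cu = ch & 0xdf;

	return -1 +
	       ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
	       ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
}

/* The old branching version, kept as the reference implementation. */
static int hex_to_bin_ref(unsigned char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch |= 0x20;	/* cheap tolower() for ASCII letters */
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

int main(void)
{
	int ch;

	for (ch = 0; ch < 256; ch++)
		if (hex_to_bin_ct(ch) != hex_to_bin_ref(ch))
			printf("mismatch at %#x\n", ch);
	return 0;
}

Exhaustive comparison is cheap here because the domain is a single byte, which is also what makes the constant-time claim auditable.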
diff --git a/lib/idr.c b/lib/idr.c index f4ab4f4aa3c7..7ecdfdb5309e 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -491,7 +491,8 @@ void ida_free(struct ida *ida, unsigned int id) struct ida_bitmap *bitmap; unsigned long flags; - BUG_ON((int)id < 0); + if ((int)id < 0) + return; xas_lock_irqsave(&xas, flags); bitmap = xas_load(&xas); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 1b0a349fbcd9..650554964f18 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1836,24 +1836,38 @@ int import_single_range(int rw, void __user *buf, size_t len, } EXPORT_SYMBOL(import_single_range); -int iov_iter_for_each_range(struct iov_iter *i, size_t bytes, - int (*f)(struct kvec *vec, void *context), - void *context) +/** + * iov_iter_restore() - Restore a &struct iov_iter to the same state as when iov_iter_save_state() was called. + * + * @i: &struct iov_iter to restore + * @state: state to restore from + * + * Used after iov_iter_save_state() to restore @i, if operations may + * have advanced it. + * + * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC + */ +void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state) { - struct kvec w; - int err = -EINVAL; - if (!bytes) - return 0; - - iterate_all_kinds(i, bytes, v, -EINVAL, ({ - w.iov_base = kmap(v.bv_page) + v.bv_offset; - w.iov_len = v.bv_len; - err = f(&w, context); - kunmap(v.bv_page); - err;}), ({ - w = v; - err = f(&w, context);}) - ) - return err; + if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) && + !iov_iter_is_kvec(i)) + return; + i->iov_offset = state->iov_offset; + i->count = state->count; + /* + * For the *vec iters, nr_segs + iov is constant - if we increment + * the vec, then we also decrement the nr_segs count. Hence we don't + * need to track both of these, just one is enough and we can deduct + * the other from that. ITER_KVEC and ITER_IOVEC are the same struct + * size, so we can just increment the iov pointer as they are unionized. + * ITER_BVEC _may_ be the same size on some archs, but on others it is + * not. Be safe and handle it separately. + */ + BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec)); + if (iov_iter_is_bvec(i)) + i->bvec -= state->nr_segs - i->nr_segs; + else + i->iov -= state->nr_segs - i->nr_segs; + i->nr_segs = state->nr_segs; } -EXPORT_SYMBOL(iov_iter_for_each_range);
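The restored fields mirror what iov_iter_save_state() records (that helper is referenced by the kernel-doc above but not shown in this hunk). A hedged sketch of the intended calling pattern, where do_read_once() is a placeholder for any operation that may partially consume the iterator before failing, not a real kernel function:

	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(&iter, &state);	/* snapshot iov_offset, count, nr_segs */
	ret = do_read_once(file, &iter);	/* may advance iter, then return -EAGAIN */
	if (ret == -EAGAIN) {
		/* rewind so the retry sees the original range */
		iov_iter_restore(&iter, &state);
		ret = do_read_once(file, &iter);
	}

Saving three scalars is deliberately cheaper than duplicating the whole iovec array; as the comment in the function explains, nr_segs and the vector pointer can be reconstructed from one another.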
(next=%px).\n", prev, next->prev, next) || CHECK_DATA_CORRUPTION(prev->next != next, @@ -42,7 +46,11 @@ bool __list_del_entry_valid(struct list_head *entry) prev = entry->prev; next = entry->next; - if (CHECK_DATA_CORRUPTION(next == LIST_POISON1, + if (CHECK_DATA_CORRUPTION(next == NULL, + "list_del corruption, %px->next is NULL\n", entry) || + CHECK_DATA_CORRUPTION(prev == NULL, + "list_del corruption, %px->prev is NULL\n", entry) || + CHECK_DATA_CORRUPTION(next == LIST_POISON1, "list_del corruption, %px->next is LIST_POISON1 (%px)\n", entry, LIST_POISON1) || CHECK_DATA_CORRUPTION(prev == LIST_POISON2, diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c index 7ac845f65be5..133929e0ce8f 100644 --- a/lib/livepatch/test_klp_callbacks_busy.c +++ b/lib/livepatch/test_klp_callbacks_busy.c @@ -16,10 +16,12 @@ MODULE_PARM_DESC(block_transition, "block_transition (default=false)"); static void busymod_work_func(struct work_struct *work); static DECLARE_WORK(work, busymod_work_func); +static DECLARE_COMPLETION(busymod_work_started); static void busymod_work_func(struct work_struct *work) { pr_info("%s enter\n", __func__); + complete(&busymod_work_started); while (READ_ONCE(block_transition)) { /* @@ -37,6 +39,12 @@ static int test_klp_callbacks_busy_init(void) pr_info("%s\n", __func__); schedule_work(&work); + /* + * To synchronize kernel messages, hold the init function from + * exiting until the work function's entry message has printed. + */ + wait_for_completion(&busymod_work_started); + if (!block_transition) { /* * Serialize output: print all messages from the work diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index 8a7724a6ce2f..5b6705c4b2d2 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c @@ -271,8 +271,12 @@ static FORCE_INLINE int LZ4_decompress_generic( ip += length; op += length; - /* Necessarily EOF, due to parsing restrictions */ - if (!partialDecoding || (cpy == oend)) + /* Necessarily EOF when !partialDecoding. + * When partialDecoding, it is EOF if we've either + * filled the output buffer or + * can't proceed with reading an offset for following match. + */ + if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2))) break; } else { /* may overwrite up to WILDCOPYLENGTH beyond cpy */ diff --git a/crypto/memneq.c b/lib/memneq.c similarity index 100% rename from crypto/memneq.c rename to lib/memneq.c diff --git a/lib/nodemask.c b/lib/nodemask.c index 3aa454c54c0d..e22647f5181b 100644 --- a/lib/nodemask.c +++ b/lib/nodemask.c @@ -3,9 +3,9 @@ #include #include -int __next_node_in(int node, const nodemask_t *srcp) +unsigned int __next_node_in(int node, const nodemask_t *srcp) { - int ret = __next_node(node, srcp); + unsigned int ret = __next_node(node, srcp); if (ret == MAX_NUMNODES) ret = __first_node(srcp); diff --git a/lib/once.c b/lib/once.c index 8b7d6235217e..1b71cd72eab4 100644 --- a/lib/once.c +++ b/lib/once.c @@ -61,3 +61,33 @@ void __do_once_done(bool *done, struct static_key_true *once_key, once_disable_jump(once_key); } EXPORT_SYMBOL(__do_once_done); + +static DEFINE_MUTEX(once_mutex); + +bool __do_once_slow_start(bool *done) + __acquires(once_mutex) +{ + mutex_lock(&once_mutex); + if (*done) { + mutex_unlock(&once_mutex); + /* Keep sparse happy by restoring an even lock count on + * this mutex. In case we return here, we don't call into + * __do_once_done but return early in the DO_ONCE_SLOW() macro. 
+ */ + __acquire(once_mutex); + return false; + } + + return true; +} +EXPORT_SYMBOL(__do_once_slow_start); + +void __do_once_slow_done(bool *done, struct static_key_true *once_key, + struct module *mod) + __releases(once_mutex) +{ + *done = true; + mutex_unlock(&once_mutex); + once_disable_jump(once_key); +} +EXPORT_SYMBOL(__do_once_slow_done); diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index e59eda07305e..493093b97093 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c @@ -75,6 +75,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release, data = kzalloc(sizeof(*ref->data), gfp); if (!data) { free_percpu((void __percpu *)ref->percpu_count_ptr); + ref->percpu_count_ptr = 0; return -ENOMEM; } diff --git a/lib/random32.c b/lib/random32.c index 4d0e05e471d7..f0ab17c2244b 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -39,8 +39,9 @@ #include #include #include +#include +#include #include -#include /** * prandom_u32_state - seeded pseudo-random number generator. @@ -386,7 +387,6 @@ u32 prandom_u32(void) struct siprand_state *state = get_cpu_ptr(&net_rand_state); u32 res = siprand_u32(state); - trace_prandom_u32(res); put_cpu_ptr(&net_rand_state); return res; } @@ -552,9 +552,11 @@ static void prandom_reseed(struct timer_list *unused) * To avoid worrying about whether it's safe to delay that interrupt * long enough to seed all CPUs, just schedule an immediate timer event. */ -static void prandom_timer_start(struct random_ready_callback *unused) +static int prandom_timer_start(struct notifier_block *nb, + unsigned long action, void *data) { mod_timer(&seed_timer, jiffies); + return 0; } #ifdef CONFIG_RANDOM32_SELFTEST @@ -618,13 +620,13 @@ core_initcall(prandom32_state_selftest); */ static int __init prandom_init_late(void) { - static struct random_ready_callback random_ready = { - .func = prandom_timer_start + static struct notifier_block random_ready = { + .notifier_call = prandom_timer_start }; - int ret = add_random_ready_callback(&random_ready); + int ret = register_random_ready_notifier(&random_ready); if (ret == -EALREADY) { - prandom_timer_start(&random_ready); + prandom_timer_start(&random_ready, 0, NULL); ret = 0; } return ret; } diff --git a/lib/ratelimit.c b/lib/ratelimit.c index e01a93f46f83..ce945c17980b 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c @@ -26,10 +26,16 @@ */ int ___ratelimit(struct ratelimit_state *rs, const char *func) { + /* Paired with WRITE_ONCE() in .proc_handler(). + * Changing two values separately could be inconsistent + * and some messages could be lost. (See: net_ratelimit_state).
+ */ + int interval = READ_ONCE(rs->interval); + int burst = READ_ONCE(rs->burst); unsigned long flags; int ret; - if (!rs->interval) + if (!interval) return 1; /* @@ -44,7 +50,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) if (!rs->begin) rs->begin = jiffies; - if (time_is_before_jiffies(rs->begin + rs->interval)) { + if (time_is_before_jiffies(rs->begin + interval)) { if (rs->missed) { if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) { printk_deferred(KERN_WARNING @@ -56,7 +62,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) rs->begin = jiffies; rs->printed = 0; } - if (rs->burst && rs->burst > rs->printed) { + if (burst && burst > rs->printed) { rs->printed++; ret = 1; } else { diff --git a/lib/sha1.c b/lib/sha1.c index 49257a915bb6..5ad4e4948272 100644 --- a/lib/sha1.c +++ b/lib/sha1.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -55,7 +56,8 @@ #define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \ __u32 TEMP = input(t); setW(t, TEMP); \ E += TEMP + rol32(A,5) + (fn) + (constant); \ - B = ror32(B, 2); } while (0) + B = ror32(B, 2); \ + TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0) #define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) #define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) @@ -84,6 +86,7 @@ void sha1_transform(__u32 *digest, const char *data, __u32 *array) { __u32 A, B, C, D, E; + unsigned int i = 0; A = digest[0]; B = digest[1]; @@ -92,94 +95,24 @@ void sha1_transform(__u32 *digest, const char *data, __u32 *array) E = digest[4]; /* Round 1 - iterations 0-16 take their input from 'data' */ - T_0_15( 0, A, B, C, D, E); - T_0_15( 1, E, A, B, C, D); - T_0_15( 2, D, E, A, B, C); - T_0_15( 3, C, D, E, A, B); - T_0_15( 4, B, C, D, E, A); - T_0_15( 5, A, B, C, D, E); - T_0_15( 6, E, A, B, C, D); - T_0_15( 7, D, E, A, B, C); - T_0_15( 8, C, D, E, A, B); - T_0_15( 9, B, C, D, E, A); - T_0_15(10, A, B, C, D, E); - T_0_15(11, E, A, B, C, D); - T_0_15(12, D, E, A, B, C); - T_0_15(13, C, D, E, A, B); - T_0_15(14, B, C, D, E, A); - T_0_15(15, A, B, C, D, E); + for (; i < 16; ++i) + T_0_15(i, A, B, C, D, E); /* Round 1 - tail. 
Input from 512-bit mixing array */ - T_16_19(16, E, A, B, C, D); - T_16_19(17, D, E, A, B, C); - T_16_19(18, C, D, E, A, B); - T_16_19(19, B, C, D, E, A); + for (; i < 20; ++i) + T_16_19(i, A, B, C, D, E); /* Round 2 */ - T_20_39(20, A, B, C, D, E); - T_20_39(21, E, A, B, C, D); - T_20_39(22, D, E, A, B, C); - T_20_39(23, C, D, E, A, B); - T_20_39(24, B, C, D, E, A); - T_20_39(25, A, B, C, D, E); - T_20_39(26, E, A, B, C, D); - T_20_39(27, D, E, A, B, C); - T_20_39(28, C, D, E, A, B); - T_20_39(29, B, C, D, E, A); - T_20_39(30, A, B, C, D, E); - T_20_39(31, E, A, B, C, D); - T_20_39(32, D, E, A, B, C); - T_20_39(33, C, D, E, A, B); - T_20_39(34, B, C, D, E, A); - T_20_39(35, A, B, C, D, E); - T_20_39(36, E, A, B, C, D); - T_20_39(37, D, E, A, B, C); - T_20_39(38, C, D, E, A, B); - T_20_39(39, B, C, D, E, A); + for (; i < 40; ++i) + T_20_39(i, A, B, C, D, E); /* Round 3 */ - T_40_59(40, A, B, C, D, E); - T_40_59(41, E, A, B, C, D); - T_40_59(42, D, E, A, B, C); - T_40_59(43, C, D, E, A, B); - T_40_59(44, B, C, D, E, A); - T_40_59(45, A, B, C, D, E); - T_40_59(46, E, A, B, C, D); - T_40_59(47, D, E, A, B, C); - T_40_59(48, C, D, E, A, B); - T_40_59(49, B, C, D, E, A); - T_40_59(50, A, B, C, D, E); - T_40_59(51, E, A, B, C, D); - T_40_59(52, D, E, A, B, C); - T_40_59(53, C, D, E, A, B); - T_40_59(54, B, C, D, E, A); - T_40_59(55, A, B, C, D, E); - T_40_59(56, E, A, B, C, D); - T_40_59(57, D, E, A, B, C); - T_40_59(58, C, D, E, A, B); - T_40_59(59, B, C, D, E, A); + for (; i < 60; ++i) + T_40_59(i, A, B, C, D, E); /* Round 4 */ - T_60_79(60, A, B, C, D, E); - T_60_79(61, E, A, B, C, D); - T_60_79(62, D, E, A, B, C); - T_60_79(63, C, D, E, A, B); - T_60_79(64, B, C, D, E, A); - T_60_79(65, A, B, C, D, E); - T_60_79(66, E, A, B, C, D); - T_60_79(67, D, E, A, B, C); - T_60_79(68, C, D, E, A, B); - T_60_79(69, B, C, D, E, A); - T_60_79(70, A, B, C, D, E); - T_60_79(71, E, A, B, C, D); - T_60_79(72, D, E, A, B, C); - T_60_79(73, C, D, E, A, B); - T_60_79(74, B, C, D, E, A); - T_60_79(75, A, B, C, D, E); - T_60_79(76, E, A, B, C, D); - T_60_79(77, D, E, A, B, C); - T_60_79(78, C, D, E, A, B); - T_60_79(79, B, C, D, E, A); + for (; i < 80; ++i) + T_60_79(i, A, B, C, D, E); digest[0] += A; digest[1] += B; diff --git a/lib/siphash.c b/lib/siphash.c index 025f0cbf6d7a..b4055b1cc2f6 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -18,19 +18,13 @@ #include #endif -#define SIPROUND \ - do { \ - v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \ - v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \ - v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \ - v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \ - } while (0) +#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3) #define PREAMBLE(len) \ - u64 v0 = 0x736f6d6570736575ULL; \ - u64 v1 = 0x646f72616e646f6dULL; \ - u64 v2 = 0x6c7967656e657261ULL; \ - u64 v3 = 0x7465646279746573ULL; \ + u64 v0 = SIPHASH_CONST_0; \ + u64 v1 = SIPHASH_CONST_1; \ + u64 v2 = SIPHASH_CONST_2; \ + u64 v3 = SIPHASH_CONST_3; \ u64 b = ((u64)(len)) << 56; \ v3 ^= key->key[1]; \ v2 ^= key->key[0]; \ @@ -389,19 +383,13 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, } EXPORT_SYMBOL(hsiphash_4u32); #else -#define HSIPROUND \ - do { \ - v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \ - v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \ - v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \ - v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \ - } while (0) +#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3) #define HPREAMBLE(len) \ - u32 v0 = 0; \ - u32 
v1 = 0; \ - u32 v2 = 0x6c796765U; \ - u32 v3 = 0x74656462U; \ + u32 v0 = HSIPHASH_CONST_0; \ + u32 v1 = HSIPHASH_CONST_1; \ + u32 v2 = HSIPHASH_CONST_2; \ + u32 v3 = HSIPHASH_CONST_3; \ u32 b = ((u32)(len)) << 24; \ v3 ^= key->key[1]; \ v2 ^= key->key[0]; \ diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 525222e4f409..2916606a9333 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -46,9 +46,9 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2) printk("caller is %pS\n", __builtin_return_address(0)); dump_stack(); - instrumentation_end(); out_enable: + instrumentation_end(); preempt_enable_no_resched_notrace(); out: return this_cpu; diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 4a9137c8551a..8761b9797073 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -6918,9 +6918,9 @@ static struct skb_segment_test skb_segment_tests[] __initconst = { .build_skb = build_test_skb_linear_no_head_frag, .features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_GSO | - NETIF_F_LLTX_BIT | NETIF_F_GRO | + NETIF_F_LLTX | NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_STAG_TX_BIT + NETIF_F_HW_VLAN_STAG_TX } }; diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c index 7e7bbd0f3fd2..5d98d98e2866 100644 --- a/lib/test_ubsan.c +++ b/lib/test_ubsan.c @@ -98,23 +98,11 @@ static void test_ubsan_misaligned_access(void) *ptr = val; } -static void test_ubsan_object_size_mismatch(void) -{ - /* "((aligned(8)))" helps this not into be misaligned for ptr-access. */ - volatile int val __aligned(8) = 4; - volatile long long *ptr, val2; - - UBSAN_TEST(CONFIG_UBSAN_OBJECT_SIZE); - ptr = (long long *)&val; - val2 = *ptr; -} - static const test_ubsan_fp test_ubsan_array[] = { test_ubsan_shift_out_of_bounds, test_ubsan_out_of_bounds, test_ubsan_load_invalid_value, test_ubsan_misaligned_access, - test_ubsan_object_size_mismatch, }; /* Excluded because they Oops the module. 
*/ diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile index c415a685d61b..e814061d6aa0 100644 --- a/lib/vdso/Makefile +++ b/lib/vdso/Makefile @@ -17,6 +17,6 @@ $(error ARCH_REL_TYPE_ABS is not set) endif quiet_cmd_vdso_check = VDSOCHK $@ - cmd_vdso_check = if $(OBJDUMP) -R $@ | egrep -h "$(ARCH_REL_TYPE_ABS)"; \ + cmd_vdso_check = if $(OBJDUMP) -R $@ | grep -E -h "$(ARCH_REL_TYPE_ABS)"; \ then (echo >&2 "$@: dynamic relocations are not supported"; \ rm -f $@; /bin/false); fi diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c index 2919f1698140..c6f6dee08746 100644 --- a/lib/vdso/gettimeofday.c +++ b/lib/vdso/gettimeofday.c @@ -46,8 +46,8 @@ static inline bool vdso_cycles_ok(u64 cycles) #endif #ifdef CONFIG_TIME_NS -static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, - struct __kernel_timespec *ts) +static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, + struct __kernel_timespec *ts) { const struct vdso_data *vd = __arch_get_timens_vdso_data(); const struct timens_offset *offs = &vdns->offset[clk]; @@ -97,8 +97,8 @@ static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void) return NULL; } -static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, - struct __kernel_timespec *ts) +static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, + struct __kernel_timespec *ts) { return -EINVAL; } @@ -159,8 +159,8 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk, } #ifdef CONFIG_TIME_NS -static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk, - struct __kernel_timespec *ts) +static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk, + struct __kernel_timespec *ts) { const struct vdso_data *vd = __arch_get_timens_vdso_data(); const struct vdso_timestamp *vdso_ts = &vd->basetime[clk]; @@ -188,8 +188,8 @@ static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk, return 0; } #else -static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk, - struct __kernel_timespec *ts) +static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk, + struct __kernel_timespec *ts) { return -1; } diff --git a/lib/vsprintf.c b/lib/vsprintf.c index ed2f709de0db..5a1bf311b913 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -53,6 +53,10 @@ #include #include "kstrtox.h" +/* Disable pointer hashing if requested */ +bool no_hash_pointers __ro_after_init; +EXPORT_SYMBOL_GPL(no_hash_pointers); + static unsigned long long simple_strntoull(const char *startp, size_t max_chars, char **endp, unsigned int base) { @@ -756,14 +760,16 @@ static void enable_ptr_key_workfn(struct work_struct *work) static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); -static void fill_random_ptr_key(struct random_ready_callback *unused) +static int fill_random_ptr_key(struct notifier_block *nb, + unsigned long action, void *data) { /* This may be in an interrupt handler. 
*/ queue_work(system_unbound_wq, &enable_ptr_key_work); + return 0; } -static struct random_ready_callback random_ready = { - .func = fill_random_ptr_key +static struct notifier_block random_ready = { + .notifier_call = fill_random_ptr_key }; static int __init initialize_ptr_random(void) @@ -777,7 +783,7 @@ static int __init initialize_ptr_random(void) return 0; } - ret = add_random_ready_callback(&random_ready); + ret = register_random_ready_notifier(&random_ready); if (!ret) { return 0; } else if (ret == -EALREADY) { @@ -847,6 +853,19 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr, return pointer_string(buf, end, (const void *)hashval, spec); } +static char *default_pointer(char *buf, char *end, const void *ptr, + struct printf_spec spec) +{ + /* + * default is to _not_ leak addresses, so hash before printing, + * unless no_hash_pointers is specified on the command line. + */ + if (unlikely(no_hash_pointers)) + return pointer_string(buf, end, ptr, spec); + + return ptr_to_id(buf, end, ptr, spec); +} + int kptr_restrict __read_mostly; static noinline_for_stack @@ -856,7 +875,7 @@ char *restricted_pointer(char *buf, char *end, const void *ptr, switch (kptr_restrict) { case 0: /* Handle as %p, hash and do _not_ leak addresses. */ - return ptr_to_id(buf, end, ptr, spec); + return default_pointer(buf, end, ptr, spec); case 1: { const struct cred *cred; @@ -2116,10 +2135,6 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode, return widen_string(buf, buf - buf_start, end, spec); } -/* Disable pointer hashing if requested */ -bool no_hash_pointers __ro_after_init; -EXPORT_SYMBOL_GPL(no_hash_pointers); - static int __init no_hash_pointers_enable(char *str) { no_hash_pointers = true; @@ -2337,7 +2352,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, case 'e': /* %pe with a non-ERR_PTR gets treated as plain %p */ if (!IS_ERR(ptr)) - break; + return default_pointer(buf, end, ptr, spec); return err_ptr(buf, end, ptr, spec); case 'u': case 'k': @@ -2347,16 +2362,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, default: return error_string(buf, end, "(einval)", spec); } + default: + return default_pointer(buf, end, ptr, spec); } - - /* - * default is to _not_ leak addresses, so hash before printing, - * unless no_hash_pointers is specified on the command line. - */ - if (unlikely(no_hash_pointers)) - return pointer_string(buf, end, ptr, spec); - else - return ptr_to_id(buf, end, ptr, spec); } /* diff --git a/mm/Kconfig b/mm/Kconfig index d1135de34208..b1799bb9dcd9 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -859,7 +859,7 @@ config SPECULATIVE_PAGE_FAULT bool "Speculative page faults" default y depends on ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT - depends on MMU && SMP + depends on MMU && SMP && !NUMA help Try to handle user space page faults without holding the mmap_sem. 
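Stepping back to the vsprintf.c hunk above: moving no_hash_pointers ahead of its first user lets default_pointer() hold the whole hash-unless-overridden policy in one place, and %pe with a non-ERR_PTR value now falls through to that same path. A standalone userspace sketch of the policy follows; the hash stub is purely illustrative, since the kernel's ptr_to_id() uses siphash keyed with boot-time randomness:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool no_hash_pointers;	/* kernel: set by the no_hash_pointers boot option */

/* Illustrative stand-in for the kernel's siphash-based ptr_to_id(). */
static uintptr_t hash_ptr(const void *p)
{
	return ((uintptr_t)p * (uintptr_t)0x9e3779b97f4a7c15ULL) >> 16;
}

static void print_pointer(const void *p)
{
	if (no_hash_pointers)
		printf("%p\n", p);				/* raw address, opt-in for debugging */
	else
		printf("%#jx\n", (uintmax_t)hash_ptr(p));	/* hashed id, the default */
}

int main(void)
{
	int x;

	print_pointer(&x);	/* hashed by default */
	no_hash_pointers = true;
	print_pointer(&x);	/* raw once the override is set */
	return 0;
}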
diff --git a/mm/cma.c b/mm/cma.c index 2e82b2758a64..bf1cc2ef8cc9 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -47,6 +47,7 @@ extern void lru_cache_enable(void); struct cma cma_areas[MAX_CMA_AREAS]; unsigned cma_area_count; +static DEFINE_MUTEX(cma_mutex); phys_addr_t cma_get_base(const struct cma *cma) { @@ -534,7 +535,9 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, lru_cache_enable(); goto out; } + mutex_lock(&cma_mutex); ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask, &info); + mutex_unlock(&cma_mutex); cma_info.nr_migrated += info.nr_migrated; cma_info.nr_reclaimed += info.nr_reclaimed; cma_info.nr_mapped += info.nr_mapped; diff --git a/mm/compaction.c b/mm/compaction.c index 822e6221b0a9..9922a6f12e7d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1772,6 +1772,8 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc) update_fast_start_pfn(cc, free_pfn); pfn = pageblock_start_pfn(free_pfn); + if (pfn < cc->zone->zone_start_pfn) + pfn = cc->zone->zone_start_pfn; cc->fast_search_fail = 0; found_block = true; set_pageblock_skip(freepage); diff --git a/mm/damon/core.c b/mm/damon/core.c index 1dd153c31c9e..b194846f809b 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -155,6 +155,7 @@ struct damon_target *damon_new_target(unsigned long id) t->id = id; t->nr_regions = 0; INIT_LIST_HEAD(&t->regions_list); + INIT_LIST_HEAD(&t->list); return t; } diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c index 5b899601e56c..f2772fc740c0 100644 --- a/mm/damon/dbgfs.c +++ b/mm/damon/dbgfs.c @@ -721,6 +721,9 @@ static int dbgfs_mk_context(char *name) return -ENOENT; new_dir = debugfs_create_dir(name, root); + /* Below check is required for a potential duplicated name case */ + if (IS_ERR(new_dir)) + return PTR_ERR(new_dir); dbgfs_dirs[dbgfs_nr_ctxs] = new_dir; new_ctx = dbgfs_new_ctx(); @@ -784,6 +787,7 @@ static int dbgfs_rm_context(char *name) struct dentry *root, *dir, **new_dirs; struct damon_ctx **new_ctxs; int i, j; + int ret = 0; if (damon_nr_running_ctxs()) return -EBUSY; @@ -798,14 +802,16 @@ static int dbgfs_rm_context(char *name) new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs), GFP_KERNEL); - if (!new_dirs) - return -ENOMEM; + if (!new_dirs) { + ret = -ENOMEM; + goto out_dput; + } new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs), GFP_KERNEL); if (!new_ctxs) { - kfree(new_dirs); - return -ENOMEM; + ret = -ENOMEM; + goto out_new_dirs; } for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) { @@ -825,7 +831,13 @@ static int dbgfs_rm_context(char *name) dbgfs_ctxs = new_ctxs; dbgfs_nr_ctxs--; - return 0; + goto out_dput; + +out_new_dirs: + kfree(new_dirs); +out_dput: + dput(dir); + return ret; } static ssize_t dbgfs_rm_context_write(struct file *file, diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 89b6468da2b9..c0dec53b2330 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -375,6 +375,11 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, if (pmd_huge(*pmd)) { ptl = pmd_lock(walk->mm, pmd); + if (!pmd_present(*pmd)) { + spin_unlock(ptl); + return 0; + } + if (pmd_huge(*pmd)) { damon_pmdp_mkold(pmd, walk->mm, addr); spin_unlock(ptl); @@ -410,8 +415,7 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm, if (pte_young(entry)) { referenced = true; entry = pte_mkold(entry); - huge_ptep_set_access_flags(vma, addr, pte, entry, - vma->vm_flags & VM_WRITE); + set_huge_pte_at(mm, addr, pte, entry); } #ifdef CONFIG_MMU_NOTIFIER @@ -506,6 +510,11 @@ static int 
damon_young_pmd_entry(pmd_t *pmd, unsigned long addr, #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (pmd_huge(*pmd)) { ptl = pmd_lock(walk->mm, pmd); + if (!pmd_present(*pmd)) { + spin_unlock(ptl); + return 0; + } + if (!pmd_huge(*pmd)) { spin_unlock(ptl); goto regular_page; diff --git a/mm/filemap.c b/mm/filemap.c index 632734e9f8f1..aeb45f36aba1 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2706,7 +2706,9 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf, * it in the page cache, and handles the special cases reasonably without * having a lot of duplicated code. * - * vma->vm_mm->mmap_lock must be held on entry. + * If FAULT_FLAG_SPECULATIVE is set, this function runs with elevated vma + * refcount and with mmap lock not held. + * Otherwise, vma->vm_mm->mmap_lock must be held on entry. * * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock * may be dropped before doing I/O or by lock_page_maybe_drop_mmap(). @@ -2732,6 +2734,47 @@ vm_fault_t filemap_fault(struct vm_fault *vmf) vm_fault_t ret = 0; bool retry = false; + if (vmf->flags & FAULT_FLAG_SPECULATIVE) { + page = find_get_page(mapping, offset); + if (unlikely(!page) || unlikely(PageReadahead(page))) + return VM_FAULT_RETRY; + + if (!trylock_page(page)) + return VM_FAULT_RETRY; + + if (unlikely(compound_head(page)->mapping != mapping)) + goto page_unlock; + VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); + if (unlikely(!PageUptodate(page))) + goto page_unlock; + + max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); + if (unlikely(offset >= max_off)) + goto page_unlock; + + /* + * Update readahead mmap_miss statistic. + * + * Note that we are not sure if finish_fault() will + * manage to complete the transaction. If it fails, + * we'll come back to filemap_fault() non-speculative + * case which will update mmap_miss a second time. + * This is not ideal, we would prefer to guarantee the + * update will happen exactly once. + */ + if (!(vmf->vma->vm_flags & VM_RAND_READ) && ra->ra_pages) { + unsigned int mmap_miss = READ_ONCE(ra->mmap_miss); + if (mmap_miss) + WRITE_ONCE(ra->mmap_miss, --mmap_miss); + } + + vmf->page = page; + return VM_FAULT_LOCKED; +page_unlock: + unlock_page(page); + return VM_FAULT_RETRY; + } + max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); if (unlikely(offset >= max_off)) return VM_FAULT_SIGBUS; @@ -2874,11 +2917,6 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page) } if (pmd_none(*vmf->pmd)) { - if (vmf->flags & FAULT_FLAG_SPECULATIVE) { - unlock_page(page); - put_page(page); - return true; - } vmf->ptl = pmd_lock(mm, vmf->pmd); if (likely(pmd_none(*vmf->pmd))) { mm_inc_nr_ptes(mm); @@ -2976,20 +3014,16 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, XA_STATE(xas, &mapping->i_pages, start_pgoff); struct page *head, *page; unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss); - vm_fault_t ret = 0; + vm_fault_t ret = (vmf->flags & FAULT_FLAG_SPECULATIVE) ? 
+ VM_FAULT_RETRY : 0; rcu_read_lock(); head = first_map_page(mapping, &xas, end_pgoff); if (!head) goto out; - if (filemap_map_pmd(vmf, head)) { - if (pmd_none(*vmf->pmd) && - vmf->flags & FAULT_FLAG_SPECULATIVE) { - ret = VM_FAULT_RETRY; - goto out; - } - + if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) && + filemap_map_pmd(vmf, head)) { ret = VM_FAULT_NOPAGE; goto out; } @@ -2998,7 +3032,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, if (!pte_map_lock_addr(vmf, addr)) { unlock_page(head); put_page(head); - ret = VM_FAULT_RETRY; goto out; } @@ -3438,7 +3471,7 @@ ssize_t generic_perform_write(struct file *file, unsigned long offset; /* Offset into pagecache page */ unsigned long bytes; /* Bytes to write to page */ size_t copied; /* Bytes copied from user */ - void *fsdata; + void *fsdata = NULL; offset = (pos & (PAGE_SIZE - 1)); bytes = min_t(unsigned long, PAGE_SIZE - offset, diff --git a/mm/frame_vector.c b/mm/frame_vector.c index 10f82d5643b6..0e589a9a8801 100644 --- a/mm/frame_vector.c +++ b/mm/frame_vector.c @@ -37,7 +37,6 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; int ret = 0; - int err; int locked; if (nr_frames == 0) @@ -74,32 +73,14 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, vec->is_pfns = false; ret = pin_user_pages_locked(start, nr_frames, gup_flags, (struct page **)(vec->ptrs), &locked); - goto out; + if (likely(ret > 0)) + goto out; } - vec->got_ref = false; - vec->is_pfns = true; - do { - unsigned long *nums = frame_vector_pfns(vec); + /* This used to (racily) return non-refcounted pfns. Let people know */ + WARN_ONCE(1, "get_vaddr_frames() cannot follow VM_IO mapping"); + vec->nr_frames = 0; - while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) { - err = follow_pfn(vma, start, &nums[ret]); - if (err) { - if (ret == 0) - ret = err; - goto out; - } - start += PAGE_SIZE; - ret++; - } - /* - * We stop if we have enough pages or if VMA doesn't completely - * cover the tail page. - */ - if (ret >= nr_frames || start < vma->vm_end) - break; - vma = find_vma_intersection(mm, start, start + 1); - } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP)); out: if (locked) mmap_read_unlock(mm); diff --git a/mm/gup.c b/mm/gup.c index 54b0a694712e..0c8affbb3870 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -436,6 +436,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma, if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == (FOLL_PIN | FOLL_GET))) return ERR_PTR(-EINVAL); + + /* + * Considering PTE level hugetlb, like continuous-PTE hugetlb on + * ARM64 architecture. 
+ */ + if (is_vm_hugetlb_page(vma)) { + page = follow_huge_pmd_pte(vma, address, flags); + if (page) + return page; + return no_page_table(vma, flags); + } + retry: if (unlikely(pmd_bad(*pmd))) return no_page_table(vma, flags); @@ -591,7 +603,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma, if (pmd_none(pmdval)) return no_page_table(vma, flags); if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) { - page = follow_huge_pmd(mm, address, pmd, flags); + page = follow_huge_pmd_pte(vma, address, flags); if (page) return page; return no_page_table(vma, flags); @@ -2106,8 +2118,28 @@ static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, } #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL -static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, - unsigned int flags, struct page **pages, int *nr) +/* + * Fast-gup relies on pte change detection to avoid concurrent pgtable + * operations. + * + * To pin the page, fast-gup needs to do below in order: + * (1) pin the page (by prefetching pte), then (2) check pte not changed. + * + * For the rest of pgtable operations where pgtable updates can be racy + * with fast-gup, we need to do (1) clear pte, then (2) check whether page + * is pinned. + * + * Above will work for all pte-level operations, including THP split. + * + * For THP collapse, it's a bit more complicated because fast-gup may be + * walking a pgtable page that is being freed (pte is still valid but pmd + * can be cleared already). To avoid race in such condition, we need to + * also check pmd here to make sure pmd doesn't change (corresponds to + * pmdp_collapse_flush() in the THP collapse code path). + */ +static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, + unsigned long end, unsigned int flags, + struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; int nr_start = *nr, ret = 0; @@ -2147,7 +2179,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, if (!head) goto pte_unmap; - if (unlikely(pte_val(pte) != pte_val(*ptep))) { + if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || + unlikely(pte_val(pte) != pte_val(*ptep))) { put_compound_head(head, 1, flags); goto pte_unmap; } @@ -2192,8 +2225,9 @@ pte_unmap: * get_user_pages_fast_only implementation that can pin pages. Thus it's still * useful to have gup_huge_pmd even if we can't operate on ptes. 
*/ -static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, - unsigned int flags, struct page **pages, int *nr) +static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, + unsigned long end, unsigned int flags, + struct page **pages, int *nr) { return 0; } @@ -2500,7 +2534,7 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, PMD_SHIFT, next, flags, pages, nr)) return 0; - } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr)) + } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) return 0; } while (pmdp++, addr = next, addr != end); @@ -2520,7 +2554,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo next = pud_addr_end(addr, end); if (unlikely(!pud_present(pud))) return 0; - if (unlikely(pud_huge(pud))) { + if (unlikely(pud_huge(pud) || pud_devmap(pud))) { if (!gup_huge_pud(pud, pudp, addr, next, flags, pages, nr)) return 0; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1eea5787fd63..993816e3fc09 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -33,7 +33,7 @@ #include #include #include - +#include #include #include #include "internal.h" @@ -1481,7 +1481,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) */ get_page(page); spin_unlock(vmf->ptl); - anon_vma = page_lock_anon_vma_read(page); + anon_vma = page_lock_anon_vma_read(page, NULL); /* Confirm the PMD did not change while page_table_lock was released */ spin_lock(vmf->ptl); @@ -1691,7 +1691,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); entry = pmd_to_swp_entry(orig_pmd); - page = pfn_to_page(swp_offset(entry)); + page = migration_entry_to_page(entry); flush_needed = 0; } else WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); @@ -2033,6 +2033,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; unsigned long addr; int i; + bool success = false; VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); VM_BUG_ON_VMA(vma->vm_start > haddr, vma); @@ -2110,7 +2111,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, swp_entry_t entry; entry = pmd_to_swp_entry(old_pmd); - page = pfn_to_page(swp_offset(entry)); + page = migration_entry_to_page(entry); write = is_write_migration_entry(entry); young = false; soft_dirty = pmd_swp_soft_dirty(old_pmd); @@ -2164,8 +2165,12 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, pte = pte_offset_map(&_pmd, addr); BUG_ON(!pte_none(*pte)); set_pte_at(mm, addr, pte, entry); - if (!pmd_migration) - atomic_inc(&page[i]._mapcount); + if (!pmd_migration) { + trace_android_vh_update_page_mapcount(&page[i], true, + false, NULL, &success); + if (!success) + atomic_inc(&page[i]._mapcount); + } pte_unmap(pte); } @@ -2176,8 +2181,12 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, */ if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { - for (i = 0; i < HPAGE_PMD_NR; i++) - atomic_inc(&page[i]._mapcount); + for (i = 0; i < HPAGE_PMD_NR; i++) { + trace_android_vh_update_page_mapcount(&page[i], true, + false, NULL, &success); + if (!success) + atomic_inc(&page[i]._mapcount); + } } lock_page_memcg(page); @@ -2186,8 +2195,12 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, __dec_lruvec_page_state(page, NR_ANON_THPS); if 
(TestClearPageDoubleMap(page)) { /* No need in mapcount reference anymore */ - for (i = 0; i < HPAGE_PMD_NR; i++) - atomic_dec(&page[i]._mapcount); + for (i = 0; i < HPAGE_PMD_NR; i++) { + trace_android_vh_update_page_mapcount(&page[i], + false, false, NULL, &success); + if (!success) + atomic_dec(&page[i]._mapcount); + } } } unlock_page_memcg(page); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index fb145839f299..035ef50be130 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2388,11 +2388,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, page = alloc_buddy_huge_page_with_mpol(h, vma, addr); if (!page) goto out_uncharge_cgroup; + spin_lock(&hugetlb_lock); if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { SetPagePrivate(page); h->resv_huge_pages--; } - spin_lock(&hugetlb_lock); list_add(&page->lru, &h->hugepage_activelist); /* Fall through */ } @@ -4331,15 +4331,11 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, unsigned long haddr, unsigned long reason) { - vm_fault_t ret; - u32 hash; + u32 hash = hugetlb_fault_mutex_hash(mapping, idx); struct vm_fault vmf = { .vma = vma, .address = haddr, .flags = flags, - .vma_flags = vma->vm_flags, - .vma_page_prot = vma->vm_page_prot, - /* * Hard to debug if it ends up being * used by a callee that assumes @@ -4350,18 +4346,14 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, }; /* - * hugetlb_fault_mutex and i_mmap_rwsem must be - * dropped before handling userfault. Reacquire - * after handling fault to make calling code simpler. + * vma_lock and hugetlb_fault_mutex must be dropped + * before handling userfault. Also mmap_lock will + * be dropped during handling userfault, any vma + * operation should be careful from here. */ - hash = hugetlb_fault_mutex_hash(mapping, idx); mutex_unlock(&hugetlb_fault_mutex_table[hash]); i_mmap_unlock_read(mapping); - ret = handle_userfault(&vmf, reason); - i_mmap_lock_read(mapping); - mutex_lock(&hugetlb_fault_mutex_table[hash]); - - return ret; + return handle_userfault(&vmf, VM_UFFD_MISSING); } static vm_fault_t hugetlb_no_page(struct mm_struct *mm, @@ -4378,6 +4370,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, spinlock_t *ptl; unsigned long haddr = address & huge_page_mask(h); bool new_page = false; + u32 hash = hugetlb_fault_mutex_hash(mapping, idx); /* * Currently, we are forced to kill the process in the event the @@ -4387,7 +4380,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", current->pid); - return ret; + goto out; } /* @@ -4524,6 +4517,8 @@ retry: unlock_page(page); out: + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + i_mmap_unlock_read(mapping); return ret; backout: @@ -4619,10 +4614,12 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, mutex_lock(&hugetlb_fault_mutex_table[hash]); entry = huge_ptep_get(ptep); - if (huge_pte_none(entry)) { - ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); - goto out_mutex; - } + if (huge_pte_none(entry)) + /* + * hugetlb_no_page will drop vma lock and hugetlb fault + * mutex internally, which make us return immediately. 
+ */ + return hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); ret = 0; @@ -5505,7 +5502,14 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, pud_clear(pud); put_page(virt_to_page(ptep)); mm_dec_nr_pmds(mm); - *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; + /* + * This update of passed address optimizes loops sequentially + * processing addresses in increments of huge page size (PMD_SIZE + * in this case). By clearing the pud, a PUD_SIZE area is unmapped. + * Update address to the 'last page' in the cleared area so that + * calling loop can move to first page past this area. + */ + *addr |= PUD_SIZE - PMD_SIZE; return 1; } @@ -5622,12 +5626,13 @@ follow_huge_pd(struct vm_area_struct *vma, } struct page * __weak -follow_huge_pmd(struct mm_struct *mm, unsigned long address, - pmd_t *pmd, int flags) +follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags) { + struct hstate *h = hstate_vma(vma); + struct mm_struct *mm = vma->vm_mm; struct page *page = NULL; spinlock_t *ptl; - pte_t pte; + pte_t *ptep, pte; /* FOLL_GET and FOLL_PIN are mutually exclusive. */ if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == @@ -5635,17 +5640,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, return NULL; retry: - ptl = pmd_lockptr(mm, pmd); - spin_lock(ptl); - /* - * make sure that the address range covered by this pmd is not - * unmapped from other threads. - */ - if (!pmd_huge(*pmd)) - goto out; - pte = huge_ptep_get((pte_t *)pmd); + ptep = huge_pte_offset(mm, address, huge_page_size(h)); + if (!ptep) + return NULL; + + ptl = huge_pte_lock(h, mm, ptep); + pte = huge_ptep_get(ptep); if (pte_present(pte)) { - page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); + page = pte_page(pte) + + ((address & ~huge_page_mask(h)) >> PAGE_SHIFT); /* * try_grab_page() should always succeed here, because: a) we * hold the pmd (ptl) lock, and b) we've just checked that the @@ -5661,7 +5664,7 @@ retry: } else { if (is_hugetlb_entry_migration(pte)) { spin_unlock(ptl); - __migration_entry_wait(mm, (pte_t *)pmd, ptl); + __migration_entry_wait(mm, ptep, ptl); goto retry; } /* @@ -5776,6 +5779,7 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) if (start >= end) return; + flush_cache_range(vma, start, end); /* * No need to call adjust_range_if_pmd_sharing_possible(), because * we have already done the PUD_SIZE alignment. diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 728fb24c5683..38cd75c5de8d 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -304,6 +304,13 @@ static void per_cpu_remove_cache(void *arg) struct qlist_head *q; q = this_cpu_ptr(&cpu_quarantine); + /* + * Ensure the ordering between the writing to q->offline and + * per_cpu_remove_cache. Prevent cpu_quarantine from being corrupted + * by interrupt. + */ + if (READ_ONCE(q->offline)) + return; qlist_move_cache(q, &to_free, cache); qlist_free_all(&to_free, cache); } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 8008e6c2714e..6ddc2761a408 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1147,14 +1147,17 @@ static void collapse_huge_page(struct mm_struct *mm, pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ /* - * After this gup_fast can't run anymore. This also removes - * any huge TLB entry from the CPU so we won't allow - * huge and small TLB entries for the same virtual address - * to avoid the risk of CPU bugs in that area. 
+ * This removes any huge TLB entry from the CPU so we won't allow + * huge and small TLB entries for the same virtual address to + * avoid the risk of CPU bugs in that area. + * + * Parallel fast GUP is fine since fast GUP will back off when + * it detects PMD is changed. */ _pmd = pmdp_collapse_flush(vma, address, pmd); spin_unlock(pmd_ptl); mmu_notifier_invalidate_range_end(&range); + tlb_remove_table_sync_one(); spin_lock(pte_ptl); isolated = __collapse_huge_page_isolate(vma, address, pte, @@ -1446,6 +1449,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) spinlock_t *ptl; int count = 0; int i; + struct mmu_notifier_range range; if (!vma || !vma->vm_file || vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE) @@ -1460,6 +1464,14 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE)) return; + /* + * Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings + * that got written to. Without this, we'd have to also lock the + * anon_vma if one exists. + */ + if (vma->anon_vma) + return; + hpage = find_lock_page(vma->vm_file->f_mapping, linear_page_index(vma, haddr)); if (!hpage) @@ -1472,6 +1484,21 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) if (!pmd) goto drop_hpage; + vm_write_begin(vma); + + /* + * We need to lock the mapping so that from here on, only GUP-fast and + * hardware page walks can access the parts of the page tables that + * we're operating on. + */ + i_mmap_lock_write(vma->vm_file->f_mapping); + + /* + * This spinlock should be unnecessary: Nobody else should be accessing + * the page tables under spinlock protection here, only + * lockless_pages_from_mm() and the hardware page walker can access page + * tables while all the high-level locks are held in write mode. + */ start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); /* step 1: check all mapped PTEs are to the right huge page */ @@ -1518,12 +1545,18 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) } /* step 4: collapse pmd */ - ptl = pmd_lock(vma->vm_mm, pmd); + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr, + haddr + HPAGE_PMD_SIZE); + mmu_notifier_invalidate_range_start(&range); _pmd = pmdp_collapse_flush(vma, haddr, pmd); - spin_unlock(ptl); + vm_write_end(vma); mm_dec_nr_ptes(mm); + tlb_remove_table_sync_one(); + mmu_notifier_invalidate_range_end(&range); pte_free(mm, pmd_pgtable(_pmd)); + i_mmap_unlock_write(vma->vm_file->f_mapping); + drop_hpage: unlock_page(hpage); put_page(hpage); @@ -1531,6 +1564,8 @@ drop_hpage: abort: pte_unmap_unlock(start_pte, ptl); + vm_write_end(vma); + i_mmap_unlock_write(vma->vm_file->f_mapping); goto drop_hpage; } @@ -1580,7 +1615,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) * An alternative would be drop the check, but check that page * table is clear before calling pmdp_collapse_flush() under * ptl. It has higher chance to recover THP for the VMA, but - * has higher cost too. + * has higher cost too. It would also probably require locking + * the anon_vma. 
*/ if (vma->anon_vma) continue; @@ -1602,12 +1638,21 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) */ if (mmap_write_trylock(mm)) { if (!khugepaged_test_exit(mm)) { - spinlock_t *ptl = pmd_lock(mm, pmd); + struct mmu_notifier_range range; + + vm_write_begin(vma); + mmu_notifier_range_init(&range, + MMU_NOTIFY_CLEAR, 0, + NULL, mm, addr, + addr + HPAGE_PMD_SIZE); + mmu_notifier_invalidate_range_start(&range); /* assume page table is clear */ _pmd = pmdp_collapse_flush(vma, addr, pmd); - spin_unlock(ptl); + vm_write_end(vma); mm_dec_nr_ptes(mm); + tlb_remove_table_sync_one(); pte_free(mm, pmd_pgtable(_pmd)); + mmu_notifier_invalidate_range_end(&range); } mmap_write_unlock(mm); } else { diff --git a/mm/ksm.c b/mm/ksm.c index e2464c04ede2..2695ddbeb47e 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2626,7 +2626,13 @@ again: struct vm_area_struct *vma; cond_resched(); - anon_vma_lock_read(anon_vma); + if (!anon_vma_trylock_read(anon_vma)) { + if (rwc->try_lock) { + rwc->contended = true; + return; + } + anon_vma_lock_read(anon_vma); + } anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, 0, ULONG_MAX) { unsigned long addr; diff --git a/mm/maccess.c b/mm/maccess.c index 3bd70405f2d8..f6ea117a69eb 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -83,7 +83,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) return src - unsafe_addr; Efault: pagefault_enable(); - dst[-1] = '\0'; + dst[0] = '\0'; return -EFAULT; } #else /* HAVE_GET_KERNEL_NOFAULT */ diff --git a/mm/madvise.c b/mm/madvise.c index db54a747d6f3..b3761ca637bb 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -38,6 +38,7 @@ struct madvise_walk_private { struct mmu_gather *tlb; bool pageout; + bool can_pageout_file; }; /* @@ -313,16 +314,19 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, struct madvise_walk_private *private = walk->private; struct mmu_gather *tlb = private->tlb; bool pageout = private->pageout; + bool pageout_anon_only = pageout && !private->can_pageout_file; struct mm_struct *mm = tlb->mm; struct vm_area_struct *vma = walk->vma; pte_t *orig_pte, *pte, ptent; spinlock_t *ptl; struct page *page = NULL; LIST_HEAD(page_list); + bool allow_shared = false; if (fatal_signal_pending(current)) return -EINTR; + trace_android_vh_madvise_cold_or_pageout(vma, &allow_shared); #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (pmd_trans_huge(*pmd)) { pmd_t orig_pmd; @@ -349,6 +353,9 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, if (page_mapcount(page) != 1) goto huge_unlock; + if (pageout_anon_only && !PageAnon(page)) + goto huge_unlock; + if (next - addr != HPAGE_PMD_SIZE) { int err; @@ -417,6 +424,8 @@ regular_page: if (PageTransCompound(page)) { if (page_mapcount(page) != 1) break; + if (pageout_anon_only && !PageAnon(page)) + break; get_page(page); if (!trylock_page(page)) { put_page(page); @@ -437,8 +446,14 @@ regular_page: continue; } - /* Do not interfere with other mappings of this page */ - if (page_mapcount(page) != 1) + /* + * Do not interfere with other mappings of this page and + * non-LRU page. 
diff --git a/mm/maccess.c index 3bd70405f2d8..f6ea117a69eb 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -83,7 +83,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) return src - unsafe_addr; Efault: pagefault_enable(); - dst[-1] = '\0'; + dst[0] = '\0'; return -EFAULT; } #else /* HAVE_GET_KERNEL_NOFAULT */ diff --git a/mm/madvise.c index db54a747d6f3..b3761ca637bb 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -38,6 +38,7 @@ struct madvise_walk_private { struct mmu_gather *tlb; bool pageout; + bool can_pageout_file; }; /* @@ -313,16 +314,19 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, struct madvise_walk_private *private = walk->private; struct mmu_gather *tlb = private->tlb; bool pageout = private->pageout; + bool pageout_anon_only = pageout && !private->can_pageout_file; struct mm_struct *mm = tlb->mm; struct vm_area_struct *vma = walk->vma; pte_t *orig_pte, *pte, ptent; spinlock_t *ptl; struct page *page = NULL; LIST_HEAD(page_list); + bool allow_shared = false; if (fatal_signal_pending(current)) return -EINTR; + trace_android_vh_madvise_cold_or_pageout(vma, &allow_shared); #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (pmd_trans_huge(*pmd)) { pmd_t orig_pmd; @@ -349,6 +353,9 @@ if (page_mapcount(page) != 1) goto huge_unlock; + if (pageout_anon_only && !PageAnon(page)) + goto huge_unlock; + if (next - addr != HPAGE_PMD_SIZE) { int err; @@ -417,6 +424,8 @@ regular_page: if (PageTransCompound(page)) { if (page_mapcount(page) != 1) break; + if (pageout_anon_only && !PageAnon(page)) + break; get_page(page); if (!trylock_page(page)) { put_page(page); @@ -437,8 +446,14 @@ regular_page: continue; } - /* Do not interfere with other mappings of this page */ - if (page_mapcount(page) != 1) + /* + * Do not interfere with other mappings of this page or with + * non-LRU pages. + */ + if (!allow_shared && (!PageLRU(page) || page_mapcount(page) != 1)) + continue; + + if (pageout_anon_only && !PageAnon(page)) continue; VM_BUG_ON_PAGE(PageTransCompound(page), page); @@ -494,11 +509,9 @@ static void madvise_cold_page_range(struct mmu_gather *tlb, .tlb = tlb, }; - vm_write_begin(vma); tlb_start_vma(tlb, vma); walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); tlb_end_vma(tlb, vma); - vm_write_end(vma); } static long madvise_cold(struct vm_area_struct *vma, @@ -522,24 +535,22 @@ static void madvise_pageout_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, - unsigned long addr, unsigned long end) + unsigned long addr, unsigned long end, + bool can_pageout_file) { struct madvise_walk_private walk_private = { .pageout = true, .tlb = tlb, + .can_pageout_file = can_pageout_file, }; - vm_write_begin(vma); tlb_start_vma(tlb, vma); walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); tlb_end_vma(tlb, vma); - vm_write_end(vma); } -static inline bool can_do_pageout(struct vm_area_struct *vma) +static inline bool can_do_file_pageout(struct vm_area_struct *vma) { - if (vma_is_anonymous(vma)) - return true; if (!vma->vm_file) return false; /* @@ -558,17 +569,23 @@ static long madvise_pageout(struct vm_area_struct *vma, { struct mm_struct *mm = vma->vm_mm; struct mmu_gather tlb; + bool can_pageout_file; *prev = vma; if (!can_madv_lru_vma(vma)) return -EINVAL; - if (!can_do_pageout(vma)) - return 0; + /* + * If the VMA belongs to a private file mapping, there can be private + * dirty pages which can be paged out even if this process is neither + * owner nor write capable of the file. Cache the file access check + * here and use it later during page walk.
+ */ + can_pageout_file = can_do_file_pageout(vma); lru_add_drain(); tlb_gather_mmu(&tlb, mm, start_addr, end_addr); - madvise_pageout_page_range(&tlb, vma, start_addr, end_addr); + madvise_pageout_page_range(&tlb, vma, start_addr, end_addr, can_pageout_file); tlb_finish_mmu(&tlb, start_addr, end_addr); return 0; @@ -736,12 +753,10 @@ static int madvise_free_single_vma(struct vm_area_struct *vma, update_hiwater_rss(mm); mmu_notifier_invalidate_range_start(&range); - vm_write_begin(vma); tlb_start_vma(&tlb, vma); walk_page_range(vma->vm_mm, range.start, range.end, &madvise_free_walk_ops, &tlb); tlb_end_vma(&tlb, vma); - vm_write_end(vma); mmu_notifier_invalidate_range_end(&range); tlb_finish_mmu(&tlb, range.start, range.end); diff --git a/mm/memblock.c b/mm/memblock.c index dc46b0578ac8..6661739107d7 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -172,6 +172,7 @@ static __refdata struct memblock_type *memblock_memory = &memblock.memory; } while (0) static int memblock_debug __initdata_memblock; +static bool memblock_nomap_remove __initdata_memblock; static bool system_has_some_mirror __initdata_memblock = false; static int memblock_can_resize __initdata_memblock; static int memblock_memory_in_slab __initdata_memblock = 0; @@ -1924,6 +1925,17 @@ static int __init early_memblock(char *p) } early_param("memblock", early_memblock); +static int __init early_memblock_nomap(char *str) +{ + return kstrtobool(str, &memblock_nomap_remove); +} +early_param("android12_only.will_be_removed_soon.memblock_nomap_remove", early_memblock_nomap); + +bool __init memblock_is_nomap_remove(void) +{ + return memblock_nomap_remove; +} + static void __init __free_pages_memory(unsigned long start, unsigned long end) { int order; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 008e72c2b980..0501a27be6f1 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1372,6 +1372,38 @@ out: return lruvec; } +struct lruvec *page_to_lruvec(struct page *page, pg_data_t *pgdat) +{ + struct lruvec *lruvec; + + lruvec = mem_cgroup_page_lruvec(page, pgdat); + + return lruvec; +} +EXPORT_SYMBOL_GPL(page_to_lruvec); + +void do_traversal_all_lruvec(void) +{ + pg_data_t *pgdat; + + for_each_online_pgdat(pgdat) { + struct mem_cgroup *memcg = NULL; + + spin_lock_irq(&pgdat->lru_lock); + memcg = mem_cgroup_iter(NULL, NULL, NULL); + do { + struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); + + trace_android_vh_do_traversal_lruvec(lruvec); + + memcg = mem_cgroup_iter(NULL, memcg, NULL); + } while (memcg); + + spin_unlock_irq(&pgdat->lru_lock); + } +} +EXPORT_SYMBOL_GPL(do_traversal_all_lruvec); + /** * mem_cgroup_update_lru_size - account for adding or removing an lru page * @lruvec: mem_cgroup per zone lru vector @@ -4867,6 +4899,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, unsigned int efd, cfd; struct fd efile; struct fd cfile; + struct dentry *cdentry; const char *name; char *endp; int ret; @@ -4917,6 +4950,16 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, if (ret < 0) goto out_put_cfile; + /* + * The control file must be a regular cgroup1 file. As a regular cgroup + * file can't be renamed, it's safe to access its name afterwards. + */ + cdentry = cfile.file->f_path.dentry; + if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) { + ret = -EINVAL; + goto out_put_cfile; + } + /* * Determine the event callbacks and set them in @event. 
This used * to be done via struct cftype but cgroup core no longer knows @@ -4925,7 +4968,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, * * DO NOT ADD NEW FILES. */ - name = cfile.file->f_path.dentry->d_name.name; + name = cdentry->d_name.name; if (!strcmp(name, "memory.usage_in_bytes")) { event->register_event = mem_cgroup_usage_register_event; @@ -4949,7 +4992,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, * automatically removed on cgroup destruction but the removal is * asynchronous, so take an extra ref on @css. */ - cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, + cfile_css = css_tryget_online_from_dir(cdentry->d_parent, &memory_cgrp_subsys); ret = -EINVAL; if (IS_ERR(cfile_css)) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index aef267c6a724..4bd73d6dc18a 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -477,7 +477,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, struct anon_vma *av; pgoff_t pgoff; - av = page_lock_anon_vma_read(page); + av = page_lock_anon_vma_read(page, NULL); if (av == NULL) /* Not actually mapped anymore */ return; diff --git a/mm/memory.c b/mm/memory.c index 7d70acf494d4..834078ab6e1c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -862,6 +862,17 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma if (likely(!page_maybe_dma_pinned(page))) return 1; + /* + * The vma->anon_vma of the child process may be NULL + * because the entire vma does not contain anonymous pages. + * A BUG will occur when the copy_present_page() passes + * a copy of a non-anonymous page of that vma to the + * page_add_new_anon_rmap() to set up new anonymous rmap. + * Return 1 if the page is not an anonymous page. + */ + if (!PageAnon(page)) + return 1; + new_page = *prealloc; if (!new_page) return -EAGAIN; @@ -1243,6 +1254,17 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) return ret; } +/* Whether we should zap all COWed (private) pages too */ +static inline bool should_zap_cows(struct zap_details *details) +{ + /* By default, zap all pages */ + if (!details) + return true; + + /* Or, we zap COWed pages only if the caller wants to */ + return !details->check_mapping; +} + static unsigned long zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, @@ -1335,16 +1357,18 @@ again: continue; } - /* If details->check_mapping, we leave swap entries. 
*/ - if (unlikely(details)) continue; - - if (!non_swap_entry(entry)) + if (!non_swap_entry(entry)) { + /* Genuine swap entry, hence a private anon page */ + if (!should_zap_cows(details)) + continue; rss[MM_SWAPENTS]--; - else if (is_migration_entry(entry)) { + } else if (is_migration_entry(entry)) { struct page *page; page = migration_entry_to_page(entry); + if (details && details->check_mapping && + details->check_mapping != page_rmapping(page)) + continue; rss[mm_counter(page)]--; } if (unlikely(!free_swap_and_cache(entry))) @@ -1482,7 +1506,6 @@ void unmap_page_range(struct mmu_gather *tlb, unsigned long next; BUG_ON(addr >= end); - vm_write_begin(vma); tlb_start_vma(tlb, vma); pgd = pgd_offset(vma->vm_mm, addr); do { @@ -1492,7 +1515,6 @@ void unmap_page_range(struct mmu_gather *tlb, next = zap_p4d_range(tlb, vma, pgd, addr, next, details); } while (pgd++, addr = next, addr != end); tlb_end_vma(tlb, vma); - vm_write_end(vma); } @@ -3374,6 +3396,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) if (userfaultfd_pte_wp(vma, *vmf->pte)) { pte_unmap_unlock(vmf->pte, vmf->ptl); + if (vmf->flags & FAULT_FLAG_SPECULATIVE) + return VM_FAULT_RETRY; return handle_userfault(vmf, VM_UFFD_WP); } @@ -3586,6 +3610,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) vm_fault_t ret; void *shadow = NULL; + if (vmf->flags & FAULT_FLAG_SPECULATIVE) { + pte_unmap(vmf->pte); + return VM_FAULT_RETRY; + } + ret = pte_unmap_same(vmf); if (ret) { /* @@ -3845,6 +3874,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) if (vmf->vma_flags & VM_SHARED) return VM_FAULT_SIGBUS; + /* Do not check an unstable pmd; if it changed, we will retry later */ + if (vmf->flags & FAULT_FLAG_SPECULATIVE) + goto skip_pmd_checks; + /* * Use pte_alloc() instead of pte_alloc_map(). We can't run * pte_offset_map() on pmds where a huge pmd might be created @@ -3862,6 +3895,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) if (unlikely(pmd_trans_unstable(vmf->pmd))) return 0; +skip_pmd_checks: /* Use the zero-page for reads */ if (!(vmf->flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(vma->vm_mm)) { @@ -3969,6 +4003,10 @@ static vm_fault_t __do_fault(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; vm_fault_t ret; + /* Do not check an unstable pmd; if it changed, we will retry later */ + if (vmf->flags & FAULT_FLAG_SPECULATIVE) + goto skip_pmd_checks; + /* * Preallocate pte before we take page_lock because this might lead to * deadlocks for memcg reclaim which waits for pages under writeback: @@ -3991,6 +4029,7 @@ static vm_fault_t __do_fault(struct vm_fault *vmf) smp_wmb(); /* See comment in __pte_alloc() */ } +skip_pmd_checks: ret = vma->vm_ops->fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | VM_FAULT_DONE_COW))) @@ -4166,7 +4205,11 @@ vm_fault_t finish_fault(struct vm_fault *vmf) return ret; } - if (pmd_none(*vmf->pmd) && !(vmf->flags & FAULT_FLAG_SPECULATIVE)) { + /* Do not check an unstable pmd; if it changed, we will retry later */ + if (vmf->flags & FAULT_FLAG_SPECULATIVE) + goto skip_pmd_checks; + + if (pmd_none(*vmf->pmd)) { if (PageTransCompound(page)) { ret = do_set_pmd(vmf, page); if (ret != VM_FAULT_FALLBACK) @@ -4186,10 +4229,14 @@ vm_fault_t finish_fault(struct vm_fault *vmf) } } - /* See comment in handle_pte_fault() */ + /* + * See comment in handle_pte_fault() for how this scenario happens; we + * need to return NOPAGE so that we drop this page. + */ if (pmd_devmap_trans_unstable(vmf->pmd)) - return 0; + return VM_FAULT_NOPAGE; +skip_pmd_checks: if (!pte_map_lock(vmf)) return VM_FAULT_RETRY;
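The FAULT_FLAG_SPECULATIVE checks added above all follow one rule: the speculative path never blocks and never relies on a PMD that may be unstable; it returns VM_FAULT_RETRY and lets the caller redo the fault under mmap_lock. A deliberately tiny sketch of that two-tier dispatch; the enum, struct and names are invented for the sketch, not kernel types:

#include <stdbool.h>
#include <stdio.h>

enum fault_result { FAULT_OK, FAULT_RETRY };

struct fault_ctx {
    bool speculative;   /* models FAULT_FLAG_SPECULATIVE */
    bool needs_block;   /* models work that may sleep (I/O, userfaultfd) */
};

/*
 * Fast path: bail with FAULT_RETRY rather than doing anything that can
 * sleep; the caller then falls back to the locked slow path.
 */
static enum fault_result handle_fault(struct fault_ctx *ctx)
{
    if (ctx->speculative && ctx->needs_block)
        return FAULT_RETRY;
    /* ... install the PTE ... */
    return FAULT_OK;
}

int main(void)
{
    struct fault_ctx ctx = { .speculative = true, .needs_block = true };

    if (handle_fault(&ctx) == FAULT_RETRY) {
        ctx.speculative = false;            /* retry under the lock */
        printf("slow path ok: %d\n", handle_fault(&ctx) == FAULT_OK);
    }
    return 0;
}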
@@ -4289,7 +4336,8 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf) end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, start_pgoff + nr_pages - 1); - if (pmd_none(*vmf->pmd)) { + if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) && + pmd_none(*vmf->pmd)) { vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); if (!vmf->prealloc_pte) return VM_FAULT_OOM; @@ -4596,6 +4644,19 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) static vm_fault_t create_huge_pud(struct vm_fault *vmf) { +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) + /* No support for anonymous transparent PUD pages yet */ + if (vma_is_anonymous(vmf->vma)) + return VM_FAULT_FALLBACK; + if (vmf->vma->vm_ops->huge_fault) + return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + return VM_FAULT_FALLBACK; +} + +static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) +{ #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) /* No support for anonymous transparent PUD pages yet */ @@ -4610,19 +4671,7 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf) split: /* COW or write-notify not handled on PUD level: split pud.*/ __split_huge_pud(vmf->vma, vmf->pud, vmf->address); -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - return VM_FAULT_FALLBACK; -} - -static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) -{ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - /* No support for anonymous transparent PUD pages yet */ - if (vma_is_anonymous(vmf->vma)) - return VM_FAULT_FALLBACK; - if (vmf->vma->vm_ops->huge_fault) - return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ return VM_FAULT_FALLBACK; } @@ -4646,16 +4695,11 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) pte_t entry; vm_fault_t ret = 0; + /* Do not check an unstable pmd; if it changed, we will retry later */ + if (vmf->flags & FAULT_FLAG_SPECULATIVE) + goto skip_pmd_checks; + if (unlikely(pmd_none(*vmf->pmd))) { - /* - * In the case of the speculative page fault handler we abort - * the speculative path immediately as the pmd is probably - * in the way to be converted in a huge one. We will try - * again holding the mmap_sem (which implies that the collapse - * operation is done). - */ - if (vmf->flags & FAULT_FLAG_SPECULATIVE) - return VM_FAULT_RETRY; /* * Leave __pte_alloc() until later: because vm_ops->fault may * want to allocate huge page, and if we expose page table @@ -4663,7 +4707,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) * concurrent faults and from rmap lookups. */ vmf->pte = NULL; - } else if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) { + } else { /* * If a huge pmd materialized under us just retry later.
Use * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead @@ -4705,6 +4749,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) } } +skip_pmd_checks: if (!vmf->pte) { if (vma_is_anonymous(vmf->vma)) return do_anonymous_page(vmf); @@ -4763,6 +4808,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) if (vmf->flags & FAULT_FLAG_WRITE) flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); } + trace_android_rvh_handle_pte_fault_end(vmf, highest_memmap_pfn); trace_android_vh_handle_pte_fault_end(vmf, highest_memmap_pfn); unlock: pte_unmap_unlock(vmf->pte, vmf->ptl); @@ -4986,11 +5032,13 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm, vmf.vma_flags = READ_ONCE(vmf.vma->vm_flags); vmf.vma_page_prot = READ_ONCE(vmf.vma->vm_page_prot); +#ifdef CONFIG_USERFAULTFD /* Can't call userland page fault handler in the speculative path */ - if (unlikely(vmf.vma_flags & VM_UFFD_MISSING)) { + if (unlikely(vmf.vma_flags & __VM_UFFD_FLAGS)) { trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); return VM_FAULT_RETRY; } +#endif if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP) { /* @@ -5029,11 +5077,10 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm, pol = __get_vma_policy(vmf.vma, address); if (!pol) pol = get_task_policy(current); - if (!pol) - if (pol && pol->mode == MPOL_INTERLEAVE) { - trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); - return VM_FAULT_RETRY; - } + if (pol && pol->mode == MPOL_INTERLEAVE) { + trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); + return VM_FAULT_RETRY; + } #endif /* @@ -5838,6 +5885,8 @@ long copy_huge_page_from_user(struct page *dst_page, if (rc) break; + flush_dcache_page(subpage); + cond_resched(); } return ret_val; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 6702c1114ced..9176bae95c9a 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1156,14 +1156,22 @@ int add_memory_subsection(int nid, u64 start, u64 size) ret = arch_add_memory(nid, start, size, ¶ms); if (ret) { - if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) - memblock_remove(start, size); pr_err("%s failed to add subsection start 0x%llx size 0x%llx\n", __func__, start, size); + goto err_add_memory; } mem_hotplug_done(); return ret; + +err_add_memory: + if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) + memblock_remove(start, size); + + mem_hotplug_done(); + + release_memory_resource(res); + return ret; } EXPORT_SYMBOL_GPL(add_memory_subsection); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index ee585fbc7899..142929e67a97 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -374,7 +374,7 @@ static void mpol_rebind_preferred(struct mempolicy *pol, */ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) { - if (!pol) + if (!pol || pol->mode == MPOL_LOCAL) return; if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) && nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) @@ -657,11 +657,9 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, { int nr_updated; - vm_write_begin(vma); nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA); if (nr_updated) count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); - vm_write_end(vma); return nr_updated; } @@ -2654,6 +2652,7 @@ alloc_new: mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!mpol_new) goto err_out; + atomic_set(&mpol_new->refcnt, 1); goto restart; } diff --git a/mm/memremap.c b/mm/memremap.c index 2455bac89506..299aad0d26e5 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -348,6 +348,7 @@ 
void *memremap_pages(struct dev_pagemap *pgmap, int nid) WARN(1, "File system DAX not supported\n"); return ERR_PTR(-EINVAL); } + params.pgprot = pgprot_decrypted(params.pgprot); break; case MEMORY_DEVICE_GENERIC: break; diff --git a/mm/migrate.c b/mm/migrate.c index 6d09285f3c1a..9fec180b0eb3 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -56,6 +56,7 @@ #include #undef CREATE_TRACE_POINTS #include +#include #include "internal.h" @@ -606,6 +607,7 @@ void migrate_page_states(struct page *newpage, struct page *page) SetPageChecked(newpage); if (PageMappedToDisk(page)) SetPageMappedToDisk(newpage); + trace_android_vh_look_around_migrate_page(page, newpage); /* Move dirty on pages not done by migrate_page_move_mapping() */ if (PageDirty(page)) @@ -990,9 +992,12 @@ static int move_to_new_page(struct page *newpage, struct page *page, if (!PageMappingFlags(page)) page->mapping = NULL; - if (likely(!is_zone_device_page(newpage))) - flush_dcache_page(newpage); + if (likely(!is_zone_device_page(newpage))) { + int i, nr = compound_nr(newpage); + for (i = 0; i < nr; i++) + flush_dcache_page(newpage + i); + } } out: return rc; @@ -2454,13 +2459,14 @@ next: migrate->dst[migrate->npages] = 0; migrate->src[migrate->npages++] = mpfn; } - arch_leave_lazy_mmu_mode(); - pte_unmap_unlock(ptep - 1, ptl); /* Only flush the TLB if we actually modified any entries */ if (unmapped) flush_tlb_range(walk->vma, start, end); + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(ptep - 1, ptl); + return 0; } diff --git a/mm/mmap.c b/mm/mmap.c index 1ae340632e47..8ec92cc5ae69 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1760,8 +1760,12 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags))) return 0; - /* Do we need to track softdirty? */ - if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY)) + /* + * Do we need to track softdirty? hugetlb does not support softdirty + * tracking yet. + */ + if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY) && + !is_vm_hugetlb_page(vma)) return 1; /* Specialty mapping? */ @@ -1919,7 +1923,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, if (!arch_validate_flags(vma->vm_flags)) { error = -EINVAL; if (file) - goto unmap_and_free_vma; + goto close_and_free_vma; else goto free_vma; } @@ -1968,13 +1972,15 @@ out: return addr; +close_and_free_vma: + if (vma->vm_ops && vma->vm_ops->close) + vma->vm_ops->close(vma); unmap_and_free_vma: vma->vm_file = NULL; fput(file); /* Undo any partial mapping done by a device driver. */ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); - charged = 0; if (vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); allow_write_and_free_vma: @@ -2218,14 +2224,6 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) } EXPORT_SYMBOL_GPL(vm_unmapped_area); -#ifndef arch_get_mmap_end -#define arch_get_mmap_end(addr) (TASK_SIZE) -#endif - -#ifndef arch_get_mmap_base -#define arch_get_mmap_base(addr, base) (base) -#endif - /* Get an address range which is currently unmapped. * For shmat() with addr=0. * @@ -2426,8 +2424,22 @@ struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr) read_lock(&mm->mm_rb_lock); vma = __find_vma(mm, addr); - if (vma) - atomic_inc(&vma->vm_ref_count); + + /* + * If there is a concurrent fast mremap, bail out since the entire + * PMD/PUD subtree may have been remapped. + * + * This is usually safe for conventional mremap since it takes the + * PTE locks as does SPF. 
However fast mremap only takes the lock + * at the PMD/PUD level, which is OK as it is done with the mmap + * write lock held. But SPF, as the term implies, forgoes taking + * the mmap read lock, and it also cannot take the PTL at the + * larger PMD/PUD granularity since that would introduce huge + * contention in the page fault path; so fall back to regular + * fault handling. + */ + if (vma && !atomic_inc_unless_negative(&vma->vm_ref_count)) + vma = NULL; read_unlock(&mm->mm_rb_lock); return vma;
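get_vma() above pairs with the trylock_vma_ref_count()/unlock_vma_ref_count() helpers that mm/mremap.c gains further below: fast mremap parks vm_ref_count at -1 while it moves a PMD/PUD, and the SPF side refuses to take a reference while the count is negative. A compact userspace model of both halves with C11 atomics; the function names are mine, only the -1 convention mirrors the patch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic int vm_ref_count = 1;   /* 1 == owned only by the mm */

/*
 * Reader side (models get_vma() under SPF): take a reference only if
 * the count is non-negative; a negative count means a fast mremap owns
 * the VMA exclusively and we must fall back.
 */
static bool get_ref(void)
{
    int old = atomic_load(&vm_ref_count);
    do {
        if (old < 0)
            return false;
    } while (!atomic_compare_exchange_weak(&vm_ref_count, &old, old + 1));
    return true;
}

static void put_ref(void) { atomic_fetch_sub(&vm_ref_count, 1); }

/*
 * Writer side (models trylock_vma_ref_count()): succeed only when we
 * hold the sole reference, parking the count at -1.
 */
static bool trylock_exclusive(void)
{
    int expected = 1;
    return atomic_compare_exchange_strong(&vm_ref_count, &expected, -1);
}

static void unlock_exclusive(void)
{
    int expected = -1;
    atomic_compare_exchange_strong(&vm_ref_count, &expected, 1);
}

int main(void)
{
    if (trylock_exclusive()) {
        printf("mremap owns the VMA; get_ref() -> %d\n", get_ref());
        unlock_exclusive();
    }
    printf("after unlock; get_ref() -> %d\n", get_ref());
    put_ref();
    return 0;
}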
@@ -2773,11 +2785,28 @@ static void unmap_region(struct mm_struct *mm, { struct vm_area_struct *next = vma_next(mm, prev); struct mmu_gather tlb; + struct vm_area_struct *cur_vma; lru_add_drain(); tlb_gather_mmu(&tlb, mm, start, end); update_hiwater_rss(mm); unmap_vmas(&tlb, vma, start, end); + + /* + * Ensure we have no stale TLB entries by the time this mapping is + * removed from the rmap. + * Note that we don't have to worry about nested flushes here because + * we're holding the mm semaphore for removing the mapping - so any + * concurrent flush in this region has to be coming through the rmap, + * and we synchronize against that using the rmap lock. + */ + for (cur_vma = vma; cur_vma; cur_vma = cur_vma->vm_next) { + if ((cur_vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0) { + tlb_flush_mmu(&tlb); + break; + } + } + free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, next ? next->vm_start : USER_PGTABLES_CEILING); tlb_finish_mmu(&tlb, start, end); @@ -3334,6 +3363,7 @@ void exit_mmap(struct mm_struct *mm) vma = remove_vma(vma); cond_resched(); } + mm->mmap = NULL; mmap_write_unlock(mm); vm_unacct_memory(nr_accounted); } diff --git a/mm/mmu_gather.c index 03c33c93a582..205fdbb5792a 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -139,7 +139,7 @@ static void tlb_remove_table_smp_sync(void *arg) /* Simply deliver the interrupt */ } -static void tlb_remove_table_sync_one(void) +void tlb_remove_table_sync_one(void) { /* * This isn't an RCU grace period and hence the page-tables cannot be @@ -163,8 +163,6 @@ static void tlb_remove_table_free(struct mmu_table_batch *batch) #else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */ -static void tlb_remove_table_sync_one(void) { } - static void tlb_remove_table_free(struct mmu_table_batch *batch) { __tlb_remove_table_free(batch); diff --git a/mm/mmu_notifier.c index cee61bab214b..a399b8fa23ff 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -1081,6 +1081,18 @@ int mmu_interval_notifier_insert_locked( } EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked); +static bool +mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions, + unsigned long seq) +{ + bool ret; + + spin_lock(&subscriptions->lock); + ret = subscriptions->invalidate_seq != seq; + spin_unlock(&subscriptions->lock); + return ret; +} + /** * mmu_interval_notifier_remove - Remove a interval notifier * @interval_sub: Interval subscription to unregister @@ -1128,7 +1140,7 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub) lock_map_release(&__mmu_notifier_invalidate_range_start_map); if (seq) wait_event(subscriptions->wq, - READ_ONCE(subscriptions->invalidate_seq) != seq); + mmu_interval_seq_released(subscriptions, seq)); /* pairs with mmgrab in mmu_interval_notifier_insert() */ mmdrop(mm); diff --git a/mm/mmzone.c index 2241281ffe16..61e4a35311b2 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c @@ -74,20 +74,6 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z, } EXPORT_SYMBOL_GPL(__next_zones_zonelist); -#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL -bool memmap_valid_within(unsigned long pfn, - struct page *page, struct zone *zone) -{ - if (page_to_pfn(page) != pfn) - return false; - - if (page_zone(page) != zone) - return false; - - return true; -} -#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ - void lruvec_init(struct lruvec *lruvec) { enum lru_list lru; diff --git a/mm/mremap.c index c859b117f8e5..b1263e9a16af 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -210,6 +210,38 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, drop_rmap_locks(vma); } +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +static inline bool trylock_vma_ref_count(struct vm_area_struct *vma) +{ + /* + * If we have the only reference, swap the refcount to -1. This + * will prevent other concurrent references by get_vma() for SPFs. + */ + return atomic_cmpxchg(&vma->vm_ref_count, 1, -1) == 1; +} + +/* + * Restore the VMA reference count to 1 after a fast mremap. + */ +static inline void unlock_vma_ref_count(struct vm_area_struct *vma) +{ + /* + * This should only be called after a corresponding, + * successful trylock_vma_ref_count(). + */ + VM_BUG_ON_VMA(atomic_cmpxchg(&vma->vm_ref_count, -1, 1) != -1, + vma); +} +#else /* !CONFIG_SPECULATIVE_PAGE_FAULT */ +static inline bool trylock_vma_ref_count(struct vm_area_struct *vma) +{ + return true; +} +static inline void unlock_vma_ref_count(struct vm_area_struct *vma) +{ +} +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ + #ifdef CONFIG_HAVE_MOVE_PMD static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) @@ -244,6 +276,14 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, if (WARN_ON_ONCE(!pmd_none(*new_pmd))) return false; + /* + * We hold both exclusive mmap_lock and rmap_lock at this point and + * cannot block. If we cannot immediately take exclusive ownership + * of the VMA, fall back to move_ptes(). + */ + if (!trylock_vma_ref_count(vma)) + return false; + /* * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. @@ -266,6 +306,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, spin_unlock(new_ptl); spin_unlock(old_ptl); + unlock_vma_ref_count(vma); return true; } #else @@ -292,6 +333,14 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, if (WARN_ON_ONCE(!pud_none(*new_pud))) return false; + /* + * We hold both exclusive mmap_lock and rmap_lock at this point and + * cannot block. If we cannot immediately take exclusive ownership + * of the VMA, fall back to move_ptes(). + */ + if (!trylock_vma_ref_count(vma)) + return false; + /* * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock.
@@ -314,6 +363,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, spin_unlock(new_ptl); spin_unlock(old_ptl); + unlock_vma_ref_count(vma); return true; } #else @@ -416,6 +466,9 @@ unsigned long move_page_tables(struct vm_area_struct *vma, struct mmu_notifier_range range; pmd_t *old_pmd, *new_pmd; + if (!len) + return 0; + old_end = old_addr + len; flush_cache_range(vma, old_addr, old_end); diff --git a/mm/page_alloc.c index 94e39fe25d35..38685860d1ab 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -72,6 +72,7 @@ #include #include #include +#include #include #include @@ -2409,6 +2410,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags set_page_pfmemalloc(page); else clear_page_pfmemalloc(page); + trace_android_vh_test_clear_look_around_ref(page); } /* @@ -3841,11 +3843,15 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order, * need to be calculated. */ if (!order) { - long fast_free; + long usable_free; + long reserved; - fast_free = free_pages; - fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags); - if (fast_free > mark + z->lowmem_reserve[highest_zoneidx]) + usable_free = free_pages; + reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); + + /* reserved may overestimate high-atomic reserves. */ + usable_free -= min(usable_free, reserved); + if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) return true; } @@ -4482,6 +4488,30 @@ void fs_reclaim_release(gfp_t gfp_mask) EXPORT_SYMBOL_GPL(fs_reclaim_release); #endif +/* + * Zonelists may change due to hotplug during allocation. Detect when zonelists + * have been rebuilt so the allocation can be retried. The reader side does not + * lock and retries the allocation if the zonelist changes. The writer side is + * protected by the embedded spin_lock. + */ +static DEFINE_SEQLOCK(zonelist_update_seq); + +static unsigned int zonelist_iter_begin(void) +{ + if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) + return read_seqbegin(&zonelist_update_seq); + + return 0; +} + +static unsigned int check_retry_zonelist(unsigned int seq) +{ + if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) + return read_seqretry(&zonelist_update_seq, seq); + + return seq; +}
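zonelist_update_seq is used as a classic seqlock: the writer (in __build_all_zonelists(), patched further below) makes the sequence odd for the duration of the rebuild, and readers snapshot the sequence and retry if it was odd or changed. A minimal self-contained model of that protocol; this is the generic pattern, not the kernel's seqlock_t implementation, and the memory ordering is only as strong as this sketch needs:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned seq;     /* even: idle, odd: write in progress */
static int zonelist_data;        /* stand-in for the protected state */

static unsigned read_begin(void)
{
    unsigned s;
    while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
        ;                        /* writer active: wait */
    return s;
}

static int read_retry(unsigned s)
{
    atomic_thread_fence(memory_order_acquire);
    return atomic_load_explicit(&seq, memory_order_relaxed) != s;
}

static void write_update(int v)
{
    atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed); /* -> odd */
    atomic_thread_fence(memory_order_release);
    zonelist_data = v;           /* rebuild the "zonelists" */
    atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* -> even */
}

int main(void)
{
    int snapshot;
    unsigned s;

    do {
        s = read_begin();
        snapshot = zonelist_data;        /* read the protected state */
    } while (read_retry(s));             /* retry if a rebuild raced */
    printf("consistent snapshot: %d\n", snapshot);
    write_update(42);
    return 0;
}

Replacing the old static spinlock with this seqlock is what lets allocation (which cannot take a sleeping lock here) detect a concurrent hotplug rebuild without serializing against it.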
/* Perform direct synchronous page reclaim */ static unsigned long __perform_reclaim(gfp_t gfp_mask, unsigned int order, @@ -4795,8 +4825,11 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, int compaction_retries; int no_progress_loops; unsigned int cpuset_mems_cookie; + unsigned int zonelist_iter_cookie; int reserve_flags; + unsigned long vh_record; + trace_android_vh_alloc_pages_slowpath_begin(gfp_mask, order, &vh_record); /* * We also sanity check to catch abuse of atomic reserves being used by * callers that are not in atomic context. @@ -4805,11 +4838,12 @@ (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) gfp_mask &= ~__GFP_ATOMIC; -retry_cpuset: +restart: compaction_retries = 0; no_progress_loops = 0; compact_priority = DEF_COMPACT_PRIORITY; cpuset_mems_cookie = read_mems_allowed_begin(); + zonelist_iter_cookie = zonelist_iter_begin(); /* * The fast path uses conservative alloc_flags to succeed only until @@ -4928,6 +4962,12 @@ retry: if (current->flags & PF_MEMALLOC) goto nopage; + trace_android_vh_alloc_pages_reclaim_bypass(gfp_mask, order, + alloc_flags, ac->migratetype, &page); + + if (page) + goto got_pg; + /* Try direct reclaim and then allocating */ page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, &did_some_progress); @@ -4968,9 +5008,13 @@ retry: goto retry; - /* Deal with possible cpuset update races before we start OOM killing */ - if (check_retry_cpuset(cpuset_mems_cookie, ac)) - goto retry_cpuset; + /* + * Deal with possible cpuset update races or zonelist updates to avoid + * an unnecessary OOM kill. + */ + if (check_retry_cpuset(cpuset_mems_cookie, ac) || + check_retry_zonelist(zonelist_iter_cookie)) + goto restart; /* Reclaim has failed us, start killing things */ page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); @@ -4990,9 +5034,13 @@ retry: } nopage: - /* Deal with possible cpuset update races before we fail */ - if (check_retry_cpuset(cpuset_mems_cookie, ac)) - goto retry_cpuset; + /* + * Deal with possible cpuset update races or zonelist updates to avoid + * an unnecessary OOM kill. + */ + if (check_retry_cpuset(cpuset_mems_cookie, ac) || + check_retry_zonelist(zonelist_iter_cookie)) + goto restart; /* * Make sure that __GFP_NOFAIL request doesn't leak out and make sure @@ -5035,9 +5083,15 @@ nopage: goto retry; } fail: + trace_android_vh_alloc_pages_failure_bypass(gfp_mask, order, + alloc_flags, ac->migratetype, &page); + if (page) + goto got_pg; + warn_alloc(gfp_mask, ac->nodemask, "page allocation failure: order:%u", order); got_pg: + trace_android_vh_alloc_pages_slowpath_end(gfp_mask, order, vh_record); return page; }
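Putting the two cookies together: the slowpath now re-reads both the cpuset cookie and the zonelist cookie before declaring OOM or final failure, and restarts when either changed. The control flow, reduced to a sketch with stand-in functions (a real implementation would back the cookies with the seqcount pattern shown above):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for read_mems_allowed_begin()/zonelist_iter_begin() and
 * their retry checks; trivially inert here so the sketch is runnable. */
static unsigned cpuset_cookie_begin(void)   { return 0; }
static unsigned zonelist_cookie_begin(void) { return 0; }
static bool cpuset_changed(unsigned c)      { (void)c; return false; }
static bool zonelist_changed(unsigned c)    { (void)c; return false; }
static void *try_alloc(void)                { return NULL; }

static void *alloc_slowpath(void)
{
    void *page;

restart:
    ;
    unsigned cpuset_cookie = cpuset_cookie_begin();
    unsigned zl_cookie = zonelist_cookie_begin();

    page = try_alloc();
    if (page)
        return page;

    /*
     * Before declaring OOM, make sure the failure wasn't caused by a
     * racing cpuset update or zonelist rebuild; restarting is far
     * cheaper than a spurious OOM kill.
     */
    if (cpuset_changed(cpuset_cookie) || zonelist_changed(zl_cookie))
        goto restart;

    return NULL;    /* genuine failure: OOM handling would go here */
}

int main(void)
{
    printf("alloc: %p\n", alloc_slowpath());
    return 0;
}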
@@ -5296,6 +5350,18 @@ refill: /* reset page count bias and offset to start of new frag */ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; offset = size - fragsz; + if (unlikely(offset < 0)) { + /* + * The caller is trying to allocate a fragment + * with fragsz > PAGE_SIZE but the cache isn't big + * enough to satisfy the request; this may + * happen in low memory conditions. + * We don't release the cache page because + * it could make memory pressure worse, + * so we simply return NULL here. + */ + return NULL; + } } nc->pagecnt_bias--; @@ -5649,6 +5715,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask) free_pcp, global_zone_page_state(NR_FREE_CMA_PAGES)); + trace_android_vh_show_mapcount_pages(NULL); for_each_online_pgdat(pgdat) { if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) continue; @@ -5824,7 +5891,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) do { zone_type--; zone = pgdat->node_zones + zone_type; - if (managed_zone(zone)) { + if (populated_zone(zone)) { zoneref_set_zone(zone, &zonerefs[nr_zones++]); check_highest_zone(zone_type); } @@ -6091,9 +6158,8 @@ static void __build_all_zonelists(void *data) int nid; int __maybe_unused cpu; pg_data_t *self = data; - static DEFINE_SPINLOCK(lock); - spin_lock(&lock); + write_seqlock(&zonelist_update_seq); #ifdef CONFIG_NUMA memset(node_load, 0, sizeof(node_load)); @@ -6126,7 +6192,7 @@ static void __build_all_zonelists(void *data) #endif } - spin_unlock(&lock); + write_sequnlock(&zonelist_update_seq); } static noinline void __init @@ -6368,7 +6434,6 @@ static void __meminit zone_init_free_lists(struct zone *zone) } } -#if !defined(CONFIG_FLAT_NODE_MEM_MAP) /* * Only struct pages that correspond to ranges defined by memblock.memory * are zeroed and initialized by going through __init_single_page() during @@ -6413,13 +6478,6 @@ static void __init init_unavailable_range(unsigned long spfn, pr_info("On node %d, zone %s: %lld pages in unavailable ranges", node, zone_names[zone], pgcnt); } -#else -static inline void init_unavailable_range(unsigned long spfn, - unsigned long epfn, - int zone, int node) -{ -} -#endif static void __init memmap_init_zone_range(struct zone *zone, unsigned long start_pfn, @@ -7859,7 +7917,7 @@ void __init mem_init_print_info(const char *str) */ #define adj_init_size(start, end, size, pos, adj) \ do { \ - if (start <= pos && pos < end && size > adj) \ + if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \ size -= adj; \ } while (0) diff --git a/mm/page_ext.c index dde9f33f8a2d..7718f211f1ae 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -8,7 +8,7 @@ #include #include #include - +#include /* * struct page extension * @@ -58,6 +58,10 @@ * can utilize this callback to initialize the state of it correctly. */ +#ifdef CONFIG_SPARSEMEM +#define PAGE_EXT_INVALID (0x1) +#endif + #if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT) static bool need_page_idle(void) { @@ -124,6 +128,48 @@ static inline struct page_ext *get_entry(void *base, unsigned long index) return base + page_ext_size * index; } +/** + * page_ext_get() - Get the extended information for a page. + * @page: The page we're interested in. + * + * Ensures that the page_ext will remain valid until page_ext_put() + * is called. + * + * Return: NULL if no page_ext exists for this page. + * Context: Any context. Caller may not sleep until they have called + * page_ext_put(). + */ +struct page_ext *page_ext_get(struct page *page) +{ + struct page_ext *page_ext; + + rcu_read_lock(); + page_ext = lookup_page_ext(page); + if (!page_ext) { + rcu_read_unlock(); + return NULL; + } + + return page_ext; +} + +/** + * page_ext_put() - Working with page extended information is done. + * @page_ext: Page extended information received from page_ext_get(). + * + * The page extended information of the page may not be valid after this + * function is called. + * + * Return: None. + * Context: Any context in which the corresponding page_ext_get() was called. + */ +void page_ext_put(struct page_ext *page_ext) +{ + if (unlikely(!page_ext)) + return; + + rcu_read_unlock(); +}
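The page_ext rework relies on two tricks: lookups run under rcu_read_lock() between page_ext_get() and page_ext_put(), and offlining first tags the section's page_ext pointer with a low "invalid" bit so new lookups fail, then waits a grace period before freeing (see offline_page_ext() below). A userspace cartoon of the tagging half; EXT_INVALID, ext_get and ext_invalidate are invented names, and the grace-period wait is only a comment here:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define EXT_INVALID 0x1UL

static int storage = 1234;                 /* the "page_ext array" */
static _Atomic uintptr_t section_ext;

/*
 * Models lookup_page_ext(): reject pointers whose low tag bit says
 * the array is being torn down. Callers would pair this with an
 * rcu_read_lock()/unlock() in the kernel.
 */
static int *ext_get(void)
{
    uintptr_t p = atomic_load_explicit(&section_ext, memory_order_acquire);
    if (!p || (p & EXT_INVALID))
        return NULL;
    return (int *)p;
}

/*
 * Models __invalidate_page_ext(): flip the tag bit in place so new
 * lookups fail while lookups already in flight still hold a usable
 * untagged pointer.
 */
static void ext_invalidate(void)
{
    uintptr_t p = atomic_load(&section_ext);
    atomic_store_explicit(&section_ext, p | EXT_INVALID, memory_order_release);
    /* kernel: synchronize_rcu() here, then free the untagged pointer */
}

int main(void)
{
    atomic_store(&section_ext, (uintptr_t)&storage);
    printf("before invalidate: %s\n", ext_get() ? "valid" : "NULL");
    ext_invalidate();
    printf("after invalidate:  %s\n", ext_get() ? "valid" : "NULL");
    return 0;
}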
#ifndef CONFIG_SPARSEMEM @@ -138,6 +184,7 @@ struct page_ext *lookup_page_ext(const struct page *page) unsigned long index; struct page_ext *base; + WARN_ON_ONCE(!rcu_read_lock_held()); base = NODE_DATA(page_to_nid(page))->node_page_ext; /* * The sanity checks the page allocator does upon freeing a @@ -206,20 +253,27 @@ fail: } #else /* CONFIG_FLAT_NODE_MEM_MAP */ +static bool page_ext_invalid(struct page_ext *page_ext) +{ + return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID); +} struct page_ext *lookup_page_ext(const struct page *page) { unsigned long pfn = page_to_pfn(page); struct mem_section *section = __pfn_to_section(pfn); + struct page_ext *page_ext = READ_ONCE(section->page_ext); + + WARN_ON_ONCE(!rcu_read_lock_held()); /* * The sanity checks the page allocator does upon freeing a * page can reach here before the page_ext arrays are * allocated when feeding a range of pages to the allocator * for the first time during bootup or memory hotplug. */ - if (!section->page_ext) + if (page_ext_invalid(page_ext)) return NULL; - return get_entry(section->page_ext, pfn); + return get_entry(page_ext, pfn); } EXPORT_SYMBOL_GPL(lookup_page_ext); @@ -299,9 +353,30 @@ static void __free_page_ext(unsigned long pfn) ms = __pfn_to_section(pfn); if (!ms || !ms->page_ext) return; - base = get_entry(ms->page_ext, pfn); + + base = READ_ONCE(ms->page_ext); + /* + * page_ext here can be valid while doing the roll back + * operation in online_page_ext(). + */ + if (page_ext_invalid(base)) + base = (void *)base - PAGE_EXT_INVALID; + WRITE_ONCE(ms->page_ext, NULL); + + base = get_entry(base, pfn); free_page_ext(base); - ms->page_ext = NULL; +} + +static void __invalidate_page_ext(unsigned long pfn) +{ + struct mem_section *ms; + void *val; + + ms = __pfn_to_section(pfn); + if (!ms || !ms->page_ext) + return; + val = (void *)ms->page_ext + PAGE_EXT_INVALID; + WRITE_ONCE(ms->page_ext, val); } static int __meminit online_page_ext(unsigned long start_pfn, @@ -344,6 +419,20 @@ static int __meminit offline_page_ext(unsigned long start_pfn, start = SECTION_ALIGN_DOWN(start_pfn); end = SECTION_ALIGN_UP(start_pfn + nr_pages); + /* + * Freeing of page_ext is done in 3 steps to avoid + * a use-after-free: + * 1) Traverse all the sections and mark their page_ext + * as invalid. + * 2) Wait for all the existing users of page_ext that + * started before invalidation to finish. + * 3) Free the page_ext. + */ + for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) + __invalidate_page_ext(pfn); + + synchronize_rcu(); + for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) + __free_page_ext(pfn); return 0; diff --git a/mm/page_idle.c index 144fb4ed961d..b5613232e881 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -92,10 +92,10 @@ static bool page_idle_clear_pte_refs_one(struct page *page, static void page_idle_clear_pte_refs(struct page *page) { /* - * Since rwc.arg is unused, rwc is effectively immutable, so we - * can make it static const to save some cycles and stack. + * Since rwc.try_lock is unused, rwc is effectively immutable, so we + * can make it static to save some cycles and stack.
*/ - static const struct rmap_walk_control rwc = { + static struct rmap_walk_control rwc = { .rmap_one = page_idle_clear_pte_refs_one, .anon_lock = page_lock_anon_vma_read, }; diff --git a/mm/page_owner.c b/mm/page_owner.c index 9501940a3ccd..8dd3f27db21a 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -173,7 +173,7 @@ void __reset_page_owner(struct page *page, unsigned int order) handle = save_stack(GFP_NOWAIT | __GFP_NOWARN); - page_ext = lookup_page_ext(page); + page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; for (i = 0; i < (1 << order); i++) { @@ -183,6 +183,7 @@ void __reset_page_owner(struct page *page, unsigned int order) page_owner->free_ts_nsec = free_ts_nsec; page_ext = page_ext_next(page_ext); } + page_ext_put(page_ext); } static inline void __set_page_owner_handle(struct page *page, @@ -210,19 +211,21 @@ static inline void __set_page_owner_handle(struct page *page, noinline void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext; depot_stack_handle_t handle; + handle = save_stack(gfp_mask); + + page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; - - handle = save_stack(gfp_mask); __set_page_owner_handle(page, page_ext, handle, order, gfp_mask); + page_ext_put(page_ext); } void __set_page_owner_migrate_reason(struct page *page, int reason) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get(page); struct page_owner *page_owner; if (unlikely(!page_ext)) @@ -230,12 +233,13 @@ void __set_page_owner_migrate_reason(struct page *page, int reason) page_owner = get_page_owner(page_ext); page_owner->last_migrate_reason = reason; + page_ext_put(page_ext); } void __split_page_owner(struct page *page, unsigned int nr) { int i; - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get(page); struct page_owner *page_owner; if (unlikely(!page_ext)) @@ -246,17 +250,25 @@ void __split_page_owner(struct page *page, unsigned int nr) page_owner->order = 0; page_ext = page_ext_next(page_ext); } + page_ext_put(page_ext); } void __copy_page_owner(struct page *oldpage, struct page *newpage) { - struct page_ext *old_ext = lookup_page_ext(oldpage); - struct page_ext *new_ext = lookup_page_ext(newpage); + struct page_ext *old_ext; + struct page_ext *new_ext; struct page_owner *old_page_owner, *new_page_owner; - if (unlikely(!old_ext || !new_ext)) + old_ext = page_ext_get(oldpage); + if (unlikely(!old_ext)) return; + new_ext = page_ext_get(newpage); + if (unlikely(!new_ext)) { + page_ext_put(old_ext); + return; + } + old_page_owner = get_page_owner(old_ext); new_page_owner = get_page_owner(new_ext); new_page_owner->order = old_page_owner->order; @@ -279,6 +291,8 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) */ __set_bit(PAGE_EXT_OWNER, &new_ext->flags); __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags); + page_ext_put(new_ext); + page_ext_put(old_ext); } void pagetypeinfo_showmixedcount_print(struct seq_file *m, @@ -335,12 +349,12 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m, if (PageReserved(page)) continue; - page_ext = lookup_page_ext(page); + page_ext = page_ext_get(page); if (unlikely(!page_ext)) continue; if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags)) - continue; + goto ext_put_continue; page_owner = get_page_owner(page_ext); page_mt = gfp_migratetype(page_owner->gfp_mask); @@ -351,9 +365,12 @@ void 
pagetypeinfo_showmixedcount_print(struct seq_file *m, count[pageblock_mt]++; pfn = block_end_pfn; + page_ext_put(page_ext); break; } pfn += (1UL << page_owner->order) - 1; +ext_put_continue: + page_ext_put(page_ext); } } @@ -432,7 +449,7 @@ err: void __dump_page_owner(struct page *page) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get((void *)page); struct page_owner *page_owner; depot_stack_handle_t handle; unsigned long *entries; @@ -451,6 +468,7 @@ void __dump_page_owner(struct page *page) if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { pr_alert("page_owner info is not present (never set?)\n"); + page_ext_put(page_ext); return; } @@ -483,6 +501,7 @@ void __dump_page_owner(struct page *page) if (page_owner->last_migrate_reason != -1) pr_alert("page has been migrated, last migrate reason: %s\n", migrate_reason_names[page_owner->last_migrate_reason]); + page_ext_put(page_ext); } static ssize_t @@ -508,6 +527,14 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) /* Find an allocated page */ for (; pfn < max_pfn; pfn++) { + /* + * This temporary page_owner is required so + * that we can avoid the context switches while holding + * the rcu lock and copying the page owner information to + * user through copy_to_user() or GFP_KERNEL allocations. + */ + struct page_owner page_owner_tmp; + /* * If the new page is in a new MAX_ORDER_NR_PAGES area, * validate the area as existing, skip it if not @@ -530,7 +557,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) continue; } - page_ext = lookup_page_ext(page); + page_ext = page_ext_get(page); if (unlikely(!page_ext)) continue; @@ -539,14 +566,14 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) * because we don't hold the zone lock. */ if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) - continue; + goto ext_put_continue; /* * Although we do have the info about past allocation of free * pages, it's not relevant for current memory usage. */ if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags)) - continue; + goto ext_put_continue; page_owner = get_page_owner(page_ext); @@ -555,7 +582,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) * would inflate the stats. 
*/ if (!IS_ALIGNED(pfn, 1 << page_owner->order)) - continue; + goto ext_put_continue; /* * Access to page_ext->handle isn't synchronous so we should @@ -563,13 +590,17 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) */ handle = READ_ONCE(page_owner->handle); if (!handle) - continue; + goto ext_put_continue; /* Record the next PFN to read in the file offset */ *ppos = (pfn - min_low_pfn) + 1; + page_owner_tmp = *page_owner; + page_ext_put(page_ext); return print_page_owner(buf, count, pfn, page, - page_owner, handle); + &page_owner_tmp, handle); +ext_put_continue: + page_ext_put(page_ext); } return 0; @@ -627,18 +658,20 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) if (PageReserved(page)) continue; - page_ext = lookup_page_ext(page); + page_ext = page_ext_get(page); if (unlikely(!page_ext)) continue; /* Maybe overlapping zone */ if (test_bit(PAGE_EXT_OWNER, &page_ext->flags)) - continue; + goto ext_put_continue; /* Found early allocated page */ __set_page_owner_handle(page, page_ext, early_handle, 0, 0); count++; +ext_put_continue: + page_ext_put(page_ext); } cond_resched(); } diff --git a/mm/page_pinner.c b/mm/page_pinner.c index 8bccb54fbbd3..a444584103ad 100644 --- a/mm/page_pinner.c +++ b/mm/page_pinner.c @@ -162,7 +162,7 @@ void __reset_page_pinner(struct page *page, unsigned int order, bool free) struct page_ext *page_ext; int i; - page_ext = lookup_page_ext(page); + page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; @@ -184,6 +184,7 @@ void __reset_page_pinner(struct page *page, unsigned int order, bool free) clear_bit(PAGE_EXT_GET, &page_ext->flags); page_ext = page_ext_next(page_ext); } + page_ext_put(page_ext); } static inline void __set_page_pinner_handle(struct page *page, @@ -206,14 +207,16 @@ static inline void __set_page_pinner_handle(struct page *page, noinline void __set_page_pinner(struct page *page, unsigned int order) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext; depot_stack_handle_t handle; + handle = save_stack(GFP_NOWAIT|__GFP_NOWARN); + + page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; - - handle = save_stack(GFP_NOWAIT|__GFP_NOWARN); __set_page_pinner_handle(page, page_ext, handle, order); + page_ext_put(page_ext); } static ssize_t @@ -233,7 +236,7 @@ print_page_pinner(bool longterm, char __user *buf, size_t count, struct captured ret = snprintf(kbuf, count, "Page pinned for %lld us\n", record->elapsed); } else { - s64 ts_usec = record->ts_usec; + u64 ts_usec = record->ts_usec; unsigned long rem_usec = do_div(ts_usec, 1000000); ret = snprintf(kbuf, count, @@ -279,7 +282,7 @@ err: void __dump_page_pinner(struct page *page) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get(page); struct page_pinner *page_pinner; depot_stack_handle_t handle; unsigned long *entries; @@ -288,7 +291,7 @@ void __dump_page_pinner(struct page *page) unsigned long pfn; int count; unsigned long rem_usec; - s64 ts_usec; + u64 ts_usec; if (unlikely(!page_ext)) { pr_alert("There is not page extension available.\n"); @@ -300,6 +303,7 @@ void __dump_page_pinner(struct page *page) count = atomic_read(&page_pinner->count); if (!count) { pr_alert("page_pinner info is not present (never set?)\n"); + page_ext_put(page_ext); return; } @@ -323,11 +327,12 @@ void __dump_page_pinner(struct page *page) nr_entries = stack_depot_fetch(handle, &entries); stack_trace_print(entries, nr_entries, 0); } + page_ext_put(page_ext); } void 
__page_pinner_migration_failed(struct page *page) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get(page); struct captured_pinner record; unsigned long flags; unsigned int idx; @@ -335,9 +340,12 @@ void __page_pinner_migration_failed(struct page *page) if (unlikely(!page_ext)) return; - if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) + if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) { + page_ext_put(page_ext); return; + } + page_ext_put(page_ext); record.handle = save_stack(GFP_NOWAIT|__GFP_NOWARN); record.ts_usec = ktime_to_us(ktime_get_boottime()); capture_page_state(page, &record); @@ -359,10 +367,11 @@ void __page_pinner_mark_migration_failed_pages(struct list_head *page_list) /* The page will be freed by putback_movable_pages soon */ if (page_count(page) == 1) continue; - page_ext = lookup_page_ext(page); + page_ext = page_ext_get(page); if (unlikely(!page_ext)) continue; __set_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags); + page_ext_put(page_ext); __page_pinner_migration_failed(page); } } diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 5d5d1c47894e..d11b6ba53d64 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -71,7 +71,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, do { again: next = pmd_addr_end(addr, end); - if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) { + if (pmd_none(*pmd)) { if (ops->pte_hole) err = ops->pte_hole(addr, next, depth, walk); if (err) @@ -129,7 +129,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, do { again: next = pud_addr_end(addr, end); - if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) { + if (pud_none(*pud)) { if (ops->pte_hole) err = ops->pte_hole(addr, next, depth, walk); if (err) @@ -318,19 +318,19 @@ static int __walk_page_range(unsigned long start, unsigned long end, struct vm_area_struct *vma = walk->vma; const struct mm_walk_ops *ops = walk->ops; - if (vma && ops->pre_vma) { + if (ops->pre_vma) { err = ops->pre_vma(start, end, walk); if (err) return err; } - if (vma && is_vm_hugetlb_page(vma)) { + if (is_vm_hugetlb_page(vma)) { if (ops->hugetlb_entry) err = walk_hugetlb_range(start, end, walk); } else err = walk_pgd_range(start, end, walk); - if (vma && ops->post_vma) + if (ops->post_vma) ops->post_vma(walk); return err; @@ -402,9 +402,13 @@ int walk_page_range(struct mm_struct *mm, unsigned long start, if (!vma) { /* after the last vma */ walk.vma = NULL; next = end; + if (ops->pte_hole) + err = ops->pte_hole(start, next, -1, &walk); } else if (start < vma->vm_start) { /* outside vma */ walk.vma = NULL; next = min(end, vma->vm_start); + if (ops->pte_hole) + err = ops->pte_hole(start, next, -1, &walk); } else { /* inside vma */ walk.vma = vma; next = min(end, vma->vm_end); @@ -422,9 +426,8 @@ int walk_page_range(struct mm_struct *mm, unsigned long start, } if (err < 0) break; - } - if (walk.vma || walk.ops->pte_hole) err = __walk_page_range(start, next, &walk); + } if (err) break; } while (start = next, start < end); @@ -454,9 +457,9 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start, if (start >= end || !walk.mm) return -EINVAL; - mmap_assert_locked(walk.mm); + mmap_assert_write_locked(walk.mm); - return __walk_page_range(start, end, &walk); + return walk_pgd_range(start, end, &walk); } int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops, diff --git a/mm/ptdump.c b/mm/ptdump.c index da751448d0e4..f84ea700662f 100644 --- 
a/mm/ptdump.c +++ b/mm/ptdump.c @@ -144,13 +144,13 @@ void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd) { const struct ptdump_range *range = st->range; - mmap_read_lock(mm); + mmap_write_lock(mm); while (range->start != range->end) { walk_page_range_novma(mm, range->start, range->end, &ptdump_ops, pgd, st); range++; } - mmap_read_unlock(mm); + mmap_write_unlock(mm); /* Flush out the last page */ st->note_page(st, 0, -1, 0); diff --git a/mm/rmap.c index 1a0a75d8b4ce..033b04704b59 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -518,13 +518,16 @@ out: * * Its a little more complex as it tries to keep the fast path to a single * atomic op -- the trylock. If we fail the trylock, we fall back to getting a - * reference like with page_get_anon_vma() and then block on the mutex. + * reference like with page_get_anon_vma() and then block on the mutex + * in the !rwc->try_lock case. */ -struct anon_vma *page_lock_anon_vma_read(struct page *page) +struct anon_vma *page_lock_anon_vma_read(struct page *page, + struct rmap_walk_control *rwc) { struct anon_vma *anon_vma = NULL; struct anon_vma *root_anon_vma; unsigned long anon_mapping; + bool success = false; rcu_read_lock(); anon_mapping = (unsigned long)READ_ONCE(page->mapping); @@ -547,6 +550,17 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page) } goto out; } + trace_android_vh_do_page_trylock(page, NULL, NULL, &success); + if (success) { + anon_vma = NULL; + goto out; + } + + if (rwc && rwc->try_lock) { + anon_vma = NULL; + rwc->contended = true; + goto out; + } /* trylock failed, we got to sleep */ if (!atomic_inc_not_zero(&anon_vma->refcount)) { @@ -784,6 +798,7 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, } if (pvmw.pte) { + trace_android_vh_look_around(&pvmw, page, vma, &referenced); if (ptep_clear_flush_young_notify(vma, address, pvmw.pte)) { /* @@ -844,8 +859,10 @@ static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) * @memcg: target memory cgroup * @vm_flags: collect encountered vma->vm_flags who actually referenced the page * - * Quick test_and_clear_referenced for all mappings to a page, - * returns the number of ptes which referenced the page. + * Quick test_and_clear_referenced for all mappings of a page. + * + * Return: The number of mappings which referenced the page. Return -1 if + * the function bailed out due to rmap lock contention. */ int page_referenced(struct page *page, int is_locked, @@ -861,6 +878,7 @@ int page_referenced(struct page *page, .rmap_one = page_referenced_one, .arg = (void *)&pra, .anon_lock = page_lock_anon_vma_read, + .try_lock = true, }; *vm_flags = 0; @@ -891,7 +909,7 @@ int page_referenced(struct page *page, if (we_locked) unlock_page(page); - return pra.referenced; + return rwc.contended ?
-1 : pra.referenced; } static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, @@ -1113,6 +1131,7 @@ void do_page_add_anon_rmap(struct page *page, { bool compound = flags & RMAP_COMPOUND; bool first; + bool success = false; if (unlikely(PageKsm(page))) lock_page_memcg(page); @@ -1126,7 +1145,10 @@ void do_page_add_anon_rmap(struct page *page, mapcount = compound_mapcount_ptr(page); first = atomic_inc_and_test(mapcount); } else { - first = atomic_inc_and_test(&page->_mapcount); + trace_android_vh_update_page_mapcount(page, true, compound, + &first, &success); + if (!success) + first = atomic_inc_and_test(&page->_mapcount); } if (first) { @@ -1200,13 +1222,22 @@ void __page_add_new_anon_rmap(struct page *page, void page_add_file_rmap(struct page *page, bool compound) { int i, nr = 1; + bool first_mapping; + bool success = false; VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); lock_page_memcg(page); if (compound && PageTransHuge(page)) { for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { - if (atomic_inc_and_test(&page[i]._mapcount)) - nr++; + trace_android_vh_update_page_mapcount(&page[i], true, + compound, &first_mapping, &success); + if ((success)) { + if (first_mapping) + nr++; + } else { + if (atomic_inc_and_test(&page[i]._mapcount)) + nr++; + } } if (!atomic_inc_and_test(compound_mapcount_ptr(page))) goto out; @@ -1222,8 +1253,15 @@ void page_add_file_rmap(struct page *page, bool compound) if (PageMlocked(page)) clear_page_mlock(compound_head(page)); } - if (!atomic_inc_and_test(&page->_mapcount)) - goto out; + trace_android_vh_update_page_mapcount(page, true, + compound, &first_mapping, &success); + if (success) { + if (!first_mapping) + goto out; + } else { + if (!atomic_inc_and_test(&page->_mapcount)) + goto out; + } } __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); out: @@ -1233,6 +1271,8 @@ out: static void page_remove_file_rmap(struct page *page, bool compound) { int i, nr = 1; + bool first_mapping; + bool success = false; VM_BUG_ON_PAGE(compound && !PageHead(page), page); @@ -1246,8 +1286,15 @@ static void page_remove_file_rmap(struct page *page, bool compound) /* page still mapped by someone else? */ if (compound && PageTransHuge(page)) { for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { - if (atomic_add_negative(-1, &page[i]._mapcount)) - nr++; + trace_android_vh_update_page_mapcount(&page[i], false, + compound, &first_mapping, &success); + if (success) { + if (first_mapping) + nr++; + } else { + if (atomic_add_negative(-1, &page[i]._mapcount)) + nr++; + } } if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) return; @@ -1256,8 +1303,15 @@ static void page_remove_file_rmap(struct page *page, bool compound) else __dec_node_page_state(page, NR_FILE_PMDMAPPED); } else { - if (!atomic_add_negative(-1, &page->_mapcount)) - return; + trace_android_vh_update_page_mapcount(page, false, + compound, &first_mapping, &success); + if (success) { + if (!first_mapping) + return; + } else { + if (!atomic_add_negative(-1, &page->_mapcount)) + return; + } } /* @@ -1274,6 +1328,8 @@ static void page_remove_file_rmap(struct page *page, bool compound) static void page_remove_anon_compound_rmap(struct page *page) { int i, nr; + bool first_mapping; + bool success = false; if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) return; @@ -1293,8 +1349,15 @@ static void page_remove_anon_compound_rmap(struct page *page) * them are still mapped. 
*/ for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { - if (atomic_add_negative(-1, &page[i]._mapcount)) - nr++; + trace_android_vh_update_page_mapcount(&page[i], false, + false, &first_mapping, &success); + if (success) { + if (first_mapping) + nr++; + } else { + if (atomic_add_negative(-1, &page[i]._mapcount)) + nr++; + } } /* @@ -1324,6 +1387,8 @@ static void page_remove_anon_compound_rmap(struct page *page) */ void page_remove_rmap(struct page *page, bool compound) { + bool first_mapping; + bool success = false; lock_page_memcg(page); if (!PageAnon(page)) { @@ -1336,10 +1401,16 @@ void page_remove_rmap(struct page *page, bool compound) goto out; } - /* page still mapped by someone else? */ - if (!atomic_add_negative(-1, &page->_mapcount)) - goto out; - + trace_android_vh_update_page_mapcount(page, false, + compound, &first_mapping, &success); + if (success) { + if (!first_mapping) + goto out; + } else { + /* page still mapped by someone else? */ + if (!atomic_add_negative(-1, &page->_mapcount)) + goto out; + } /* * We use the irq-unsafe __{inc|mod}_zone_page_stat because * these counters are not modified in interrupt context, and @@ -1642,7 +1713,30 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, /* MADV_FREE page check */ if (!PageSwapBacked(page)) { - if (!PageDirty(page)) { + int ref_count, map_count; + + /* + * Synchronize with gup_pte_range(): + * - clear PTE; barrier; read refcount + * - inc refcount; barrier; read PTE + */ + smp_mb(); + + ref_count = page_ref_count(page); + map_count = page_mapcount(page); + + /* + * Order reads for page refcount and dirty flag + * (see comments in __remove_mapping()). + */ + smp_rmb(); + + /* + * The only page refs must be one from isolation + * plus the rmap(s) (dropped by discard:). 
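The ref_count == 1 + map_count test in the hunk above depends on the barrier pairing described in its comment: reclaim clears the PTE, issues a full barrier, then reads the refcount, while GUP raises the refcount, issues a barrier, then re-reads the PTE, so at least one side observes the other. A hedged userspace sketch of that pairing with C11 seq_cst atomics, where pte, refcount and gup() are toy stand-ins rather than kernel interfaces:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Toy model: "pte" is 1 while the page is mapped, refcount counts users. */
static atomic_int pte = 1;
static atomic_int refcount = 1;    /* the isolation reference */

/* GUP side: take a reference, then re-check the PTE (inc; barrier; read PTE). */
static void *gup(void *arg)
{
    (void)arg;
    atomic_fetch_add(&refcount, 1);    /* seq_cst: acts as the barrier */
    if (atomic_load(&pte) == 0) {
        /* Raced with reclaim: undo and report failure. */
        atomic_fetch_sub(&refcount, 1);
        puts("gup: lost the race, would retry via the slow path");
    }
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, gup, NULL);

    /* Reclaim side: clear PTE, then read refcount (clear; barrier; read). */
    atomic_store(&pte, 0);             /* seq_cst store = clear + barrier */
    int refs = atomic_load(&refcount);
    int maps = 0;    /* simplification: mapcount after the PTE was cleared */
    if (refs == 1 + maps)
        puts("reclaim: only isolation + rmap refs remain, safe to discard");
    else
        puts("reclaim: concurrent pin detected, keep the page");

    pthread_join(t, NULL);
    return 0;
}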
+ */ + if (ref_count == 1 + map_count && + !PageDirty(page)) { /* Invalidate as we cleared the pte */ mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE); @@ -1816,7 +1910,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page, struct anon_vma *anon_vma; if (rwc->anon_lock) - return rwc->anon_lock(page); + return rwc->anon_lock(page, rwc); /* * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() @@ -1828,7 +1922,17 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page, if (!anon_vma) return NULL; + if (anon_vma_trylock_read(anon_vma)) + goto out; + + if (rwc->try_lock) { + anon_vma = NULL; + rwc->contended = true; + goto out; + } + anon_vma_lock_read(anon_vma); +out: return anon_vma; } @@ -1905,6 +2009,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, struct address_space *mapping = page_mapping(page); pgoff_t pgoff_start, pgoff_end; struct vm_area_struct *vma; + bool got_lock = false, success = false; /* * The page lock not only makes sure that page->mapping cannot @@ -1919,8 +2024,25 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, pgoff_start = page_to_pgoff(page); pgoff_end = pgoff_start + thp_nr_pages(page) - 1; - if (!locked) - i_mmap_lock_read(mapping); + if (!locked) { + trace_android_vh_do_page_trylock(page, + &mapping->i_mmap_rwsem, &got_lock, &success); + if (success) { + if (!got_lock) + return; + } else { + if (i_mmap_trylock_read(mapping)) + goto lookup; + + if (rwc->try_lock) { + rwc->contended = true; + return; + } + + i_mmap_lock_read(mapping); + } + } +lookup: vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end) { unsigned long address = vma_address(page, vma); diff --git a/mm/shmem.c b/mm/shmem.c index 8fb9d2ddec70..768c91896036 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -46,6 +46,7 @@ #undef CREATE_TRACE_POINTS #include +#include static struct vfsmount *shm_mnt; @@ -1430,6 +1431,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) SetPageUptodate(page); } + trace_android_vh_set_shmem_page_flag(page); swap = get_swap_page(page); if (!swap.val) goto redirty; @@ -4311,7 +4313,6 @@ int reclaim_shmem_address_space(struct address_space *mapping) pgoff_t start = 0; struct page *page; LIST_HEAD(page_list); - int reclaimed; XA_STATE(xas, &mapping->i_pages, start); if (!shmem_mapping(mapping)) @@ -4329,8 +4330,6 @@ int reclaim_shmem_address_space(struct address_space *mapping) continue; list_add(&page->lru, &page_list); - inc_node_page_state(page, NR_ISOLATED_ANON + - page_is_file_lru(page)); if (need_resched()) { xas_pause(&xas); @@ -4338,9 +4337,8 @@ int reclaim_shmem_address_space(struct address_space *mapping) } } rcu_read_unlock(); - reclaimed = reclaim_pages_from_list(&page_list); - return reclaimed; + return reclaim_pages(&page_list); #else return 0; #endif diff --git a/mm/slab.c b/mm/slab.c index eced754636ad..aa4ef18ddfb6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3016,10 +3016,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, objp += obj_offset(cachep); if (cachep->ctor && cachep->flags & SLAB_POISON) cachep->ctor(objp); - if (ARCH_SLAB_MINALIGN && - ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { - pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n", - objp, (int)ARCH_SLAB_MINALIGN); + if ((unsigned long)objp & (arch_slab_minalign() - 1)) { + pr_err("0x%px: not aligned to arch_slab_minalign()=%u\n", objp, + arch_slab_minalign()); } return objp; } @@ -3428,6 +3427,7 @@ static 
__always_inline void __cache_free(struct kmem_cache *cachep, void *objp, if (is_kfence_address(objp)) { kmemleak_free_recursive(objp, cachep->flags); + memcg_slab_free_hook(cachep, &objp, 1); __kfence_free(objp); return; } diff --git a/mm/slab_common.c b/mm/slab_common.c index c751b18f7e60..05135ebb6159 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -155,8 +155,7 @@ static unsigned int calculate_alignment(slab_flags_t flags, align = max(align, ralign); } - if (align < ARCH_SLAB_MINALIGN) - align = ARCH_SLAB_MINALIGN; + align = max(align, arch_slab_minalign()); return ALIGN(align, sizeof(void *)); } diff --git a/mm/slob.c b/mm/slob.c index 7cc9805c8091..37072a7d2f42 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -469,9 +469,11 @@ static __always_inline void * __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) { unsigned int *m; - int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); + unsigned int minalign; void *ret; + minalign = max_t(unsigned int, ARCH_KMALLOC_MINALIGN, + arch_slab_minalign()); gfp &= gfp_allowed_mask; fs_reclaim_acquire(gfp); @@ -485,7 +487,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) * kmalloc()'d objects. */ if (is_power_of_2(size)) - align = max(minalign, (int) size); + align = max_t(unsigned int, minalign, size); if (!size) return ZERO_SIZE_PTR; @@ -547,8 +549,11 @@ void kfree(const void *block) sp = virt_to_page(block); if (PageSlab(sp)) { - int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); + unsigned int align = max_t(unsigned int, + ARCH_KMALLOC_MINALIGN, + arch_slab_minalign()); unsigned int *m = (unsigned int *)(block - align); + slob_free(m, *m + align); } else { unsigned int order = compound_order(sp); @@ -564,7 +569,7 @@ EXPORT_SYMBOL(kfree); size_t __ksize(const void *block) { struct page *sp; - int align; + unsigned int align; unsigned int *m; BUG_ON(!block); @@ -575,7 +580,8 @@ size_t __ksize(const void *block) if (unlikely(!PageSlab(sp))) return page_size(sp); - align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); + align = max_t(unsigned int, ARCH_KMALLOC_MINALIGN, + arch_slab_minalign()); m = (unsigned int *)(block - align); return SLOB_UNITS(*m) * SLOB_UNIT; } diff --git a/mm/slub.c b/mm/slub.c index 00b4e2ab6252..3acf083aaa73 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2351,6 +2351,7 @@ redo: c->page = NULL; c->freelist = NULL; + c->tid = next_tid(c->tid); } /* @@ -2484,8 +2485,6 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) { stat(s, CPUSLAB_FLUSH); deactivate_slab(s, c->page, c->freelist, c); - - c->tid = next_tid(c->tid); } /* @@ -2771,6 +2770,7 @@ redo: if (!freelist) { c->page = NULL; + c->tid = next_tid(c->tid); stat(s, DEACTIVATE_BYPASS); goto new_slab; } @@ -5547,7 +5547,8 @@ static char *create_unique_id(struct kmem_cache *s) char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); char *p = name; - BUG_ON(!name); + if (!name) + return ERR_PTR(-ENOMEM); *p++ = ':'; /* @@ -5605,6 +5606,8 @@ static int sysfs_slab_add(struct kmem_cache *s) * for the symlinks. 
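The create_unique_id() change above swaps a BUG_ON() for the kernel's error-pointer convention, so the caller can unwind instead of crashing. As a rough self-contained illustration, this userspace program re-creates ERR_PTR()/IS_ERR()/PTR_ERR() (simplified from the kernel's definitions; the cache-name format is made up):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-ins for the kernel's error-pointer helpers. */
#define MAX_ERRNO    4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Like the patched create_unique_id(): report failure instead of BUG_ON(). */
static char *create_unique_id(const char *base)
{
    char *name = malloc(64);

    if (!name)
        return ERR_PTR(-ENOMEM);
    snprintf(name, 64, ":%s", base);
    return name;
}

int main(void)
{
    char *name = create_unique_id("t-0000064");

    if (IS_ERR(name)) {
        fprintf(stderr, "create_unique_id: %s\n", strerror(-PTR_ERR(name)));
        return 1;
    }
    puts(name);
    free(name);
    return 0;
}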
*/ name = create_unique_id(s); + if (IS_ERR(name)) + return PTR_ERR(name); } s->kobj.kset = kset; diff --git a/mm/swap.c b/mm/swap.c index abf445bd7721..16b2bc48211e 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -437,6 +437,7 @@ void mark_page_accessed(struct page *page) { page = compound_head(page); + trace_android_vh_mark_page_accessed(page); if (!PageReferenced(page)) { SetPageReferenced(page); } else if (PageUnevictable(page)) { diff --git a/mm/swap_slots.c b/mm/swap_slots.c index 1392649a4d9a..43231ae6c3fd 100644 --- a/mm/swap_slots.c +++ b/mm/swap_slots.c @@ -133,6 +133,8 @@ static int alloc_swap_slot_cache(unsigned int cpu) * as kvzalloc could trigger reclaim and get_swap_page, * which can lock swap_slots_cache_mutex. */ + trace_android_rvh_alloc_swap_slot_cache(&per_cpu(swp_slots, cpu), + &ret, &skip); trace_android_vh_alloc_swap_slot_cache(&per_cpu(swp_slots, cpu), &ret, &skip); if (skip) @@ -190,6 +192,8 @@ static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type, bool skip = false; cache = &per_cpu(swp_slots, cpu); + trace_android_rvh_drain_slots_cache_cpu(cache, type, + free_slots, &skip); trace_android_vh_drain_slots_cache_cpu(cache, type, free_slots, &skip); if (skip) @@ -298,6 +302,7 @@ int free_swap_slot(swp_entry_t entry) bool skip = false; cache = raw_cpu_ptr(&swp_slots); + trace_android_rvh_free_swap_slot(entry, cache, &skip); trace_android_vh_free_swap_slot(entry, cache, &skip); if (skip) return 0; @@ -335,6 +340,7 @@ swp_entry_t get_swap_page(struct page *page) bool found = false; entry.val = 0; + trace_android_rvh_get_swap_page(page, &entry, raw_cpu_ptr(&swp_slots), &found); trace_android_vh_get_swap_page(page, &entry, raw_cpu_ptr(&swp_slots), &found); if (found) goto out; diff --git a/mm/swapfile.c b/mm/swapfile.c index 677f235806c2..b3cc17423d38 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2908,6 +2908,7 @@ static struct swap_info_struct *alloc_swap_info(void) int i; bool skip = false; + trace_android_rvh_alloc_si(&p, &skip); trace_android_vh_alloc_si(&p, &skip); if (!skip) p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL); diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 5508f7d9e2dc..80da102e4c36 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -151,6 +151,8 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm, /* don't free the page */ goto out; } + + flush_dcache_page(page); } else { page = *pagep; *pagep = NULL; @@ -669,6 +671,7 @@ retry: err = -EFAULT; goto out; } + flush_dcache_page(page); goto retry; } else BUG_ON(page); diff --git a/mm/util.c b/mm/util.c index 8a859bd9d41f..9e2b223ebde3 100644 --- a/mm/util.c +++ b/mm/util.c @@ -334,6 +334,38 @@ unsigned long randomize_stack_top(unsigned long stack_top) #endif } +/** + * randomize_page - Generate a random, page aligned address + * @start: The smallest acceptable address the caller will take. + * @range: The size of the area, starting at @start, within which the + * random address must fall. + * + * If @start + @range would overflow, @range is capped. + * + * NOTE: Historical use of randomize_range, which this replaces, presumed that + * @start was already page aligned. We now align it regardless. + * + * Return: A page aligned address within [start, start + range). On error, + * @start is returned. 
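The kernel-doc just above fully specifies randomize_page(); before the kernel body continues below, here is a self-contained userspace rendering of the same alignment and overflow-capping logic, using libc random() in place of get_random_long() (illustrative only, not the kernel source):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PAGE_ALIGNED(x) (((x) & (PAGE_SIZE - 1)) == 0)

static unsigned long randomize_page(unsigned long start, unsigned long range)
{
    if (!PAGE_ALIGNED(start)) {
        range -= PAGE_ALIGN(start) - start;  /* shrink range by the alignment gap */
        start = PAGE_ALIGN(start);
    }

    if (start > ULONG_MAX - range)           /* cap range so start + range cannot wrap */
        range = ULONG_MAX - start;

    range >>= PAGE_SHIFT;                    /* choose among whole pages only */
    if (range == 0)
        return start;

    return start + ((unsigned long)random() % range << PAGE_SHIFT);
}

int main(void)
{
    printf("%#lx\n", randomize_page(0x7f0000001234UL, 16 * PAGE_SIZE));
    return 0;
}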
+ */ +unsigned long randomize_page(unsigned long start, unsigned long range) +{ + if (!PAGE_ALIGNED(start)) { + range -= PAGE_ALIGN(start) - start; + start = PAGE_ALIGN(start); + } + + if (start > ULONG_MAX - range) + range = ULONG_MAX - start; + + range >>= PAGE_SHIFT; + + if (range == 0) + return start; + + return start + (get_random_long() % range << PAGE_SHIFT); +} + #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT unsigned long arch_randomize_brk(struct mm_struct *mm) { @@ -634,6 +666,21 @@ void kvfree_sensitive(const void *addr, size_t len) } EXPORT_SYMBOL(kvfree_sensitive); +void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) +{ + void *newp; + + if (oldsize >= newsize) + return (void *)p; + newp = kvmalloc(newsize, flags); + if (!newp) + return NULL; + memcpy(newp, p, oldsize); + kvfree(p); + return newp; +} +EXPORT_SYMBOL(kvrealloc); + static inline void *__page_rmapping(struct page *page) { unsigned long mapping; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d5615977e7f3..3b56c30a8e93 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1277,6 +1277,7 @@ int unregister_vmap_purge_notifier(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); +bool lazy_vunmap_enable __read_mostly = true; /* * lazy_max_pages is the maximum amount of virtual address space we gather up * before attempting to purge with a TLB flush. @@ -1297,6 +1298,9 @@ static unsigned long lazy_max_pages(void) { unsigned int log; + if (!lazy_vunmap_enable) + return 0; + log = fls(num_online_cpus()); return log * (32UL * 1024 * 1024 / PAGE_SIZE); @@ -2170,6 +2174,7 @@ struct vm_struct *remove_vm_area(const void *addr) if (va && va->vm) { struct vm_struct *vm = va->vm; + trace_android_vh_remove_vmalloc_stack(vm); va->vm = NULL; spin_unlock(&vmap_area_lock); diff --git a/mm/vmscan.c b/mm/vmscan.c index 24b0c38c2c5c..4a11800e1b8f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1020,11 +1020,24 @@ static enum page_references page_check_references(struct page *page, { int referenced_ptes, referenced_page; unsigned long vm_flags; + bool should_protect = false; + bool trylock_fail = false; + int ret = 0; + trace_android_vh_page_should_be_protected(page, &should_protect); + if (unlikely(should_protect)) + return PAGEREF_ACTIVATE; + + trace_android_vh_page_trylock_set(page); + trace_android_vh_check_page_look_around_ref(page, &ret); + if (ret) + return ret; referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, &vm_flags); referenced_page = TestClearPageReferenced(page); - + trace_android_vh_page_trylock_get_result(page, &trylock_fail); + if (trylock_fail) + return PAGEREF_KEEP; /* * Mlock lost the isolation race with us. Let try_to_unmap() * move the page to the unevictable list. @@ -1032,6 +1045,10 @@ static enum page_references page_check_references(struct page *page, if (vm_flags & VM_LOCKED) return PAGEREF_RECLAIM; + /* rmap lock contention: rotate */ + if (referenced_ptes == -1) + return PAGEREF_KEEP; + if (referenced_ptes) { /* * All mapped pages start out with page table @@ -1335,7 +1352,8 @@ static unsigned int shrink_page_list(struct list_head *page_list, if (unlikely(PageTransHuge(page))) flags |= TTU_SPLIT_HUGE_PMD; - + if (!ignore_references) + trace_android_vh_page_trylock_set(page); if (!try_to_unmap(page, flags)) { stat->nr_unmap_fail += nr_pages; if (!was_swapbacked && PageSwapBacked(page)) @@ -1446,6 +1464,7 @@ static unsigned int shrink_page_list(struct list_head *page_list, * increment nr_reclaimed here (and * leave it off the LRU). 
*/ + trace_android_vh_page_trylock_clear(page); nr_reclaimed++; continue; } @@ -1479,6 +1498,7 @@ free_it: * Is there need to periodically free_page_list? It would * appear not as the counts should be low */ + trace_android_vh_page_trylock_clear(page); if (unlikely(PageTransHuge(page))) destroy_compound_page(page); else @@ -1564,36 +1584,6 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone, return nr_reclaimed; } -int reclaim_pages_from_list(struct list_head *page_list) -{ - struct scan_control sc = { - .gfp_mask = GFP_KERNEL, - .priority = DEF_PRIORITY, - .may_writepage = 1, - .may_unmap = 1, - .may_swap = 1, - }; - unsigned long nr_reclaimed; - struct reclaim_stat dummy_stat; - struct page *page; - - list_for_each_entry(page, page_list, lru) - ClearPageActive(page); - - nr_reclaimed = shrink_page_list(page_list, NULL, &sc, - &dummy_stat, false); - while (!list_empty(page_list)) { - - page = lru_to_page(page_list); - list_del(&page->lru); - dec_node_page_state(page, NR_ISOLATED_ANON + - page_is_file_lru(page)); - putback_lru_page(page); - } - - return nr_reclaimed; -} - /* * Attempt to remove the specified page from its LRU. Only take this page * if it is of the appropriate PageActive status. Pages which are being @@ -1757,6 +1747,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, case 0: nr_taken += nr_pages; nr_zone_taken[page_zonenum(page)] += nr_pages; + trace_android_vh_del_page_from_lrulist(page, false, lru); list_move(&page->lru, dst); break; @@ -1931,6 +1922,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec, nr_pages = thp_nr_pages(page); update_lru_size(lruvec, lru, page_zonenum(page), nr_pages); list_move(&page->lru, &lruvec->lists[lru]); + trace_android_vh_add_page_to_lrulist(page, false, lru); if (put_page_testzero(page)) { __ClearPageLRU(page); @@ -2022,6 +2014,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, return 0; nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false); + trace_android_vh_handle_failed_page_trylock(&page_list); spin_lock_irq(&pgdat->lru_lock); @@ -2034,7 +2027,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, __count_vm_events(item, nr_reclaimed); __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed); - spin_unlock_irq(&pgdat->lru_lock); mem_cgroup_uncharge_list(&page_list); @@ -2085,6 +2077,7 @@ static void shrink_active_list(unsigned long nr_to_scan, int file = is_file_lru(lru); struct pglist_data *pgdat = lruvec_pgdat(lruvec); bool bypass = false; + bool should_protect = false; lru_add_drain(); @@ -2119,12 +2112,20 @@ static void shrink_active_list(unsigned long nr_to_scan, } } + trace_android_vh_page_should_be_protected(page, &should_protect); + if (unlikely(should_protect)) { + nr_rotated += thp_nr_pages(page); + list_add(&page->lru, &l_active); + continue; + } + trace_android_vh_page_referenced_check_bypass(page, nr_to_scan, lru, &bypass); if (bypass) goto skip_page_referenced; - + trace_android_vh_page_trylock_set(page); + /* Referenced or rmap lock contention: rotate */ if (page_referenced(page, 0, sc->target_mem_cgroup, - &vm_flags)) { + &vm_flags) != 0) { /* * Identify referenced, file-backed active pages and * give them one more trip around the active list. So @@ -2135,11 +2136,13 @@ static void shrink_active_list(unsigned long nr_to_scan, * so we ignore them here. 
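A toy decision helper distilling the new contention handling in page_check_references() above: a -1 return from page_referenced() now means "rmap lock contended", and the caller rotates the page (KEEP) rather than stalling reclaim on the lock. The enum names mirror the kernel's, but the function body is only a sketch:

#include <stdio.h>

enum page_references {
    PAGEREF_RECLAIM,
    PAGEREF_KEEP,
    PAGEREF_ACTIVATE,
};

/* page_referenced_result stands in for the real rmap walk. */
static enum page_references check_references(int page_referenced_result)
{
    if (page_referenced_result == -1)
        return PAGEREF_KEEP;     /* rmap lock contention: rotate */
    if (page_referenced_result > 0)
        return PAGEREF_ACTIVATE; /* recently referenced: keep it active */
    return PAGEREF_RECLAIM;
}

int main(void)
{
    printf("contended -> %d (KEEP)\n", check_references(-1));
    printf("referenced -> %d (ACTIVATE)\n", check_references(2));
    printf("idle -> %d (RECLAIM)\n", check_references(0));
    return 0;
}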
*/ if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) { + trace_android_vh_page_trylock_clear(page); nr_rotated += thp_nr_pages(page); list_add(&page->lru, &l_active); continue; } } + trace_android_vh_page_trylock_clear(page); skip_page_referenced: ClearPageActive(page); /* we are de-activating */ SetPageWorkingset(page); @@ -2221,6 +2224,7 @@ unsigned long reclaim_pages(struct list_head *page_list) return nr_reclaimed; } +EXPORT_SYMBOL_GPL(reclaim_pages); static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, struct lruvec *lruvec, struct scan_control *sc) @@ -2402,7 +2406,6 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, denominator = ap + fp; out: trace_android_vh_tune_scan_type((char *)(&scan_balance)); - trace_android_vh_tune_memcg_scan_type(memcg, (char *)(&scan_balance)); for_each_evictable_lru(lru) { int file = is_file_lru(lru); unsigned long lruvec_size; @@ -2519,8 +2522,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) enum lru_list lru; unsigned long nr_reclaimed = 0; unsigned long nr_to_reclaim = sc->nr_to_reclaim; + bool proportional_reclaim; struct blk_plug plug; - bool scan_adjusted; get_scan_count(lruvec, sc, nr); @@ -2538,8 +2541,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) * abort proportional reclaim if either the file or anon lru has already * dropped to zero at the first pass. */ - scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() && - sc->priority == DEF_PRIORITY); + proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() && + sc->priority == DEF_PRIORITY); blk_start_plug(&plug); while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || @@ -2559,7 +2562,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) cond_resched(); - if (nr_reclaimed < nr_to_reclaim || scan_adjusted) + if (nr_reclaimed < nr_to_reclaim || proportional_reclaim) continue; /* @@ -2610,8 +2613,6 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) nr_scanned = targets[lru] - nr[lru]; nr[lru] = targets[lru] * (100 - percentage) / 100; nr[lru] -= min(nr[lru], nr_scanned); - - scan_adjusted = true; } blk_finish_plug(&plug); sc->nr_reclaimed += nr_reclaimed; diff --git a/mm/vmstat.c b/mm/vmstat.c index 80c1e0a0f094..604a993782ce 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1509,10 +1509,6 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m, if (!page) continue; - /* Watch for unexpected holes punched in the memmap */ - if (!memmap_valid_within(pfn, page, zone)) - continue; - if (page_zone(page) != zone) continue; diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 6fce68d224b2..1b309c67d1d6 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1748,11 +1748,40 @@ static enum fullness_group putback_zspage(struct size_class *class, */ static void lock_zspage(struct zspage *zspage) { - struct page *page = get_first_page(zspage); + struct page *curr_page, *page; - do { - lock_page(page); - } while ((page = get_next_page(page)) != NULL); + /* + * Pages we haven't locked yet can be migrated off the list while we're + * trying to lock them, so we need to be careful and only attempt to + * lock each page under migrate_read_lock(). Otherwise, the page we lock + * may no longer belong to the zspage. This means that we may wait for + * the wrong page to unlock, so we must take a reference to the page + * prior to waiting for it to unlock outside migrate_read_lock(). 
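The zsmalloc comment above describes the whole locking protocol: never sleep on a page lock while holding the migration lock; instead trylock, and on failure pin the page, drop the outer lock, wait, and retry. A hedged userspace model of the same shape using pthreads, where migrate_lock and struct obj stand in for the zspage migration lock and its pages (the real code waits with wait_on_page_locked() rather than a lock/unlock pair):

#include <pthread.h>
#include <stdio.h>

struct obj {
    pthread_mutex_t lock;
    int refs;    /* would be a proper refcount in real code */
};

static pthread_rwlock_t migrate_lock = PTHREAD_RWLOCK_INITIALIZER;

static void lock_obj_carefully(struct obj *o)
{
    for (;;) {
        pthread_rwlock_rdlock(&migrate_lock);
        if (pthread_mutex_trylock(&o->lock) == 0) {
            pthread_rwlock_unlock(&migrate_lock);
            return;    /* got it without ever sleeping under the rwlock */
        }
        o->refs++;                               /* pin the object... */
        pthread_rwlock_unlock(&migrate_lock);    /* ...then drop the outer lock */
        pthread_mutex_lock(&o->lock);            /* sleep until the holder is done */
        pthread_mutex_unlock(&o->lock);          /* we only waited; must retry the
                                                    trylock, the object may have moved */
        o->refs--;
    }
}

int main(void)
{
    struct obj o = { PTHREAD_MUTEX_INITIALIZER, 0 };

    lock_obj_carefully(&o);
    puts("locked");
    pthread_mutex_unlock(&o.lock);
    return 0;
}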
+ */ + while (1) { + migrate_read_lock(zspage); + page = get_first_page(zspage); + if (trylock_page(page)) + break; + get_page(page); + migrate_read_unlock(zspage); + wait_on_page_locked(page); + put_page(page); + } + + curr_page = page; + while ((page = get_next_page(curr_page))) { + if (trylock_page(page)) { + curr_page = page; + } else { + get_page(page); + migrate_read_unlock(zspage); + wait_on_page_locked(page); + put_page(page); + migrate_read_lock(zspage); + } + } + migrate_read_unlock(zspage); } static int zs_init_fs_context(struct fs_context *fc) diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index d12c9a8a9953..64a94c9812da 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -278,9 +278,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) return 0; out_free_newdev: - if (new_dev->reg_state == NETREG_UNINITIALIZED || - new_dev->reg_state == NETREG_UNREGISTERED) - free_netdev(new_dev); + free_netdev(new_dev); return err; } diff --git a/net/9p/client.c b/net/9p/client.c index bf6ed00d7c37..e8862cd4f91b 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -893,16 +893,13 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt) struct p9_fid *fid; p9_debug(P9_DEBUG_FID, "clnt %p\n", clnt); - fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL); + fid = kzalloc(sizeof(struct p9_fid), GFP_KERNEL); if (!fid) return NULL; - memset(&fid->qid, 0, sizeof(struct p9_qid)); fid->mode = -1; fid->uid = current_fsuid(); fid->clnt = clnt; - fid->rdir = NULL; - fid->fid = 0; idr_preload(GFP_KERNEL); spin_lock_irq(&clnt->lock); diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 8f528e783a6c..e070a0b8e5ca 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -118,7 +118,7 @@ struct p9_conn { struct list_head unsent_req_list; struct p9_req_t *rreq; struct p9_req_t *wreq; - char tmp_buf[7]; + char tmp_buf[P9_HDRSZ]; struct p9_fcall rc; int wpos; int wsize; @@ -200,11 +200,15 @@ static void p9_conn_cancel(struct p9_conn *m, int err) list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { list_move(&req->req_list, &cancel_list); + req->status = REQ_STATUS_ERROR; } list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { list_move(&req->req_list, &cancel_list); + req->status = REQ_STATUS_ERROR; } + spin_unlock(&m->client->lock); + list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req); list_del(&req->req_list); @@ -212,7 +216,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err) req->t_err = err; p9_client_cb(m->client, req, REQ_STATUS_ERROR); } - spin_unlock(&m->client->lock); } static __poll_t @@ -288,7 +291,7 @@ static void p9_read_work(struct work_struct *work) if (!m->rc.sdata) { m->rc.sdata = m->tmp_buf; m->rc.offset = 0; - m->rc.capacity = 7; /* start by reading header */ + m->rc.capacity = P9_HDRSZ; /* start by reading header */ } clear_bit(Rpending, &m->wsched); @@ -311,7 +314,7 @@ static void p9_read_work(struct work_struct *work) p9_debug(P9_DEBUG_TRANS, "got new header\n"); /* Header size */ - m->rc.size = 7; + m->rc.size = P9_HDRSZ; err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0); if (err) { p9_debug(P9_DEBUG_ERROR, @@ -820,11 +823,14 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd) goto out_free_ts; if (!(ts->rd->f_mode & FMODE_READ)) goto out_put_rd; + /* prevent workers from hanging on IO when fd is a pipe */ + ts->rd->f_flags |= O_NONBLOCK; ts->wr = fget(wfd); if (!ts->wr) goto out_put_rd; if (!(ts->wr->f_mode & FMODE_WRITE)) goto 
out_put_wr; + ts->wr->f_flags |= O_NONBLOCK; client->trans = ts; client->status = Connected; @@ -846,8 +852,10 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket) struct file *file; p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL); - if (!p) + if (!p) { + sock_release(csocket); return -ENOMEM; + } csocket->sk->sk_allocation = GFP_NOIO; file = sock_alloc_file(csocket, 0, NULL); diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 432ac5a16f2e..6c8a33f98f09 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -231,6 +231,14 @@ static void p9_xen_response(struct work_struct *work) continue; } + if (h.size > req->rc.capacity) { + dev_warn(&priv->dev->dev, + "requested packet size too big: %d for tag %d with capacity %zd\n", + h.size, h.tag, req->rc.capacity); + req->status = REQ_STATUS_ERROR; + goto recv_error; + } + memcpy(&req->rc, &h, sizeof(h)); req->rc.offset = 0; @@ -240,6 +248,7 @@ static void p9_xen_response(struct work_struct *work) masked_prod, &masked_cons, XEN_9PFS_RING_SIZE(ring)); +recv_error: virt_mb(); cons += h.size; ring->intf->in_cons = cons; diff --git a/net/TEST_MAPPING b/net/TEST_MAPPING new file mode 100644 index 000000000000..c8614a6ad3d2 --- /dev/null +++ b/net/TEST_MAPPING @@ -0,0 +1,7 @@ +{ + "presubmit": [ + { + "name": "CtsNetTestCases" + } + ] +} diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c index 829db9eba0cb..aaf64b953915 100644 --- a/net/atm/mpoa_proc.c +++ b/net/atm/mpoa_proc.c @@ -219,11 +219,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff, if (!page) return -ENOMEM; - for (p = page, len = 0; len < nbytes; p++, len++) { + for (p = page, len = 0; len < nbytes; p++) { if (get_user(*p, buff++)) { free_page((unsigned long)page); return -EFAULT; } + len += 1; if (*p == '\0' || *p == '\n') break; } diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 9e0eef7fe9ad..a1f4cb836fcf 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -89,17 +89,21 @@ again: sk = s->sk; if (!sk) { spin_unlock_bh(&ax25_list_lock); - s->ax25_dev = NULL; ax25_disconnect(s, ENETUNREACH); + s->ax25_dev = NULL; spin_lock_bh(&ax25_list_lock); goto again; } sock_hold(sk); spin_unlock_bh(&ax25_list_lock); lock_sock(sk); - s->ax25_dev = NULL; - release_sock(sk); ax25_disconnect(s, ENETUNREACH); + s->ax25_dev = NULL; + if (sk->sk_socket) { + dev_put(ax25_dev->dev); + ax25_dev_put(ax25_dev); + } + release_sock(sk); spin_lock_bh(&ax25_list_lock); sock_put(sk); /* The entry could have been deleted from the @@ -365,21 +369,25 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl))) return -EFAULT; - if ((ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr)) == NULL) - return -ENODEV; - if (ax25_ctl.digi_count > AX25_MAX_DIGIS) return -EINVAL; if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL) return -EINVAL; + ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr); + if (!ax25_dev) + return -ENODEV; + digi.ndigi = ax25_ctl.digi_count; for (k = 0; k < digi.ndigi; k++) digi.calls[k] = ax25_ctl.digi_addr[k]; - if ((ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev)) == NULL) + ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev); + if (!ax25) { + ax25_dev_put(ax25_dev); return -ENOTCONN; + } switch (ax25_ctl.cmd) { case AX25_KILL: @@ -446,6 +454,7 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) } out_put: + ax25_dev_put(ax25_dev); ax25_cb_put(ax25); 
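The ax25 hunks that follow all enforce one rule: ax25_addr_ax25dev() now returns with a reference held, so every exit path, success or error, must drop it. A compact userspace model of that lookup-holds-a-reference discipline (dev_lookup(), dev_put() and add_forward() are illustrative names, not the kernel functions):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
    atomic_int refcount;
    int forward;
};

static struct dev *dev_lookup(struct dev *d)    /* like ax25_addr_ax25dev() */
{
    atomic_fetch_add(&d->refcount, 1);    /* grab a ref before returning */
    return d;
}

static void dev_put(struct dev *d)
{
    if (atomic_fetch_sub(&d->refcount, 1) == 1)
        free(d);                          /* last reference frees the object */
}

static int add_forward(struct dev *table, int target)
{
    struct dev *d = dev_lookup(table);

    if (d->forward) {    /* error path: still must drop the lookup ref */
        dev_put(d);
        return -1;
    }
    d->forward = target;
    dev_put(d);          /* success path drops it too */
    return 0;
}

int main(void)
{
    struct dev *d = malloc(sizeof(*d));

    atomic_init(&d->refcount, 1);
    d->forward = 0;
    printf("first add: %d, second add: %d\n",
           add_forward(d, 7), add_forward(d, 9));
    dev_put(d);
    return 0;
}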
return ret; @@ -971,14 +980,16 @@ static int ax25_release(struct socket *sock) { struct sock *sk = sock->sk; ax25_cb *ax25; + ax25_dev *ax25_dev; if (sk == NULL) return 0; sock_hold(sk); - sock_orphan(sk); lock_sock(sk); + sock_orphan(sk); ax25 = sk_to_ax25(sk); + ax25_dev = ax25->ax25_dev; if (sk->sk_type == SOCK_SEQPACKET) { switch (ax25->state) { @@ -1040,6 +1051,15 @@ static int ax25_release(struct socket *sock) sk->sk_state_change(sk); ax25_destroy_socket(ax25); } + if (ax25_dev) { + del_timer_sync(&ax25->timer); + del_timer_sync(&ax25->t1timer); + del_timer_sync(&ax25->t2timer); + del_timer_sync(&ax25->t3timer); + del_timer_sync(&ax25->idletimer); + dev_put(ax25_dev->dev); + ax25_dev_put(ax25_dev); + } sock->sk = NULL; release_sock(sk); @@ -1116,8 +1136,10 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) } } - if (ax25_dev != NULL) + if (ax25_dev) { ax25_fillin_cb(ax25, ax25_dev); + dev_hold(ax25_dev->dev); + } done: ax25_cb_add(ax25); @@ -1631,9 +1653,12 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; - struct sk_buff *skb; + struct sk_buff *skb, *last; + struct sk_buff_head *sk_queue; int copied; int err = 0; + int off = 0; + long timeo; lock_sock(sk); /* @@ -1645,11 +1670,29 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, goto out; } - /* Now we can treat all alike */ - skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, - flags & MSG_DONTWAIT, &err); - if (skb == NULL) - goto out; + /* We need support for non-blocking reads. */ + sk_queue = &sk->sk_receive_queue; + skb = __skb_try_recv_datagram(sk, sk_queue, flags, &off, &err, &last); + /* If no packet is available, release_sock(sk) and try again. */ + if (!skb) { + if (err != -EAGAIN) + goto out; + release_sock(sk); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); + while (timeo && !__skb_wait_for_more_packets(sk, sk_queue, &err, + &timeo, last)) { + skb = __skb_try_recv_datagram(sk, sk_queue, flags, &off, + &err, &last); + if (skb) + break; + + if (err != -EAGAIN) + goto done; + } + if (!skb) + goto done; + lock_sock(sk); + } if (!sk_to_ax25(sk)->pidincl) skb_pull(skb, 1); /* Remove PID */ @@ -1696,6 +1739,7 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, out: release_sock(sk); +done: return err; } diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index 4ac2e0847652..d2e0cc67d91a 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -37,6 +37,7 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr) for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) if (ax25cmp(addr, (ax25_address *)ax25_dev->dev->dev_addr) == 0) { res = ax25_dev; + ax25_dev_hold(ax25_dev); } spin_unlock_bh(&ax25_dev_lock); @@ -56,6 +57,7 @@ void ax25_dev_device_up(struct net_device *dev) return; } + refcount_set(&ax25_dev->refcount, 1); dev->ax25_ptr = ax25_dev; ax25_dev->dev = dev; dev_hold(dev); @@ -84,6 +86,7 @@ void ax25_dev_device_up(struct net_device *dev) ax25_dev->next = ax25_dev_list; ax25_dev_list = ax25_dev; spin_unlock_bh(&ax25_dev_lock); + ax25_dev_hold(ax25_dev); ax25_register_dev_sysctl(ax25_dev); } @@ -113,9 +116,10 @@ void ax25_dev_device_down(struct net_device *dev) if ((s = ax25_dev_list) == ax25_dev) { ax25_dev_list = s->next; spin_unlock_bh(&ax25_dev_lock); + ax25_dev_put(ax25_dev); dev->ax25_ptr = NULL; dev_put(dev); - kfree(ax25_dev); + ax25_dev_put(ax25_dev); return; } @@ -123,9 +127,10 @@ void ax25_dev_device_down(struct net_device 
*dev) if (s->next == ax25_dev) { s->next = ax25_dev->next; spin_unlock_bh(&ax25_dev_lock); + ax25_dev_put(ax25_dev); dev->ax25_ptr = NULL; dev_put(dev); - kfree(ax25_dev); + ax25_dev_put(ax25_dev); return; } @@ -133,6 +138,7 @@ void ax25_dev_device_down(struct net_device *dev) } spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; + ax25_dev_put(ax25_dev); } int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) @@ -144,20 +150,32 @@ int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) switch (cmd) { case SIOCAX25ADDFWD: - if ((fwd_dev = ax25_addr_ax25dev(&fwd->port_to)) == NULL) + fwd_dev = ax25_addr_ax25dev(&fwd->port_to); + if (!fwd_dev) { + ax25_dev_put(ax25_dev); return -EINVAL; - if (ax25_dev->forward != NULL) + } + if (ax25_dev->forward) { + ax25_dev_put(fwd_dev); + ax25_dev_put(ax25_dev); return -EINVAL; + } ax25_dev->forward = fwd_dev->dev; + ax25_dev_put(fwd_dev); + ax25_dev_put(ax25_dev); break; case SIOCAX25DELFWD: - if (ax25_dev->forward == NULL) + if (!ax25_dev->forward) { + ax25_dev_put(ax25_dev); return -EINVAL; + } ax25_dev->forward = NULL; + ax25_dev_put(ax25_dev); break; default: + ax25_dev_put(ax25_dev); return -EINVAL; } diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index b40e0bce67ea..dc2168d2a32a 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c @@ -75,11 +75,13 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) ax25_dev *ax25_dev; int i; - if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL) - return -EINVAL; if (route->digi_count > AX25_MAX_DIGIS) return -EINVAL; + ax25_dev = ax25_addr_ax25dev(&route->port_addr); + if (!ax25_dev) + return -EINVAL; + write_lock_bh(&ax25_route_lock); ax25_rt = ax25_route_list; @@ -91,6 +93,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) if (route->digi_count != 0) { if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return -ENOMEM; } ax25_rt->digipeat->lastrepeat = -1; @@ -101,6 +104,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) } } write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return 0; } ax25_rt = ax25_rt->next; @@ -108,6 +112,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return -ENOMEM; } @@ -120,6 +125,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); kfree(ax25_rt); + ax25_dev_put(ax25_dev); return -ENOMEM; } ax25_rt->digipeat->lastrepeat = -1; @@ -132,6 +138,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) ax25_rt->next = ax25_route_list; ax25_route_list = ax25_rt; write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return 0; } @@ -173,6 +180,7 @@ static int ax25_rt_del(struct ax25_routes_struct *route) } } write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return 0; } @@ -215,6 +223,7 @@ static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option) out: write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return err; } diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c index 15ab812c4fe4..3a476e4f6cd0 100644 --- a/net/ax25/ax25_subr.c +++ b/net/ax25/ax25_subr.c @@ -261,12 +261,20 @@ void ax25_disconnect(ax25_cb *ax25, int reason) { 
ax25_clear_queues(ax25); - if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY)) - ax25_stop_heartbeat(ax25); - ax25_stop_t1timer(ax25); - ax25_stop_t2timer(ax25); - ax25_stop_t3timer(ax25); - ax25_stop_idletimer(ax25); + if (reason == ENETUNREACH) { + del_timer_sync(&ax25->timer); + del_timer_sync(&ax25->t1timer); + del_timer_sync(&ax25->t2timer); + del_timer_sync(&ax25->t3timer); + del_timer_sync(&ax25->idletimer); + } else { + if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY)) + ax25_stop_heartbeat(ax25); + ax25_stop_t1timer(ax25); + ax25_stop_t2timer(ax25); + ax25_stop_t3timer(ax25); + ax25_stop_idletimer(ax25); + } ax25->state = AX25_STATE_0; diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 1f1f5b0873b2..895d834d479d 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -478,6 +478,17 @@ int batadv_frag_send_packet(struct sk_buff *skb, goto free_skb; } + /* GRO might have added fragments to the fragment list instead of + * frags[]. But this is not handled by skb_split and must be + * linearized to avoid incorrect length information after all + * batman-adv fragments were created and submitted to the + * hard-interface + */ + if (skb_has_frag_list(skb) && __skb_linearize(skb)) { + ret = -ENOMEM; + goto free_skb; + } + /* Create one header to be copied to all fragments */ frag_header.packet_type = BATADV_UNICAST_FRAG; frag_header.version = BATADV_COMPAT_VERSION; diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 139894ca788b..c8a341cd652c 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -136,7 +136,7 @@ static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev) { struct inet6_dev *in6_dev = __in6_dev_get(dev); - if (in6_dev && in6_dev->cnf.mc_forwarding) + if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding)) return BATADV_NO_FLAGS; else return BATADV_MCAST_WANT_NO_RTR6; diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index cff4944d5b66..7601ce9143c1 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -1010,6 +1010,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, hci_dev_lock(hdev); hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type); hci_dev_unlock(hdev); + hci_dev_put(hdev); if (!hcon) return -ENOENT; diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 4ef6a54403aa..2f87f57e7a4f 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -736,7 +736,7 @@ static int __init bt_init(void) err = bt_sysfs_init(); if (err < 0) - return err; + goto cleanup_led; err = sock_register(&bt_sock_family_ops); if (err) @@ -772,6 +772,8 @@ unregister_socket: sock_unregister(PF_BLUETOOTH); cleanup_sysfs: bt_sysfs_cleanup(); +cleanup_led: + bt_leds_cleanup(); return err; } diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index ecd2ffcf2ba2..140d9764c77e 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -240,7 +240,7 @@ int hci_disconnect(struct hci_conn *conn, __u8 reason) { BT_DBG("hcon %p", conn); - /* When we are master of an established connection and it enters + /* When we are central of an established connection and it enters * the disconnect timeout, then go ahead and try to read the * current clock offset. Processing of the result is done * within the event handling and hci_clock_offset_evt function. 
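The bt_init() fix above adds a cleanup_led unwind label so a failing step no longer leaks the steps before it. A runnable sketch of that goto-based unwinding pattern under a simulated failure (all setup and teardown names here are invented for the example):

#include <stdio.h>

static int setup_leds(void)      { puts("leds up");    return 0; }
static int setup_sysfs(void)     { puts("sysfs up");   return 0; }
static int setup_sockets(void)   { puts("sockets up"); return -1; /* simulate failure */ }
static void teardown_sysfs(void) { puts("sysfs down"); }
static void teardown_leds(void)  { puts("leds down"); }

/* Each step that can fail jumps to the label that unwinds everything
 * already completed, in reverse order. */
static int subsystem_init(void)
{
    int err;

    err = setup_leds();
    if (err)
        return err;

    err = setup_sysfs();
    if (err)
        goto cleanup_leds;

    err = setup_sockets();
    if (err)
        goto cleanup_sysfs;

    return 0;

cleanup_sysfs:
    teardown_sysfs();
cleanup_leds:
    teardown_leds();
    return err;
}

int main(void)
{
    return subsystem_init() ? 1 : 0;
}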
@@ -1065,16 +1065,16 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, hci_req_init(&req, hdev); - /* Disable advertising if we're active. For master role + /* Disable advertising if we're active. For central role * connections most controllers will refuse to connect if - * advertising is enabled, and for slave role connections we + * advertising is enabled, and for peripheral role connections we * anyway have to disable it in order to start directed * advertising. */ if (hci_dev_test_flag(hdev, HCI_LE_ADV)) __hci_req_disable_advertising(&req); - /* If requested to connect as slave use directed advertising */ + /* If requested to connect as peripheral use directed advertising */ if (conn->role == HCI_ROLE_SLAVE) { /* If we're active scanning most controllers are unable * to initiate advertising. Simply reject the attempt. diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 2e7998bad133..42ba5051c315 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3718,10 +3718,10 @@ int hci_register_dev(struct hci_dev *hdev) */ switch (hdev->dev_type) { case HCI_PRIMARY: - id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL); + id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL); break; case HCI_AMP: - id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL); + id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL); break; default: return -EINVAL; @@ -3730,7 +3730,7 @@ int hci_register_dev(struct hci_dev *hdev) if (id < 0) return id; - sprintf(hdev->name, "hci%d", id); + snprintf(hdev->name, sizeof(hdev->name), "hci%d", id); hdev->id = id; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); @@ -3796,7 +3796,8 @@ int hci_register_dev(struct hci_dev *hdev) hci_sock_dev_event(hdev, HCI_DEV_REG); hci_dev_hold(hdev); - if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { + if (!hdev->suspend_notifier.notifier_call && + !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { hdev->suspend_notifier.notifier_call = hci_suspend_notifier; error = register_pm_notifier(&hdev->suspend_notifier); if (error) @@ -4479,15 +4480,27 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); } -static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) +static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) { - if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { - /* ACL tx timeout must be longer than maximum - * link supervision timeout (40.9 seconds) */ - if (!cnt && time_after(jiffies, hdev->acl_last_tx + - HCI_ACL_TX_TIMEOUT)) - hci_link_tx_to(hdev, ACL_LINK); + unsigned long last_tx; + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + return; + + switch (type) { + case LE_LINK: + last_tx = hdev->le_last_tx; + break; + default: + last_tx = hdev->acl_last_tx; + break; } + + /* tx timeout must be longer than maximum link supervision timeout + * (40.9 seconds) + */ + if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT)) + hci_link_tx_to(hdev, type); } /* Schedule SCO */ @@ -4545,7 +4558,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) struct sk_buff *skb; int quote; - __check_timeout(hdev, cnt); + __check_timeout(hdev, cnt, ACL_LINK); while (hdev->acl_cnt && (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { @@ -4588,8 +4601,6 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) int quote; u8 type; - __check_timeout(hdev, cnt); - BT_DBG("%s", hdev->name); if (hdev->dev_type == HCI_AMP) @@ -4597,6 
+4608,8 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) else type = ACL_LINK; + __check_timeout(hdev, cnt, type); + while (hdev->block_cnt > 0 && (chan = hci_chan_sent(hdev, type, "e))) { u32 priority = (skb_peek(&chan->data_q))->priority; @@ -4670,7 +4683,7 @@ static void hci_sched_le(struct hci_dev *hdev) cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt; - __check_timeout(hdev, cnt); + __check_timeout(hdev, cnt, LE_LINK); tmp = cnt; while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 72b4127360c7..061ef20e135e 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2759,9 +2759,9 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) bacpy(&cp.bdaddr, &ev->bdaddr); if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) - cp.role = 0x00; /* Become master */ + cp.role = 0x00; /* Become central */ else - cp.role = 0x01; /* Remain slave */ + cp.role = 0x01; /* Remain peripheral */ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); } else if (!(flags & HCI_PROTO_DEFER)) { @@ -5061,8 +5061,9 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, hci_dev_lock(hdev); hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); - if (hcon) { + if (hcon && hcon->type == AMP_LINK) { hcon->state = BT_CLOSED; + hci_disconn_cfm(hcon, ev->reason); hci_conn_del(hcon); } @@ -5152,7 +5153,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, conn->dst_type = bdaddr_type; /* If we didn't have a hci_conn object previously - * but we're in master role this must be something + * but we're in central role this must be something * initiated using a white list. Since white list based * connections are not "first class citizens" we don't * have full tracking of them. 
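The __check_timeout() refactor above selects a per-link-type "last transmit" timestamp first, then applies one shared staleness test. A userspace approximation using wall-clock seconds instead of jiffies (TX_TIMEOUT and the variable names are illustrative, not kernel values):

#include <stdio.h>
#include <time.h>

enum link_type { ACL_LINK, LE_LINK };

/* Stand-ins for hdev->acl_last_tx / hdev->le_last_tx (in seconds). */
static time_t acl_last_tx, le_last_tx;
#define TX_TIMEOUT 45    /* must exceed the max link supervision timeout */

/* Select the per-type timestamp, then apply one "queue stuck" test. */
static int tx_timed_out(enum link_type type, unsigned int cnt)
{
    time_t last_tx = (type == LE_LINK) ? le_last_tx : acl_last_tx;
    time_t now = time(NULL);

    return cnt == 0 && now > last_tx + TX_TIMEOUT;
}

int main(void)
{
    acl_last_tx = time(NULL) - 60;    /* ACL queue stalled for a minute */
    le_last_tx = time(NULL);          /* LE traffic just went out */
    printf("ACL stuck: %d, LE stuck: %d\n",
           tx_timed_out(ACL_LINK, 0), tx_timed_out(LE_LINK, 0));
    return 0;
}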
Therefore, we go ahead diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index ee7ae88d4557..a21df117edff 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -796,6 +796,10 @@ static u8 update_white_list(struct hci_request *req) */ bool allow_rpa = hdev->suspended; + if (use_ll_privacy(hdev) && + hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + allow_rpa = true; + /* Go through the current white list programmed into the * controller one by one and check if that address is still * in the list of pending connections or list of devices to diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index b69d88b88d2e..ccd2c377bf83 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -48,6 +48,9 @@ void hci_conn_add_sysfs(struct hci_conn *conn) BT_DBG("conn %p", conn); + if (device_is_registered(&conn->dev)) + return; + dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); if (device_add(&conn->dev) < 0) { diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 0ddbc415ce15..e851f8047f74 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -61,6 +61,9 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err); static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff_head *skbs, u8 event); +static void l2cap_retrans_timeout(struct work_struct *work); +static void l2cap_monitor_timeout(struct work_struct *work); +static void l2cap_ack_timeout(struct work_struct *work); static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type) { @@ -111,7 +114,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, } /* Find channel with given SCID. - * Returns locked channel. */ + * Returns a reference locked channel. + */ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) { @@ -119,15 +123,19 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_scid(conn, cid); - if (c) - l2cap_chan_lock(c); + if (c) { + /* Only lock if chan reference is not 0 */ + c = l2cap_chan_hold_unless_zero(c); + if (c) + l2cap_chan_lock(c); + } mutex_unlock(&conn->chan_lock); return c; } /* Find channel with given DCID. - * Returns locked channel. + * Returns a reference locked channel. 
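l2cap_chan_hold_unless_zero(), introduced below, is the classic kref_get_unless_zero() lookup guard: a lookup can race with the final put, so only take a new reference if the count is still nonzero, and never resurrect an object already being destroyed. A small C11 model of the guard (struct chan here is a toy, not the kernel structure):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct chan {
    atomic_int kref;
};

static bool hold_unless_zero(struct chan *c)
{
    int old = atomic_load(&c->kref);

    while (old != 0) {
        if (atomic_compare_exchange_weak(&c->kref, &old, old + 1))
            return true;    /* got a reference on a live object */
    }
    return false;           /* object is already on its way to destruction */
}

int main(void)
{
    struct chan live = { 2 }, dying = { 0 };

    printf("live: %d, dying: %d\n",
           hold_unless_zero(&live), hold_unless_zero(&dying));
    return 0;
}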
*/ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) @@ -136,8 +144,12 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn, mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_dcid(conn, cid); - if (c) - l2cap_chan_lock(c); + if (c) { + /* Only lock if chan reference is not 0 */ + c = l2cap_chan_hold_unless_zero(c); + if (c) + l2cap_chan_lock(c); + } mutex_unlock(&conn->chan_lock); return c; @@ -162,8 +174,12 @@ static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_ident(conn, ident); - if (c) - l2cap_chan_lock(c); + if (c) { + /* Only lock if chan reference is not 0 */ + c = l2cap_chan_hold_unless_zero(c); + if (c) + l2cap_chan_lock(c); + } mutex_unlock(&conn->chan_lock); return c; @@ -463,6 +479,9 @@ struct l2cap_chan *l2cap_chan_create(void) write_unlock(&chan_list_lock); INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout); + INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); + INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); + INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); chan->state = BT_OPEN; @@ -497,6 +516,16 @@ void l2cap_chan_hold(struct l2cap_chan *c) kref_get(&c->kref); } +struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c) +{ + BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); + + if (!kref_get_unless_zero(&c->kref)) + return NULL; + + return c; +} + void l2cap_chan_put(struct l2cap_chan *c) { BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref)); @@ -1438,6 +1467,7 @@ static void l2cap_ecred_connect(struct l2cap_chan *chan) l2cap_ecred_init(chan, 0); + memset(&data, 0, sizeof(data)); data.pdu.req.psm = chan->psm; data.pdu.req.mtu = cpu_to_le16(chan->imtu); data.pdu.req.mps = cpu_to_le16(chan->mps); @@ -1687,8 +1717,8 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) if (hcon->out) smp_conn_security(hcon, hcon->pending_sec_level); - /* For LE slave connections, make sure the connection interval - * is in the range of the minium and maximum interval that has + /* For LE peripheral connections, make sure the connection interval + * is in the range of the minimum and maximum interval that has * been configured for this connection. If not, then trigger * the connection update procedure. 
*/ @@ -1942,11 +1972,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *dst, u8 link_type) { - struct l2cap_chan *c, *c1 = NULL; + struct l2cap_chan *c, *tmp, *c1 = NULL; read_lock(&chan_list_lock); - list_for_each_entry(c, &chan_list, global_l) { + list_for_each_entry_safe(c, tmp, &chan_list, global_l) { if (state && c->state != state) continue; @@ -1956,7 +1986,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, if (link_type == LE_LINK && c->src_type == BDADDR_BREDR) continue; - if (c->psm == psm) { + if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) { int src_match, dst_match; int src_any, dst_any; @@ -1964,7 +1994,9 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, src_match = !bacmp(&c->src, src); dst_match = !bacmp(&c->dst, dst); if (src_match && dst_match) { - l2cap_chan_hold(c); + if (!l2cap_chan_hold_unless_zero(c)) + continue; + read_unlock(&chan_list_lock); return c; } @@ -1979,7 +2011,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, } if (c1) - l2cap_chan_hold(c1); + c1 = l2cap_chan_hold_unless_zero(c1); read_unlock(&chan_list_lock); @@ -3290,10 +3322,6 @@ int l2cap_ertm_init(struct l2cap_chan *chan) chan->rx_state = L2CAP_RX_STATE_RECV; chan->tx_state = L2CAP_TX_STATE_XMIT; - INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); - INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); - INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); - skb_queue_head_init(&chan->srej_q); err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); @@ -3732,7 +3760,8 @@ done: l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); - if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { + if (remote_efs && + test_bit(FLAG_EFS_ENABLE, &chan->flags)) { chan->remote_id = efs.id; chan->remote_stype = efs.stype; chan->remote_msdu = le16_to_cpu(efs.msdu); @@ -4277,6 +4306,12 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn, } } + chan = l2cap_chan_hold_unless_zero(chan); + if (!chan) { + err = -EBADSLT; + goto unlock; + } + err = 0; l2cap_chan_lock(chan); @@ -4306,6 +4341,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn, } l2cap_chan_unlock(chan); + l2cap_chan_put(chan); unlock: mutex_unlock(&conn->chan_lock); @@ -4413,7 +4449,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, chan->ident = cmd->ident; l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); - chan->num_conf_rsp++; + if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP) + chan->num_conf_rsp++; /* Reset config buffer. 
*/ chan->conf_len = 0; @@ -4459,6 +4496,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, unlock: l2cap_chan_unlock(chan); + l2cap_chan_put(chan); return err; } @@ -4572,6 +4610,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, done: l2cap_chan_unlock(chan); + l2cap_chan_put(chan); return err; } @@ -5299,6 +5338,7 @@ send_move_response: l2cap_send_move_chan_rsp(chan, result); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); return 0; } @@ -5391,6 +5431,7 @@ static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result) } l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid, @@ -5420,6 +5461,7 @@ static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid, l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } static int l2cap_move_channel_rsp(struct l2cap_conn *conn, @@ -5483,6 +5525,7 @@ static int l2cap_move_channel_confirm(struct l2cap_conn *conn, l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); return 0; } @@ -5518,6 +5561,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, } l2cap_chan_unlock(chan); + l2cap_chan_put(chan); return 0; } @@ -5766,6 +5810,19 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm), scid, mtu, mps); + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A + * page 1059: + * + * Valid range: 0x0001-0x00ff + * + * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges + */ + if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { + result = L2CAP_CR_LE_BAD_PSM; + chan = NULL; + goto response; + } + /* Check if we have socket listening on psm */ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, &conn->hcon->dst, LE_LINK); @@ -5890,12 +5947,11 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn, if (credits > max_credits) { BT_ERR("LE credits overflow"); l2cap_send_disconn_req(chan, ECONNRESET); - l2cap_chan_unlock(chan); /* Return 0 so that we don't trigger an unnecessary * command reject packet. 
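Both new SPSM checks below implement the same spec rule quoted in their comments (valid range 0x0001-0x00ff). A trivial standalone version of the validation, assuming the value has already been converted from wire order; the result-code value is copied from the kernel header but should be treated as illustrative here:

#include <stdint.h>
#include <stdio.h>

#define L2CAP_PSM_LE_DYN_END 0x00ff    /* upper bound from the cited spec table */
#define L2CAP_CR_LE_BAD_PSM  0x0002

static int validate_spsm(uint16_t psm)    /* psm already in host order */
{
    if (!psm || psm > L2CAP_PSM_LE_DYN_END)
        return L2CAP_CR_LE_BAD_PSM;    /* reject 0 and anything above 0x00ff */
    return 0;
}

int main(void)
{
    printf("0x0000 -> %d, 0x0080 -> %d, 0x0101 -> %d\n",
           validate_spsm(0x0000), validate_spsm(0x0080), validate_spsm(0x0101));
    return 0;
}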
*/ - return 0; + goto unlock; } chan->tx_credits += credits; @@ -5906,7 +5962,9 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn, if (chan->tx_credits) chan->ops->resume(chan); +unlock: l2cap_chan_unlock(chan); + l2cap_chan_put(chan); return 0; } @@ -5945,6 +6003,18 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, psm = req->psm; + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A + * page 1059: + * + * Valid range: 0x0001-0x00ff + * + * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges + */ + if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { + result = L2CAP_CR_LE_BAD_PSM; + goto response; + } + BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps); memset(&pdu, 0, sizeof(pdu)); @@ -6831,6 +6901,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { + struct l2cap_ctrl local_control; int err = 0; bool skb_in_use = false; @@ -6855,15 +6926,32 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan, chan->buffer_seq = chan->expected_tx_seq; skb_in_use = true; + /* l2cap_reassemble_sdu may free skb, hence invalidate + * control, so make a copy in advance to use it after + * l2cap_reassemble_sdu returns and to avoid the race + * condition, for example: + * + * The current thread calls: + * l2cap_reassemble_sdu + * chan->ops->recv == l2cap_sock_recv_cb + * __sock_queue_rcv_skb + * Another thread calls: + * bt_sock_recvmsg + * skb_recv_datagram + * skb_free_datagram + * Then the current thread tries to access control, but + * it was freed by skb_free_datagram. + */ + local_control = *control; err = l2cap_reassemble_sdu(chan, skb, control); if (err) break; - if (control->final) { + if (local_control.final) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) { - control->final = 0; - l2cap_retransmit_all(chan, control); + local_control.final = 0; + l2cap_retransmit_all(chan, &local_control); l2cap_ertm_send(chan); } } @@ -7243,11 +7331,27 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb) { + /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store + * the txseq field in advance to use it after l2cap_reassemble_sdu + * returns and to avoid the race condition, for example: + * + * The current thread calls: + * l2cap_reassemble_sdu + * chan->ops->recv == l2cap_sock_recv_cb + * __sock_queue_rcv_skb + * Another thread calls: + * bt_sock_recvmsg + * skb_recv_datagram + * skb_free_datagram + * Then the current thread tries to access control, but it was freed by + * skb_free_datagram. 
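The local_control copy above is a copy-before-consume defence: once the skb is handed on to the receive path, anything stored inside it may be freed by another thread, so every field needed afterwards must be copied out first. A userspace reduction of the same bug shape and its fix (struct skb and struct ctrl are toys; consume() models the receive path that may free the buffer):

#include <stdio.h>
#include <stdlib.h>

struct ctrl { int final; int txseq; };
struct skb { struct ctrl cb; };    /* control lives inside the buffer, like skb->cb */

/* Stand-in consumer: after this call, any pointer into the buffer is dead. */
static void consume(struct skb *skb)
{
    free(skb);
}

static void rx(struct skb *skb)
{
    struct ctrl local = skb->cb;    /* copy out everything needed *before* consuming */

    consume(skb);                   /* skb->cb may be freed from here on */

    if (local.final)                /* safe: reads the copy, not the freed buffer */
        printf("retransmit up to txseq %d\n", local.txseq);
}

int main(void)
{
    struct skb *skb = malloc(sizeof(*skb));

    skb->cb = (struct ctrl){ .final = 1, .txseq = 42 };
    rx(skb);
    return 0;
}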
+ */ + u16 txseq = control->txseq; + BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, chan->rx_state); - if (l2cap_classify_txseq(chan, control->txseq) == - L2CAP_TXSEQ_EXPECTED) { + if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) { l2cap_pass_to_tx(chan, control); BT_DBG("buffer_seq %d->%d", chan->buffer_seq, @@ -7270,8 +7374,8 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, } } - chan->last_acked_seq = control->txseq; - chan->expected_tx_seq = __next_seq(chan, control->txseq); + chan->last_acked_seq = txseq; + chan->expected_tx_seq = __next_seq(chan, txseq); return 0; } @@ -7527,6 +7631,7 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, return; } + l2cap_chan_hold(chan); l2cap_chan_lock(chan); } else { BT_DBG("unknown cid 0x%4.4x", cid); @@ -7539,7 +7644,7 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, BT_DBG("chan %p, len %d", chan, skb->len); /* If we receive data on a fixed channel before the info req/rsp - * procdure is done simply assume that the channel is supported + * procedure is done simply assume that the channel is supported * and mark it as ready. */ if (chan->chan_type == L2CAP_CHAN_FIXED) @@ -7586,6 +7691,7 @@ drop: done: l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, @@ -8073,7 +8179,7 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, if (src_type != c->src_type) continue; - l2cap_chan_hold(c); + c = l2cap_chan_hold_unless_zero(c); read_unlock(&chan_list_lock); return c; } diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index f2bacb464ccf..7324764384b6 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -549,22 +549,58 @@ struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel) return dlc; } -int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) +static int rfcomm_dlc_send_frag(struct rfcomm_dlc *d, struct sk_buff *frag) { - int len = skb->len; - - if (d->state != BT_CONNECTED) - return -ENOTCONN; + int len = frag->len; BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len); if (len > d->mtu) return -EINVAL; - rfcomm_make_uih(skb, d->addr); - skb_queue_tail(&d->tx_queue, skb); + rfcomm_make_uih(frag, d->addr); + __skb_queue_tail(&d->tx_queue, frag); - if (!test_bit(RFCOMM_TX_THROTTLED, &d->flags)) + return len; +} + +int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) +{ + unsigned long flags; + struct sk_buff *frag, *next; + int len; + + if (d->state != BT_CONNECTED) + return -ENOTCONN; + + frag = skb_shinfo(skb)->frag_list; + skb_shinfo(skb)->frag_list = NULL; + + /* Queue all fragments atomically. 
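The comment above states the invariant the rest of the rfcomm_dlc_send() rework (continuing below) enforces: every fragment of one message is appended under a single acquisition of the tx_queue lock, so two concurrent senders can never interleave their fragments. A userspace sketch of that invariant, with a pthread mutex standing in for the skb queue spinlock; all names and values are invented:

    #include <pthread.h>
    #include <stdio.h>

    #define MAXQ 16

    static struct {
        pthread_mutex_t lock;
        int q[MAXQ];
        int n;
    } txq = { .lock = PTHREAD_MUTEX_INITIALIZER };

    /* All fragments of one message go in under one lock hold, the
     * analogue of the spin_lock_irqsave() region added below. */
    static void send_frags(const int *frags, int nfrags)
    {
        pthread_mutex_lock(&txq.lock);
        for (int i = 0; i < nfrags && txq.n < MAXQ; i++)
            txq.q[txq.n++] = frags[i];  /* __skb_queue_tail() */
        pthread_mutex_unlock(&txq.lock);
    }

    int main(void)
    {
        int a[] = { 1, 2, 3 }, b[] = { 4, 5 };

        send_frags(a, 3);
        send_frags(b, 2);
        for (int i = 0; i < txq.n; i++)
            printf("%d ", txq.q[i]);
        printf("\n");
        return 0;
    }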
*/ + spin_lock_irqsave(&d->tx_queue.lock, flags); + + len = rfcomm_dlc_send_frag(d, skb); + if (len < 0 || !frag) + goto unlock; + + for (; frag; frag = next) { + int ret; + + next = frag->next; + + ret = rfcomm_dlc_send_frag(d, frag); + if (ret < 0) { + kfree_skb(frag); + goto unlock; + } + + len += ret; + } + +unlock: + spin_unlock_irqrestore(&d->tx_queue.lock, flags); + + if (len > 0 && !test_bit(RFCOMM_TX_THROTTLED, &d->flags)) rfcomm_schedule(); return len; } diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index ae6f80730561..4cf1fa9900ca 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -575,47 +575,21 @@ static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg, lock_sock(sk); sent = bt_sock_wait_ready(sk, msg->msg_flags); - if (sent) - goto done; - while (len) { - size_t size = min_t(size_t, len, d->mtu); - int err; - - skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE, - msg->msg_flags & MSG_DONTWAIT, &err); - if (!skb) { - if (sent == 0) - sent = err; - break; - } - skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE); - - err = memcpy_from_msg(skb_put(skb, size), msg, size); - if (err) { - kfree_skb(skb); - if (sent == 0) - sent = err; - break; - } - - skb->priority = sk->sk_priority; - - err = rfcomm_dlc_send(d, skb); - if (err < 0) { - kfree_skb(skb); - if (sent == 0) - sent = err; - break; - } - - sent += size; - len -= size; - } - -done: release_sock(sk); + if (sent) + return sent; + + skb = bt_skb_sendmmsg(sk, msg, len, d->mtu, RFCOMM_SKB_HEAD_RESERVE, + RFCOMM_SKB_TAIL_RESERVE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + sent = rfcomm_dlc_send(d, skb); + if (sent < 0) + kfree_skb(skb); + return sent; } diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 2f2b8ddc4dd5..8244d3ae185b 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -280,12 +280,10 @@ static int sco_connect(struct hci_dev *hdev, struct sock *sk) return err; } -static int sco_send_frame(struct sock *sk, void *buf, int len, - unsigned int msg_flags) +static int sco_send_frame(struct sock *sk, struct sk_buff *skb) { struct sco_conn *conn = sco_pi(sk)->conn; - struct sk_buff *skb; - int err; + int len = skb->len; /* Check outgoing MTU */ if (len > conn->mtu) @@ -293,11 +291,6 @@ static int sco_send_frame(struct sock *sk, void *buf, int len, BT_DBG("sk %p len %d", sk, len); - skb = bt_skb_send_alloc(sk, len, msg_flags & MSG_DONTWAIT, &err); - if (!skb) - return err; - - memcpy(skb_put(skb, len), buf, len); hci_send_sco(conn->hcon, skb); return len; @@ -575,19 +568,24 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen addr->sa_family != AF_BLUETOOTH) return -EINVAL; - if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) - return -EBADFD; + lock_sock(sk); + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { + err = -EBADFD; + goto done; + } - if (sk->sk_type != SOCK_SEQPACKET) - return -EINVAL; + if (sk->sk_type != SOCK_SEQPACKET) { + err = -EINVAL; + goto done; + } hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR); - if (!hdev) - return -EHOSTUNREACH; + if (!hdev) { + err = -EHOSTUNREACH; + goto done; + } hci_dev_lock(hdev); - lock_sock(sk); - /* Set destination address and psm */ bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr); @@ -722,7 +720,7 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; - void *buf; + struct sk_buff *skb; int err; BT_DBG("sock %p, sk %p", sock, sk); @@ -734,24 +732,21 @@ static int 
sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; - buf = kmalloc(len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - if (memcpy_from_msg(buf, msg, len)) { - kfree(buf); - return -EFAULT; - } + skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0); + if (IS_ERR(skb)) + return PTR_ERR(skb); lock_sock(sk); if (sk->sk_state == BT_CONNECTED) - err = sco_send_frame(sk, buf, len, msg->msg_flags); + err = sco_send_frame(sk, skb); else err = -ENOTCONN; release_sock(sk); - kfree(buf); + + if (err < 0) + kfree_skb(skb); return err; } diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 2b7879afc333..b7374dbee23a 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -909,8 +909,8 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, hcon->pending_sec_level = BT_SECURITY_HIGH; } - /* If both devices have Keyoard-Display I/O, the master - * Confirms and the slave Enters the passkey. + /* If both devices have Keyboard-Display I/O, the initiator + * Confirms and the responder Enters the passkey. */ if (smp->method == OVERLAP) { if (hcon->role == HCI_ROLE_MASTER) @@ -3076,7 +3076,7 @@ static void bredr_pairing(struct l2cap_chan *chan) if (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) return; - /* Only master may initiate SMP over BR/EDR */ + /* Only initiator may initiate SMP over BR/EDR */ if (hcon->role != HCI_ROLE_MASTER) return; diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index eb684f31fd69..717b01ff9b2b 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -10,20 +10,86 @@ #include #include #include +#include #include #include +#include #define CREATE_TRACE_POINTS #include +struct bpf_test_timer { + enum { NO_PREEMPT, NO_MIGRATE } mode; + u32 i; + u64 time_start, time_spent; +}; + +static void bpf_test_timer_enter(struct bpf_test_timer *t) + __acquires(rcu) +{ + rcu_read_lock(); + if (t->mode == NO_PREEMPT) + preempt_disable(); + else + migrate_disable(); + + t->time_start = ktime_get_ns(); +} + +static void bpf_test_timer_leave(struct bpf_test_timer *t) + __releases(rcu) +{ + t->time_start = 0; + + if (t->mode == NO_PREEMPT) + preempt_enable(); + else + migrate_enable(); + rcu_read_unlock(); +} + +static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration) + __must_hold(rcu) +{ + t->i++; + if (t->i >= repeat) { + /* We're done. */ + t->time_spent += ktime_get_ns() - t->time_start; + do_div(t->time_spent, t->i); + *duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent; + *err = 0; + goto reset; + } + + if (signal_pending(current)) { + /* During iteration: we've been cancelled, abort. */ + *err = -EINTR; + goto reset; + } + + if (need_resched()) { + /* During iteration: we need to reschedule between runs. */ + t->time_spent += ktime_get_ns() - t->time_start; + bpf_test_timer_leave(t); + cond_resched(); + bpf_test_timer_enter(t); + } + + /* Do another round. 
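The bpf_test_timer helpers introduced above (their final lines continue below) replace several hand-rolled benchmark loops later in this patch with one enter/continue/leave triple. A simplified userspace analogue of that control flow, omitting the signal and cond_resched() handling the kernel version also performs; all names here are invented:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    struct bench_timer {
        uint32_t i;
        uint64_t start_ns, spent_ns;
    };

    static uint64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    static void bench_enter(struct bench_timer *t)
    {
        t->start_ns = now_ns();
    }

    /* Returns true while more iterations remain; on the last round it
     * folds the elapsed time into *avg_ns and resets the counter, as
     * the kernel helper does for *duration. */
    static bool bench_continue(struct bench_timer *t, uint32_t repeat,
                               uint64_t *avg_ns)
    {
        if (++t->i < repeat)
            return true;
        t->spent_ns += now_ns() - t->start_ns;
        *avg_ns = t->spent_ns / t->i;
        t->i = 0;
        return false;
    }

    int main(void)
    {
        struct bench_timer t = { 0 };
        uint64_t avg = 0;
        volatile unsigned long sink = 0;

        bench_enter(&t);
        do {
            sink += 1;      /* stand-in for one program run */
        } while (bench_continue(&t, 1000000, &avg));

        printf("avg %llu ns/run\n", (unsigned long long)avg);
        return 0;
    }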
*/ + return true; + +reset: + t->i = 0; + return false; +} + static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *retval, u32 *time, bool xdp) { struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL }; + struct bpf_test_timer t = { NO_MIGRATE }; enum bpf_cgroup_storage_type stype; - u64 time_start, time_spent = 0; - int ret = 0; - u32 i; + int ret; for_each_cgroup_storage_type(stype) { storage[stype] = bpf_cgroup_storage_alloc(prog, stype); @@ -38,10 +104,8 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, if (!repeat) repeat = 1; - rcu_read_lock(); - migrate_disable(); - time_start = ktime_get_ns(); - for (i = 0; i < repeat; i++) { + bpf_test_timer_enter(&t); + do { ret = bpf_cgroup_storage_set(storage); if (ret) break; @@ -53,29 +117,8 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, bpf_cgroup_storage_unset(); - if (signal_pending(current)) { - ret = -EINTR; - break; - } - - if (need_resched()) { - time_spent += ktime_get_ns() - time_start; - migrate_enable(); - rcu_read_unlock(); - - cond_resched(); - - rcu_read_lock(); - migrate_disable(); - time_start = ktime_get_ns(); - } - } - time_spent += ktime_get_ns() - time_start; - migrate_enable(); - rcu_read_unlock(); - - do_div(time_spent, repeat); - *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; + } while (bpf_test_timer_continue(&t, repeat, &ret, time)); + bpf_test_timer_leave(&t); for_each_cgroup_storage_type(stype) bpf_cgroup_storage_free(storage[stype]); @@ -188,6 +231,7 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 size, if (user_size > size) return ERR_PTR(-EMSGSIZE); + size = SKB_DATA_ALIGN(size); data = kzalloc(size + headroom + tailroom, GFP_USER); if (!data) return ERR_PTR(-ENOMEM); @@ -398,6 +442,9 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb) { struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb; + if (!skb->len) + return -EINVAL; + if (!__skb) return 0; @@ -688,18 +735,17 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) { + struct bpf_test_timer t = { NO_PREEMPT }; u32 size = kattr->test.data_size_in; struct bpf_flow_dissector ctx = {}; u32 repeat = kattr->test.repeat; struct bpf_flow_keys *user_ctx; struct bpf_flow_keys flow_keys; - u64 time_start, time_spent = 0; const struct ethhdr *eth; unsigned int flags = 0; u32 retval, duration; void *data; int ret; - u32 i; if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR) return -EINVAL; @@ -735,39 +781,15 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, ctx.data = data; ctx.data_end = (__u8 *)data + size; - rcu_read_lock(); - preempt_disable(); - time_start = ktime_get_ns(); - for (i = 0; i < repeat; i++) { + bpf_test_timer_enter(&t); + do { retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN, size, flags); + } while (bpf_test_timer_continue(&t, repeat, &ret, &duration)); + bpf_test_timer_leave(&t); - if (signal_pending(current)) { - preempt_enable(); - rcu_read_unlock(); - - ret = -EINTR; - goto out; - } - - if (need_resched()) { - time_spent += ktime_get_ns() - time_start; - preempt_enable(); - rcu_read_unlock(); - - cond_resched(); - - rcu_read_lock(); - preempt_disable(); - time_start = ktime_get_ns(); - } - } - time_spent += ktime_get_ns() - time_start; - preempt_enable(); - rcu_read_unlock(); - - do_div(time_spent, repeat); - duration = time_spent > U32_MAX ? 
U32_MAX : (u32)time_spent; + if (ret < 0) + goto out; ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys), retval, duration); @@ -780,3 +802,106 @@ out: kfree(data); return ret; } + +int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + struct bpf_test_timer t = { NO_PREEMPT }; + struct bpf_prog_array *progs = NULL; + struct bpf_sk_lookup_kern ctx = {}; + u32 repeat = kattr->test.repeat; + struct bpf_sk_lookup *user_ctx; + u32 retval, duration; + int ret = -EINVAL; + + if (prog->type != BPF_PROG_TYPE_SK_LOOKUP) + return -EINVAL; + + if (kattr->test.flags || kattr->test.cpu) + return -EINVAL; + + if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out || + kattr->test.data_size_out) + return -EINVAL; + + if (!repeat) + repeat = 1; + + user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx)); + if (IS_ERR(user_ctx)) + return PTR_ERR(user_ctx); + + if (!user_ctx) + return -EINVAL; + + if (user_ctx->sk) + goto out; + + if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx))) + goto out; + + if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) { + ret = -ERANGE; + goto out; + } + + ctx.family = (u16)user_ctx->family; + ctx.protocol = (u16)user_ctx->protocol; + ctx.dport = (u16)user_ctx->local_port; + ctx.sport = (__force __be16)user_ctx->remote_port; + + switch (ctx.family) { + case AF_INET: + ctx.v4.daddr = (__force __be32)user_ctx->local_ip4; + ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4; + break; + +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6; + ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6; + break; +#endif + + default: + ret = -EAFNOSUPPORT; + goto out; + } + + progs = bpf_prog_array_alloc(1, GFP_KERNEL); + if (!progs) { + ret = -ENOMEM; + goto out; + } + + progs->items[0].prog = prog; + + bpf_test_timer_enter(&t); + do { + ctx.selected_sk = NULL; + retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN); + } while (bpf_test_timer_continue(&t, repeat, &ret, &duration)); + bpf_test_timer_leave(&t); + + if (ret < 0) + goto out; + + user_ctx->cookie = 0; + if (ctx.selected_sk) { + if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) { + ret = -EOPNOTSUPP; + goto out; + } + + user_ctx->cookie = sock_gen_cookie(ctx.selected_sk); + } + + ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration); + if (!ret) + ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx)); + +out: + bpf_prog_array_free(progs); + kfree(user_ctx); + return ret; +} diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 59a318b9f646..bf5bf148091f 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -43,6 +43,13 @@ static int br_pass_frame_up(struct sk_buff *skb) u64_stats_update_end(&brstats->syncp); vg = br_vlan_group_rcu(br); + + /* Reset the offload_fwd_mark because there could be a stacked + * bridge above, and it should not think this bridge is doing + * that bridge's work forwarding out its ports. + */ + br_switchdev_frame_unmark(skb); + /* Bridge is just like any other port. Make sure the * packet is allowed except in promisc modue when someone * may be running packet capture.
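The br_input.c hunk above ends with the new br_switchdev_frame_unmark() call. A toy model of why the mark must be cleared between stacked bridges: the flag only means "hardware already forwarded this packet out of *this* bridge's ports", so letting it leak upward would make an upper bridge skip work that was never done on its behalf. This is a deliberately simplified single-flag model with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    struct pkt {
        bool offload_fwd_mark;
    };

    static void bridge_rx(struct pkt *p, int level)
    {
        if (p->offload_fwd_mark)
            printf("bridge%d: skip sw forwarding (hw did it)\n", level);
        else
            printf("bridge%d: forward in software\n", level);

        /* Clear the mark before handing the packet up to a stacked
         * bridge, as br_switchdev_frame_unmark() does: the upper
         * bridge must not think this bridge did *its* forwarding. */
        p->offload_fwd_mark = false;
    }

    int main(void)
    {
        struct pkt p = { .offload_fwd_mark = true };

        bridge_rx(&p, 0);   /* lower bridge: hw already forwarded */
        bridge_rx(&p, 1);   /* stacked bridge: must forward itself */
        return 0;
    }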
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 68c0d0f92890..a718204c4bfd 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ /* - Bridged-and-DNAT'ed traffic doesn't * require ip_forwarding. */ if (rt->dst.dev == dev) { + skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); goto bridged_dnat; } @@ -413,6 +414,7 @@ bridged_dnat: kfree_skb(skb); return 0; } + skb_dst_drop(skb); skb_dst_set_noref(skb, &rt->dst); } @@ -1012,9 +1014,24 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net, return okfn(net, sk, skb); ops = nf_hook_entries_get_hook_ops(e); - for (i = 0; i < e->num_hook_entries && - ops[i]->priority <= NF_BR_PRI_BRNF; i++) - ; + for (i = 0; i < e->num_hook_entries; i++) { + /* These hooks have already been called */ + if (ops[i]->priority < NF_BR_PRI_BRNF) + continue; + + /* These hooks have not been called yet, run them. */ + if (ops[i]->priority > NF_BR_PRI_BRNF) + break; + + /* take a closer look at NF_BR_PRI_BRNF. */ + if (ops[i]->hook == br_nf_pre_routing) { + /* This hook diverted the skb to this function, + * hooks after this have not been run yet. + */ + i++; + break; + } + } nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev, sk, net, okfn); diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c index e4e0c836c3f5..6b07f30675bb 100644 --- a/net/bridge/br_netfilter_ipv6.c +++ b/net/bridge/br_netfilter_ipv6.c @@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc kfree_skb(skb); return 0; } + skb_dst_drop(skb); skb_dst_set_noref(skb, &rt->dst); } diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 852f4b54e881..1dc5db07650c 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -855,26 +855,37 @@ EXPORT_SYMBOL_GPL(br_vlan_get_proto); int __br_vlan_set_proto(struct net_bridge *br, __be16 proto) { + struct switchdev_attr attr = { + .orig_dev = br->dev, + .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL, + .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP, + .u.vlan_protocol = ntohs(proto), + }; int err = 0; struct net_bridge_port *p; struct net_bridge_vlan *vlan; struct net_bridge_vlan_group *vg; - __be16 oldproto; + __be16 oldproto = br->vlan_proto; if (br->vlan_proto == proto) return 0; + err = switchdev_port_attr_set(br->dev, &attr); + if (err && err != -EOPNOTSUPP) + return err; + /* Add VLANs for the new proto to the device filter. */ list_for_each_entry(p, &br->port_list, list) { vg = nbp_vlan_group(p); list_for_each_entry(vlan, &vg->vlan_list, vlist) { + if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV) + continue; err = vlan_vid_add(p->dev, proto, vlan->vid); if (err) goto err_filt; } } - oldproto = br->vlan_proto; br->vlan_proto = proto; recalculate_group_addr(br); @@ -883,20 +894,32 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto) /* Delete VLANs for the old proto from the device filter. 
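__br_vlan_set_proto() above follows an add-new, commit, delete-old sequence with rollback, now extended to notify switchdev first and to skip VLANs the hardware added itself (BR_VLFLAG_ADDED_BY_SWITCHDEV); the delete-old phase continues below. A reduced sketch of just the two-phase-with-rollback core, with invented port and protocol values and the switchdev and skip logic omitted:

    #include <stdio.h>

    #define NPORTS 3

    /* Pretend port 2 rejects 0x88a8 to exercise the rollback path. */
    static int vid_add(int port, int proto)
    {
        if (port == 2 && proto == 0x88a8)
            return -1;
        printf("port%d: add VID filter for proto 0x%x\n", port, proto);
        return 0;
    }

    static void vid_del(int port, int proto)
    {
        printf("port%d: del VID filter for proto 0x%x\n", port, proto);
    }

    static int set_proto(int *cur, int new_proto)
    {
        int old = *cur, port;

        /* Phase 1: add filters for the new protocol on every port. */
        for (port = 0; port < NPORTS; port++) {
            if (vid_add(port, new_proto)) {
                /* Unwind only what was added so far. */
                while (--port >= 0)
                    vid_del(port, new_proto);
                return -1;
            }
        }

        /* Commit: from here on the bridge uses the new protocol. */
        *cur = new_proto;

        /* Phase 2: the old-protocol filters are now unused. */
        for (port = 0; port < NPORTS; port++)
            vid_del(port, old);
        return 0;
    }

    int main(void)
    {
        int proto = 0x8100;

        if (set_proto(&proto, 0x88a8))
            printf("switch failed, still 0x%x\n", proto);
        return 0;
    }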
*/ list_for_each_entry(p, &br->port_list, list) { vg = nbp_vlan_group(p); - list_for_each_entry(vlan, &vg->vlan_list, vlist) + list_for_each_entry(vlan, &vg->vlan_list, vlist) { + if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV) + continue; vlan_vid_del(p->dev, oldproto, vlan->vid); + } } return 0; err_filt: - list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) + attr.u.vlan_protocol = ntohs(oldproto); + switchdev_port_attr_set(br->dev, &attr); + + list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) { + if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV) + continue; vlan_vid_del(p->dev, proto, vlan->vid); + } list_for_each_entry_continue_reverse(p, &br->port_list, list) { vg = nbp_vlan_group(p); - list_for_each_entry(vlan, &vg->vlan_list, vlist) + list_for_each_entry(vlan, &vg->vlan_list, vlist) { + if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV) + continue; vlan_vid_del(p->dev, proto, vlan->vid); + } } return err; diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index 32bc2821027f..57f91efce0f7 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c @@ -36,18 +36,10 @@ static struct ebt_replace_kernel initial_table = { .entries = (char *)&initial_chain, }; -static int check(const struct ebt_table_info *info, unsigned int valid_hooks) -{ - if (valid_hooks & ~(1 << NF_BR_BROUTING)) - return -EINVAL; - return 0; -} - static const struct ebt_table broute_table = { .name = "broute", .table = &initial_table, .valid_hooks = 1 << NF_BR_BROUTING, - .check = check, .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index bcf982e12f16..7f2e620f4978 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c @@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = { .entries = (char *)initial_chains, }; -static int check(const struct ebt_table_info *info, unsigned int valid_hooks) -{ - if (valid_hooks & ~FILTER_VALID_HOOKS) - return -EINVAL; - return 0; -} - static const struct ebt_table frame_filter = { .name = "filter", .table = &initial_table, .valid_hooks = FILTER_VALID_HOOKS, - .check = check, .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 0d092773f816..1743a105485c 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c @@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = { .entries = (char *)initial_chains, }; -static int check(const struct ebt_table_info *info, unsigned int valid_hooks) -{ - if (valid_hooks & ~NAT_VALID_HOOKS) - return -EINVAL; - return 0; -} - static const struct ebt_table frame_nat = { .name = "nat", .table = &initial_table, .valid_hooks = NAT_VALID_HOOKS, - .check = check, .me = THIS_MODULE, }; diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index d481ff24a150..06b80b584381 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -999,9 +999,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, goto free_iterate; } - /* the table doesn't like it */ - if (t->check && (ret = t->check(newinfo, repl->valid_hooks))) + if (repl->valid_hooks != t->valid_hooks) { + ret = -EINVAL; goto free_unlock; + } if (repl->num_counters && repl->num_counters != t->private->nentries) { ret = -EINVAL; @@ -1186,11 +1187,6 @@ int ebt_register_table(struct net *net, const struct 
ebt_table *input_table, if (ret != 0) goto free_chainstack; - if (table->check && table->check(newinfo, table->valid_hooks)) { - ret = -EINVAL; - goto free_chainstack; - } - table->private = newinfo; rwlock_init(&table->lock); mutex_lock(&ebt_mutex); diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c index 8e8ffac037cd..97805ec424c1 100644 --- a/net/bridge/netfilter/nft_meta_bridge.c +++ b/net/bridge/netfilter/nft_meta_bridge.c @@ -87,9 +87,8 @@ static int nft_meta_bridge_get_init(const struct nft_ctx *ctx, return nft_meta_get_init(ctx, expr, tb); } - priv->dreg = nft_parse_register(tb[NFTA_META_DREG]); - return nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, len); + return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg, + NULL, NFT_DATA_VALUE, len); } static struct nft_expr_type nft_meta_bridge_type; diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 42dc080a4dbb..806fb4d84fd3 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -315,9 +315,6 @@ static int chnl_net_open(struct net_device *dev) if (result == 0) { pr_debug("connect timeout\n"); - caif_disconnect_client(dev_net(dev), &priv->chnl); - priv->state = CAIF_DISCONNECTED; - pr_debug("state disconnected\n"); result = -ETIMEDOUT; goto error; } diff --git a/net/can/af_can.c b/net/can/af_can.c index 1c95ede2c9a6..79f24c6f43c8 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -451,7 +451,7 @@ int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id, /* insert new receiver (dev,canid,mask) -> (func,data) */ - if (dev && dev->type != ARPHRD_CAN) + if (dev && (dev->type != ARPHRD_CAN || !can_get_ml_priv(dev))) return -ENODEV; if (dev && !net_eq(net, dev_net(dev))) @@ -680,7 +680,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev, { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) { + if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || skb->len != CAN_MTU)) { pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n", dev->type, skb->len); goto free_skb; @@ -706,7 +706,7 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev, { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) { + if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || skb->len != CANFD_MTU)) { pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n", dev->type, skb->len); goto free_skb; diff --git a/net/can/bcm.c b/net/can/bcm.c index 0928a39c4423..afa82adaf6cd 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -100,6 +100,7 @@ static inline u64 get_u64(const struct canfd_frame *cp, int offset) struct bcm_op { struct list_head list; + struct rcu_head rcu; int ifindex; canid_t can_id; u32 flags; @@ -273,6 +274,7 @@ static void bcm_can_tx(struct bcm_op *op) struct sk_buff *skb; struct net_device *dev; struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe; + int err; /* no target device? => exit */ if (!op->ifindex) @@ -297,11 +299,11 @@ static void bcm_can_tx(struct bcm_op *op) /* send with loopback */ skb->dev = dev; can_skb_set_owner(skb, op->sk); - can_send(skb, 1); + err = can_send(skb, 1); + if (!err) + op->frames_abs++; - /* update statistics */ op->currframe++; - op->frames_abs++; /* reached last frame? 
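The rcu_head just added to struct bcm_op above pairs with the bcm_free_op_rcu()/call_rcu() conversion below: instead of a costly synchronize_rcu() on every delete, the free is deferred until concurrent readers are done. A toy userspace model of the deferred-free shape; note the explicit drain here is only a stand-in for a grace period, not real RCU semantics, and all names besides the pattern itself are invented:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct rcu_head {
        struct rcu_head *next;
        void (*func)(struct rcu_head *);
    };

    static struct rcu_head *pending;

    /* Queue a callback to run once readers are done. */
    static void call_rcu(struct rcu_head *head,
                         void (*func)(struct rcu_head *))
    {
        head->func = func;
        head->next = pending;
        pending = head;
    }

    static void rcu_drain(void)     /* stand-in for a grace period */
    {
        while (pending) {
            struct rcu_head *h = pending;

            pending = h->next;
            h->func(h);
        }
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bcm_op {
        int ifindex;
        struct rcu_head rcu;
    };

    static void bcm_free_op_rcu(struct rcu_head *rcu)
    {
        struct bcm_op *op = container_of(rcu, struct bcm_op, rcu);

        printf("freeing op for ifindex %d\n", op->ifindex);
        free(op);
    }

    int main(void)
    {
        struct bcm_op *op = malloc(sizeof(*op));

        op->ifindex = 3;
        /* Unlink op, cancel its timers... then defer the free. */
        call_rcu(&op->rcu, bcm_free_op_rcu);
        rcu_drain();
        return 0;
    }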
*/ if (op->currframe >= op->nframes) @@ -718,10 +720,9 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, return NULL; } -static void bcm_remove_op(struct bcm_op *op) +static void bcm_free_op_rcu(struct rcu_head *rcu_head) { - hrtimer_cancel(&op->timer); - hrtimer_cancel(&op->thrtimer); + struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu); if ((op->frames) && (op->frames != &op->sframe)) kfree(op->frames); @@ -732,6 +733,14 @@ static void bcm_remove_op(struct bcm_op *op) kfree(op); } +static void bcm_remove_op(struct bcm_op *op) +{ + hrtimer_cancel(&op->timer); + hrtimer_cancel(&op->thrtimer); + + call_rcu(&op->rcu, bcm_free_op_rcu); +} + static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op) { if (op->rx_reg_dev == dev) { @@ -757,6 +766,9 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) { + /* disable automatic timer on frame reception */ + op->flags |= RX_NO_AUTOTIMER; + /* * Don't care if we're bound or not (due to netdev * problems) can_rx_unregister() is always a save @@ -785,7 +797,6 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, bcm_rx_handler, op); list_del(&op->list); - synchronize_rcu(); bcm_remove_op(op); return 1; /* done */ } diff --git a/net/can/isotp.c b/net/can/isotp.c index 63e6e8923200..f2f0bc7f0cb4 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -141,6 +141,7 @@ struct isotp_sock { struct can_isotp_options opt; struct can_isotp_fc_options rxfc, txfc; struct can_isotp_ll_options ll; + u32 frame_txtime; u32 force_tx_stmin; u32 force_rx_stmin; struct tpcon rx, tx; @@ -360,7 +361,7 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae) so->tx_gap = ktime_set(0, 0); /* add transmission time for CAN frame N_As */ - so->tx_gap = ktime_add_ns(so->tx_gap, so->opt.frame_txtime); + so->tx_gap = ktime_add_ns(so->tx_gap, so->frame_txtime); /* add waiting time for consecutive frames N_Cs */ if (so->opt.flags & CAN_ISOTP_FORCE_TXSTMIN) so->tx_gap = ktime_add_ns(so->tx_gap, @@ -863,6 +864,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) struct canfd_frame *cf; int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0; int wait_tx_done = (so->opt.flags & CAN_ISOTP_WAIT_TX_DONE) ? 
1 : 0; + s64 hrtimer_sec = 0; int off; int err; @@ -961,7 +963,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) isotp_create_fframe(cf, so, ae); /* start timeout for FC */ - hrtimer_start(&so->txtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT); + hrtimer_sec = 1; + hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0), + HRTIMER_MODE_REL_SOFT); } /* send the first or only CAN frame */ @@ -974,6 +978,11 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (err) { pr_notice_once("can-isotp: %s: can_send_ret %d\n", __func__, err); + + /* no transmission -> no timeout monitoring */ + if (hrtimer_sec) + hrtimer_cancel(&so->txtimer); + goto err_out_drop; } @@ -1135,6 +1144,11 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) lock_sock(sk); + if (so->bound) { + err = -EINVAL; + goto out; + } + /* do not register frame reception for functional addressing */ if (so->opt.flags & CAN_ISOTP_SF_BROADCAST) do_rx_reg = 0; @@ -1145,10 +1159,6 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) goto out; } - if (so->bound && addr->can_ifindex == so->ifindex && - rx_id == so->rxid && tx_id == so->txid) - goto out; - dev = dev_get_by_index(net, addr->can_ifindex); if (!dev) { err = -ENODEV; @@ -1175,19 +1185,6 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) dev_put(dev); - if (so->bound && do_rx_reg) { - /* unregister old filter */ - if (so->ifindex) { - dev = dev_get_by_index(net, so->ifindex); - if (dev) { - can_rx_unregister(net, dev, so->rxid, - SINGLE_MASK(so->rxid), - isotp_rcv, sk); - dev_put(dev); - } - } - } - /* switch to new settings */ so->ifindex = ifindex; so->rxid = rx_id; @@ -1245,6 +1242,14 @@ static int isotp_setsockopt_locked(struct socket *sock, int level, int optname, /* no separate rx_ext_address is given => use ext_address */ if (!(so->opt.flags & CAN_ISOTP_RX_EXT_ADDR)) so->opt.rx_ext_address = so->opt.ext_address; + + /* check for frame_txtime changes (0 => no changes) */ + if (so->opt.frame_txtime) { + if (so->opt.frame_txtime == CAN_ISOTP_FRAME_TXTIME_ZERO) + so->frame_txtime = 0; + else + so->frame_txtime = so->opt.frame_txtime; + } break; case CAN_ISOTP_RECV_FC: @@ -1446,6 +1451,7 @@ static int isotp_init(struct sock *sk) so->opt.rxpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT; so->opt.txpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT; so->opt.frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME; + so->frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME; so->rxfc.bs = CAN_ISOTP_DEFAULT_RECV_BS; so->rxfc.stmin = CAN_ISOTP_DEFAULT_RECV_STMIN; so->rxfc.wftmax = CAN_ISOTP_DEFAULT_RECV_WFTMAX; diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c index ca75d1b8f415..9da8fbc81c04 100644 --- a/net/can/j1939/main.c +++ b/net/can/j1939/main.c @@ -332,6 +332,9 @@ int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb) /* re-claim the CAN_HDR from the SKB */ cf = skb_push(skb, J1939_CAN_HDR); + /* initialize header structure */ + memset(cf, 0, J1939_CAN_HDR); + /* make it a full can frame again */ skb_put(skb, J1939_CAN_FTR + (8 - dlc)); diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index e1a399821238..709141abd131 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -178,7 +178,10 @@ activate_next: if (!first) return; - if (WARN_ON_ONCE(j1939_session_activate(first))) { + if (j1939_session_activate(first)) { + netdev_warn_once(first->priv->ndev, + "%s: 0x%p: Identical session is already activated.\n", + __func__, 
first); first->err = -EBUSY; goto activate_next; } else { diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c index 9c39b0f5d6e0..78f6a9110699 100644 --- a/net/can/j1939/transport.c +++ b/net/can/j1939/transport.c @@ -260,6 +260,8 @@ static void __j1939_session_drop(struct j1939_session *session) static void j1939_session_destroy(struct j1939_session *session) { + struct sk_buff *skb; + if (session->err) j1939_sk_errqueue(session, J1939_ERRQUEUE_ABORT); else @@ -270,7 +272,11 @@ static void j1939_session_destroy(struct j1939_session *session) WARN_ON_ONCE(!list_empty(&session->sk_session_queue_entry)); WARN_ON_ONCE(!list_empty(&session->active_session_list_entry)); - skb_queue_purge(&session->skb_queue); + while ((skb = skb_dequeue(&session->skb_queue)) != NULL) { + /* drop ref taken in j1939_session_skb_queue() */ + skb_unref(skb); + kfree_skb(skb); + } __j1939_session_drop(session); j1939_priv_put(session->priv); kfree(session); @@ -332,10 +338,12 @@ static void j1939_session_skb_drop_old(struct j1939_session *session) __skb_unlink(do_skb, &session->skb_queue); /* drop ref taken in j1939_session_skb_queue() */ skb_unref(do_skb); + spin_unlock_irqrestore(&session->skb_queue.lock, flags); kfree_skb(do_skb); + } else { + spin_unlock_irqrestore(&session->skb_queue.lock, flags); } - spin_unlock_irqrestore(&session->skb_queue.lock, flags); } void j1939_session_skb_queue(struct j1939_session *session, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 7901ab6c79fd..1e9fab79e245 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -537,43 +537,6 @@ static void request_init(struct ceph_osd_request *req) target_init(&req->r_t); } -/* - * This is ugly, but it allows us to reuse linger registration and ping - * requests, keeping the structure of the code around send_linger{_ping}() - * reasonable. Setting up a min_nr=2 mempool for each linger request - * and dealing with copying ops (this blasts req only, watch op remains - * intact) isn't any better. 
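The deleted comment above concedes that request_reinit() was awkward; the remaining ceph hunks replace it with allocating a fresh request for every (re)send, trading one allocation per send for not having to scrub every field of a live object. A toy reduction of that pattern, with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    struct request {
        int refcount;
        int op;
    };

    static struct request *alloc_request(int op)
    {
        struct request *req = calloc(1, sizeof(*req));

        if (!req)
            return NULL;
        req->refcount = 1;
        req->op = op;
        return req;
    }

    static void put_request(struct request *req)
    {
        if (req && --req->refcount == 0)
            free(req);
    }

    /* resend(): drop whatever request was in flight and start from a
     * freshly allocated, known-clean one, as send_linger() now does. */
    static struct request *resend(struct request *old, int op)
    {
        put_request(old);
        return alloc_request(op);
    }

    int main(void)
    {
        struct request *req = alloc_request(1);

        req = resend(req, 2);   /* reconnect with a clean request */
        printf("op=%d refcount=%d\n", req->op, req->refcount);
        put_request(req);
        return 0;
    }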
- */ -static void request_reinit(struct ceph_osd_request *req) -{ - struct ceph_osd_client *osdc = req->r_osdc; - bool mempool = req->r_mempool; - unsigned int num_ops = req->r_num_ops; - u64 snapid = req->r_snapid; - struct ceph_snap_context *snapc = req->r_snapc; - bool linger = req->r_linger; - struct ceph_msg *request_msg = req->r_request; - struct ceph_msg *reply_msg = req->r_reply; - - dout("%s req %p\n", __func__, req); - WARN_ON(kref_read(&req->r_kref) != 1); - request_release_checks(req); - - WARN_ON(kref_read(&request_msg->kref) != 1); - WARN_ON(kref_read(&reply_msg->kref) != 1); - target_destroy(&req->r_t); - - request_init(req); - req->r_osdc = osdc; - req->r_mempool = mempool; - req->r_num_ops = num_ops; - req->r_snapid = snapid; - req->r_snapc = snapc; - req->r_linger = linger; - req->r_request = request_msg; - req->r_reply = reply_msg; -} - struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, struct ceph_snap_context *snapc, unsigned int num_ops, @@ -918,14 +881,30 @@ EXPORT_SYMBOL(osd_req_op_xattr_init); * @watch_opcode: CEPH_OSD_WATCH_OP_* */ static void osd_req_op_watch_init(struct ceph_osd_request *req, int which, - u64 cookie, u8 watch_opcode) + u8 watch_opcode, u64 cookie, u32 gen) { struct ceph_osd_req_op *op; op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0); op->watch.cookie = cookie; op->watch.op = watch_opcode; - op->watch.gen = 0; + op->watch.gen = gen; +} + +/* + * prot_ver, timeout and notify payload (may be empty) should already be + * encoded in @request_pl + */ +static void osd_req_op_notify_init(struct ceph_osd_request *req, int which, + u64 cookie, struct ceph_pagelist *request_pl) +{ + struct ceph_osd_req_op *op; + + op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0); + op->notify.cookie = cookie; + + ceph_osd_data_pagelist_init(&op->notify.request_data, request_pl); + op->indata_len = request_pl->length; } /* @@ -2727,10 +2706,13 @@ static void linger_release(struct kref *kref) WARN_ON(!list_empty(&lreq->pending_lworks)); WARN_ON(lreq->osd); - if (lreq->reg_req) - ceph_osdc_put_request(lreq->reg_req); - if (lreq->ping_req) - ceph_osdc_put_request(lreq->ping_req); + if (lreq->request_pl) + ceph_pagelist_release(lreq->request_pl); + if (lreq->notify_id_pages) + ceph_release_page_vector(lreq->notify_id_pages, 1); + + ceph_osdc_put_request(lreq->reg_req); + ceph_osdc_put_request(lreq->ping_req); target_destroy(&lreq->t); kfree(lreq); } @@ -2999,6 +2981,12 @@ static void linger_commit_cb(struct ceph_osd_request *req) struct ceph_osd_linger_request *lreq = req->r_priv; mutex_lock(&lreq->lock); + if (req != lreq->reg_req) { + dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n", + __func__, lreq, lreq->linger_id, req, lreq->reg_req); + goto out; + } + dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq, lreq->linger_id, req->r_result); linger_reg_commit_complete(lreq, req->r_result); @@ -3022,6 +3010,7 @@ static void linger_commit_cb(struct ceph_osd_request *req) } } +out: mutex_unlock(&lreq->lock); linger_put(lreq); } @@ -3044,6 +3033,12 @@ static void linger_reconnect_cb(struct ceph_osd_request *req) struct ceph_osd_linger_request *lreq = req->r_priv; mutex_lock(&lreq->lock); + if (req != lreq->reg_req) { + dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n", + __func__, lreq, lreq->linger_id, req, lreq->reg_req); + goto out; + } + dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__, lreq, lreq->linger_id, req->r_result, lreq->last_error); if (req->r_result < 0) { @@ -3053,46 +3048,64 @@ 
static void linger_reconnect_cb(struct ceph_osd_request *req) } } +out: mutex_unlock(&lreq->lock); linger_put(lreq); } static void send_linger(struct ceph_osd_linger_request *lreq) { - struct ceph_osd_request *req = lreq->reg_req; - struct ceph_osd_req_op *op = &req->r_ops[0]; + struct ceph_osd_client *osdc = lreq->osdc; + struct ceph_osd_request *req; + int ret; - verify_osdc_wrlocked(req->r_osdc); + verify_osdc_wrlocked(osdc); + mutex_lock(&lreq->lock); dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); - if (req->r_osd) - cancel_linger_request(req); + if (lreq->reg_req) { + if (lreq->reg_req->r_osd) + cancel_linger_request(lreq->reg_req); + ceph_osdc_put_request(lreq->reg_req); + } + + req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO); + BUG_ON(!req); - request_reinit(req); target_copy(&req->r_t, &lreq->t); req->r_mtime = lreq->mtime; - mutex_lock(&lreq->lock); if (lreq->is_watch && lreq->committed) { - WARN_ON(op->op != CEPH_OSD_OP_WATCH || - op->watch.cookie != lreq->linger_id); - op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT; - op->watch.gen = ++lreq->register_gen; + osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_RECONNECT, + lreq->linger_id, ++lreq->register_gen); dout("lreq %p reconnect register_gen %u\n", lreq, - op->watch.gen); + req->r_ops[0].watch.gen); req->r_callback = linger_reconnect_cb; } else { - if (!lreq->is_watch) + if (lreq->is_watch) { + osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_WATCH, + lreq->linger_id, 0); + } else { lreq->notify_id = 0; - else - WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH); + + refcount_inc(&lreq->request_pl->refcnt); + osd_req_op_notify_init(req, 0, lreq->linger_id, + lreq->request_pl); + ceph_osd_data_pages_init( + osd_req_op_data(req, 0, notify, response_data), + lreq->notify_id_pages, PAGE_SIZE, 0, false, false); + } dout("lreq %p register\n", lreq); req->r_callback = linger_commit_cb; } - mutex_unlock(&lreq->lock); + + ret = ceph_osdc_alloc_messages(req, GFP_NOIO); + BUG_ON(ret); req->r_priv = linger_get(lreq); req->r_linger = true; + lreq->reg_req = req; + mutex_unlock(&lreq->lock); submit_request(req, true); } @@ -3102,6 +3115,12 @@ static void linger_ping_cb(struct ceph_osd_request *req) struct ceph_osd_linger_request *lreq = req->r_priv; mutex_lock(&lreq->lock); + if (req != lreq->ping_req) { + dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n", + __func__, lreq, lreq->linger_id, req, lreq->ping_req); + goto out; + } + dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n", __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent, lreq->last_error); @@ -3117,6 +3136,7 @@ static void linger_ping_cb(struct ceph_osd_request *req) lreq->register_gen, req->r_ops[0].watch.gen); } +out: mutex_unlock(&lreq->lock); linger_put(lreq); } @@ -3124,8 +3144,8 @@ static void linger_ping_cb(struct ceph_osd_request *req) static void send_linger_ping(struct ceph_osd_linger_request *lreq) { struct ceph_osd_client *osdc = lreq->osdc; - struct ceph_osd_request *req = lreq->ping_req; - struct ceph_osd_req_op *op = &req->r_ops[0]; + struct ceph_osd_request *req; + int ret; if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { dout("%s PAUSERD\n", __func__); @@ -3137,19 +3157,26 @@ static void send_linger_ping(struct ceph_osd_linger_request *lreq) __func__, lreq, lreq->linger_id, lreq->ping_sent, lreq->register_gen); - if (req->r_osd) - cancel_linger_request(req); + if (lreq->ping_req) { + if (lreq->ping_req->r_osd) + cancel_linger_request(lreq->ping_req); + ceph_osdc_put_request(lreq->ping_req); 
+ } + + req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO); + BUG_ON(!req); - request_reinit(req); target_copy(&req->r_t, &lreq->t); - - WARN_ON(op->op != CEPH_OSD_OP_WATCH || - op->watch.cookie != lreq->linger_id || - op->watch.op != CEPH_OSD_WATCH_OP_PING); - op->watch.gen = lreq->register_gen; + osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_PING, lreq->linger_id, + lreq->register_gen); req->r_callback = linger_ping_cb; + + ret = ceph_osdc_alloc_messages(req, GFP_NOIO); + BUG_ON(ret); + req->r_priv = linger_get(lreq); req->r_linger = true; + lreq->ping_req = req; ceph_osdc_get_request(req); account_request(req); @@ -3165,12 +3192,6 @@ static void linger_submit(struct ceph_osd_linger_request *lreq) down_write(&osdc->lock); linger_register(lreq); - if (lreq->is_watch) { - lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id; - lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id; - } else { - lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id; - } calc_target(osdc, &lreq->t, false); osd = lookup_create_osd(osdc, lreq->t.osd, true); @@ -3202,9 +3223,9 @@ static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq) */ static void __linger_cancel(struct ceph_osd_linger_request *lreq) { - if (lreq->is_watch && lreq->ping_req->r_osd) + if (lreq->ping_req && lreq->ping_req->r_osd) cancel_linger_request(lreq->ping_req); - if (lreq->reg_req->r_osd) + if (lreq->reg_req && lreq->reg_req->r_osd) cancel_linger_request(lreq->reg_req); cancel_linger_map_check(lreq); unlink_linger(lreq->osd, lreq); @@ -4651,43 +4672,6 @@ again: } EXPORT_SYMBOL(ceph_osdc_sync); -static struct ceph_osd_request * -alloc_linger_request(struct ceph_osd_linger_request *lreq) -{ - struct ceph_osd_request *req; - - req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO); - if (!req) - return NULL; - - ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid); - ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc); - return req; -} - -static struct ceph_osd_request * -alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode) -{ - struct ceph_osd_request *req; - - req = alloc_linger_request(lreq); - if (!req) - return NULL; - - /* - * Pass 0 for cookie because we don't know it yet, it will be - * filled in by linger_submit(). - */ - osd_req_op_watch_init(req, 0, 0, watch_opcode); - - if (ceph_osdc_alloc_messages(req, GFP_NOIO)) { - ceph_osdc_put_request(req); - return NULL; - } - - return req; -} - /* * Returns a handle, caller owns a ref. 
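The linger callbacks above (linger_commit_cb, linger_reconnect_cb, linger_ping_cb) now bail out when the completing request is no longer the one tracked by lreq, a situation that becomes possible once requests are reallocated per send. A toy model of that identity check, with locking elided and invented names:

    #include <stdio.h>

    struct request { int id; };

    struct linger {
        struct request *reg_req;    /* currently tracked request */
    };

    static void commit_cb(struct linger *lreq, struct request *req)
    {
        /* the real code holds lreq->lock around this check */
        if (req != lreq->reg_req) {
            printf("req %d: stale completion, ignored\n", req->id);
            return;
        }
        printf("req %d: committed\n", req->id);
    }

    int main(void)
    {
        struct request old = { .id = 1 }, fresh = { .id = 2 };
        struct linger lreq = { .reg_req = &fresh };

        commit_cb(&lreq, &old);     /* superseded request: ignored */
        commit_cb(&lreq, &fresh);
        return 0;
    }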
*/ @@ -4717,18 +4701,6 @@ ceph_osdc_watch(struct ceph_osd_client *osdc, lreq->t.flags = CEPH_OSD_FLAG_WRITE; ktime_get_real_ts64(&lreq->mtime); - lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH); - if (!lreq->reg_req) { - ret = -ENOMEM; - goto err_put_lreq; - } - - lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING); - if (!lreq->ping_req) { - ret = -ENOMEM; - goto err_put_lreq; - } - linger_submit(lreq); ret = linger_reg_commit_wait(lreq); if (ret) { @@ -4766,8 +4738,8 @@ int ceph_osdc_unwatch(struct ceph_osd_client *osdc, ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc); req->r_flags = CEPH_OSD_FLAG_WRITE; ktime_get_real_ts64(&req->r_mtime); - osd_req_op_watch_init(req, 0, lreq->linger_id, - CEPH_OSD_WATCH_OP_UNWATCH); + osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_UNWATCH, + lreq->linger_id, 0); ret = ceph_osdc_alloc_messages(req, GFP_NOIO); if (ret) @@ -4853,35 +4825,6 @@ out_put_req: } EXPORT_SYMBOL(ceph_osdc_notify_ack); -static int osd_req_op_notify_init(struct ceph_osd_request *req, int which, - u64 cookie, u32 prot_ver, u32 timeout, - void *payload, u32 payload_len) -{ - struct ceph_osd_req_op *op; - struct ceph_pagelist *pl; - int ret; - - op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0); - op->notify.cookie = cookie; - - pl = ceph_pagelist_alloc(GFP_NOIO); - if (!pl) - return -ENOMEM; - - ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */ - ret |= ceph_pagelist_encode_32(pl, timeout); - ret |= ceph_pagelist_encode_32(pl, payload_len); - ret |= ceph_pagelist_append(pl, payload, payload_len); - if (ret) { - ceph_pagelist_release(pl); - return -ENOMEM; - } - - ceph_osd_data_pagelist_init(&op->notify.request_data, pl); - op->indata_len = pl->length; - return 0; -} - /* * @timeout: in seconds * @@ -4900,7 +4843,6 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc, size_t *preply_len) { struct ceph_osd_linger_request *lreq; - struct page **pages; int ret; WARN_ON(!timeout); @@ -4913,6 +4855,29 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc, if (!lreq) return -ENOMEM; + lreq->request_pl = ceph_pagelist_alloc(GFP_NOIO); + if (!lreq->request_pl) { + ret = -ENOMEM; + goto out_put_lreq; + } + + ret = ceph_pagelist_encode_32(lreq->request_pl, 1); /* prot_ver */ + ret |= ceph_pagelist_encode_32(lreq->request_pl, timeout); + ret |= ceph_pagelist_encode_32(lreq->request_pl, payload_len); + ret |= ceph_pagelist_append(lreq->request_pl, payload, payload_len); + if (ret) { + ret = -ENOMEM; + goto out_put_lreq; + } + + /* for notify_id */ + lreq->notify_id_pages = ceph_alloc_page_vector(1, GFP_NOIO); + if (IS_ERR(lreq->notify_id_pages)) { + ret = PTR_ERR(lreq->notify_id_pages); + lreq->notify_id_pages = NULL; + goto out_put_lreq; + } + lreq->preply_pages = preply_pages; lreq->preply_len = preply_len; @@ -4920,35 +4885,6 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc, ceph_oloc_copy(&lreq->t.base_oloc, oloc); lreq->t.flags = CEPH_OSD_FLAG_READ; - lreq->reg_req = alloc_linger_request(lreq); - if (!lreq->reg_req) { - ret = -ENOMEM; - goto out_put_lreq; - } - - /* - * Pass 0 for cookie because we don't know it yet, it will be - * filled in by linger_submit(). 
- */ - ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout, - payload, payload_len); - if (ret) - goto out_put_lreq; - - /* for notify_id */ - pages = ceph_alloc_page_vector(1, GFP_NOIO); - if (IS_ERR(pages)) { - ret = PTR_ERR(pages); - goto out_put_lreq; - } - ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify, - response_data), - pages, PAGE_SIZE, 0, false, true); - - ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO); - if (ret) - goto out_put_lreq; - linger_submit(lreq); ret = linger_reg_commit_wait(lreq); if (!ret) diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index c907f0dc7f87..d67d06d6b817 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -15,18 +15,6 @@ DEFINE_BPF_STORAGE_CACHE(sk_cache); -static int omem_charge(struct sock *sk, unsigned int size) -{ - /* same check as in sock_kmalloc() */ - if (size <= sysctl_optmem_max && - atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { - atomic_add(size, &sk->sk_omem_alloc); - return 0; - } - - return -ENOMEM; -} - static struct bpf_local_storage_data * sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit) { @@ -316,7 +304,17 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk) static int sk_storage_charge(struct bpf_local_storage_map *smap, void *owner, u32 size) { - return omem_charge(owner, size); + int optmem_max = READ_ONCE(sysctl_optmem_max); + struct sock *sk = (struct sock *)owner; + + /* same check as in sock_kmalloc() */ + if (size <= optmem_max && + atomic_read(&sk->sk_omem_alloc) + size < optmem_max) { + atomic_add(size, &sk->sk_omem_alloc); + return 0; + } + + return -ENOMEM; } static void sk_storage_uncharge(struct bpf_local_storage_map *smap, @@ -794,10 +792,18 @@ static int bpf_iter_init_sk_storage_map(void *priv_data, { struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data; + bpf_map_inc_with_uref(aux->map); seq_info->map = aux->map; return 0; } +static void bpf_iter_fini_sk_storage_map(void *priv_data) +{ + struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data; + + bpf_map_put_with_uref(seq_info->map); +} + static int bpf_iter_attach_map(struct bpf_prog *prog, union bpf_iter_link_info *linfo, struct bpf_iter_aux_info *aux) @@ -815,7 +821,7 @@ static int bpf_iter_attach_map(struct bpf_prog *prog, if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) goto put_map; - if (prog->aux->max_rdonly_access > map->value_size) { + if (prog->aux->max_rdwr_access > map->value_size) { err = -EACCES; goto put_map; } @@ -843,7 +849,7 @@ static const struct seq_operations bpf_sk_storage_map_seq_ops = { static const struct bpf_iter_seq_info iter_seq_info = { .seq_ops = &bpf_sk_storage_map_seq_ops, .init_seq_private = bpf_iter_init_sk_storage_map, - .fini_seq_private = NULL, + .fini_seq_private = bpf_iter_fini_sk_storage_map, .seq_priv_size = sizeof(struct bpf_iter_seq_sk_storage_map_info), }; diff --git a/net/core/dev.c b/net/core/dev.c index a6342c3b8afb..bc5dcf57ebe6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3246,11 +3246,15 @@ int skb_checksum_help(struct sk_buff *skb) } offset = skb_checksum_start_offset(skb); - BUG_ON(offset >= skb_headlen(skb)); + ret = -EINVAL; + if (WARN_ON_ONCE(offset >= skb_headlen(skb))) + goto out; + csum = skb_checksum(skb, offset, skb->len - offset, 0); offset += skb->csum_offset; - BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); + if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) + goto out; ret = skb_ensure_writable(skb, offset + 
sizeof(__sum16)); if (ret) @@ -4098,6 +4102,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) bool again = false; skb_reset_mac_header(skb); + skb_assert_len(skb); if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED); @@ -4517,7 +4522,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen) struct softnet_data *sd; unsigned int old_flow, new_flow; - if (qlen < (netdev_max_backlog >> 1)) + if (qlen < (READ_ONCE(netdev_max_backlog) >> 1)) return false; sd = this_cpu_ptr(&softnet_data); @@ -4565,7 +4570,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, if (!netif_running(skb->dev)) goto drop; qlen = skb_queue_len(&sd->input_pkt_queue); - if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { + if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) { if (qlen) { enqueue: __skb_queue_tail(&sd->input_pkt_queue, skb); @@ -4796,7 +4801,7 @@ static int netif_rx_internal(struct sk_buff *skb) { int ret; - net_timestamp_check(netdev_tstamp_prequeue, skb); + net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); trace_netif_rx(skb); @@ -5157,7 +5162,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, int ret = NET_RX_DROP; __be16 type; - net_timestamp_check(!netdev_tstamp_prequeue, skb); + net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb); trace_netif_receive_skb(skb); @@ -5559,7 +5564,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb) { int ret; - net_timestamp_check(netdev_tstamp_prequeue, skb); + net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); if (skb_defer_rx_timestamp(skb)) return NET_RX_SUCCESS; @@ -5589,7 +5594,7 @@ static void netif_receive_skb_list_internal(struct list_head *head) INIT_LIST_HEAD(&sublist); list_for_each_entry_safe(skb, next, head, list) { - net_timestamp_check(netdev_tstamp_prequeue, skb); + net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb); skb_list_del_init(skb); if (!skb_defer_rx_timestamp(skb)) list_add_tail(&skb->list, &sublist); @@ -5751,7 +5756,7 @@ static void flush_all_backlogs(void) } /* we can have in flight packet[s] on the cpus we are not flushing, - * synchronize_net() in rollback_registered_many() will take care of + * synchronize_net() in unregister_netdevice_many() will take care of * them */ for_each_cpu(cpu, &flush_cpus) @@ -6372,7 +6377,7 @@ static int process_backlog(struct napi_struct *napi, int quota) net_rps_action_and_irq_enable(sd); } - napi->weight = dev_rx_weight; + napi->weight = READ_ONCE(dev_rx_weight); while (again) { struct sk_buff *skb; @@ -6880,8 +6885,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); unsigned long time_limit = jiffies + - usecs_to_jiffies(netdev_budget_usecs); - int budget = netdev_budget; + usecs_to_jiffies(READ_ONCE(netdev_budget_usecs)); + int budget = READ_ONCE(netdev_budget); LIST_HEAD(list); LIST_HEAD(repoll); @@ -9509,106 +9514,6 @@ static void net_set_todo(struct net_device *dev) dev_net(dev)->dev_unreg_count++; } -static void rollback_registered_many(struct list_head *head) -{ - struct net_device *dev, *tmp; - LIST_HEAD(close_head); - - BUG_ON(dev_boot_phase); - ASSERT_RTNL(); - - list_for_each_entry_safe(dev, tmp, head, unreg_list) { - /* Some devices call without registering - * for initialization unwind. Remove those - * devices and proceed with the remaining. 
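Several dev.c hunks above wrap reads of writable sysctls (netdev_max_backlog, netdev_tstamp_prequeue, dev_rx_weight, netdev_budget and netdev_budget_usecs) in READ_ONCE(): the values can change concurrently, and the annotated read both documents the data race and keeps the compiler from tearing or re-reading the value. A C11 userspace analogue using a relaxed atomic load; the variable here is only a stand-in:

    #include <stdatomic.h>
    #include <stdio.h>

    /* A sysctl handler on another thread may store to this at any
     * time; readers must take one stable snapshot per decision. */
    static _Atomic int netdev_max_backlog = 1000;

    static int read_backlog_once(void)
    {
        /* The moral equivalent of READ_ONCE() */
        return atomic_load_explicit(&netdev_max_backlog,
                                    memory_order_relaxed);
    }

    int main(void)
    {
        int limit = read_backlog_once();

        printf("qlen limit this pass: %d\n", limit >> 1);
        return 0;
    }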
- */ - if (dev->reg_state == NETREG_UNINITIALIZED) { - pr_debug("unregister_netdevice: device %s/%p never was registered\n", - dev->name, dev); - - WARN_ON(1); - list_del(&dev->unreg_list); - continue; - } - dev->dismantle = true; - BUG_ON(dev->reg_state != NETREG_REGISTERED); - } - - /* If device is running, close it first. */ - list_for_each_entry(dev, head, unreg_list) - list_add_tail(&dev->close_list, &close_head); - dev_close_many(&close_head, true); - - list_for_each_entry(dev, head, unreg_list) { - /* And unlink it from device chain. */ - unlist_netdevice(dev); - - dev->reg_state = NETREG_UNREGISTERING; - } - flush_all_backlogs(); - - synchronize_net(); - - list_for_each_entry(dev, head, unreg_list) { - struct sk_buff *skb = NULL; - - /* Shutdown queueing discipline. */ - dev_shutdown(dev); - - dev_xdp_uninstall(dev); - - /* Notify protocols, that we are about to destroy - * this device. They should clean all the things. - */ - call_netdevice_notifiers(NETDEV_UNREGISTER, dev); - - if (!dev->rtnl_link_ops || - dev->rtnl_link_state == RTNL_LINK_INITIALIZED) - skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, - GFP_KERNEL, NULL, 0); - - /* - * Flush the unicast and multicast chains - */ - dev_uc_flush(dev); - dev_mc_flush(dev); - - netdev_name_node_alt_flush(dev); - netdev_name_node_free(dev->name_node); - - if (dev->netdev_ops->ndo_uninit) - dev->netdev_ops->ndo_uninit(dev); - - if (skb) - rtmsg_ifinfo_send(skb, dev, GFP_KERNEL); - - /* Notifier chain MUST detach us all upper devices. */ - WARN_ON(netdev_has_any_upper_dev(dev)); - WARN_ON(netdev_has_any_lower_dev(dev)); - - /* Remove entries from kobject tree */ - netdev_unregister_kobject(dev); -#ifdef CONFIG_XPS - /* Remove XPS queueing entries */ - netif_reset_xps_queues_gt(dev, 0); -#endif - } - - synchronize_net(); - - list_for_each_entry(dev, head, unreg_list) - dev_put(dev); -} - -static void rollback_registered(struct net_device *dev) -{ - LIST_HEAD(single); - - list_add(&dev->unreg_list, &single); - rollback_registered_many(&single); - list_del(&single); -} - static netdev_features_t netdev_sync_upper_features(struct net_device *lower, struct net_device *upper, netdev_features_t features) { @@ -10145,17 +10050,10 @@ int register_netdevice(struct net_device *dev) ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); ret = notifier_to_errno(ret); if (ret) { - rollback_registered(dev); - rcu_barrier(); - - dev->reg_state = NETREG_UNREGISTERED; - /* We should put the kobject that hold in - * netdev_unregister_kobject(), otherwise - * the net device cannot be freed when - * driver calls free_netdev(), because the - * kobject is being hold. - */ - kobject_put(&dev->dev.kobj); + /* Expect explicit free_netdev() on failure */ + dev->needs_free_netdev = false; + unregister_netdevice_queue(dev, NULL); + goto out; } /* * Prevent userspace races by waiting until the network @@ -10684,6 +10582,17 @@ void free_netdev(struct net_device *dev) struct napi_struct *p, *n; might_sleep(); + + /* When called immediately after register_netdevice() failed the unwind + * handling may still be dismantling the device. Handle that case by + * deferring the free. 
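The new early return in free_netdev() described above handles a driver calling free_netdev() while the register_netdevice() failure path is still dismantling the device: rather than freeing under the dismantler, it records that a free is wanted and lets the unwind perform it. A toy model of that hand-off, with the states and names simplified:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum reg_state { REGISTERED, UNREGISTERING, UNREGISTERED };

    struct netdev {
        enum reg_state reg_state;
        bool needs_free;
    };

    static void free_netdev(struct netdev *dev)
    {
        /* Unwind still in progress: defer, don't free. */
        if (dev->reg_state == UNREGISTERING) {
            dev->needs_free = true;
            return;
        }
        free(dev);
    }

    static void unregister_finish(struct netdev *dev)
    {
        dev->reg_state = UNREGISTERED;
        if (dev->needs_free)
            free(dev);
    }

    int main(void)
    {
        struct netdev *dev = calloc(1, sizeof(*dev));

        dev->reg_state = UNREGISTERING;
        free_netdev(dev);        /* too early: only marks needs_free */
        unregister_finish(dev);  /* the unwind frees it here */
        return 0;
    }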
+ */ + if (dev->reg_state == NETREG_UNREGISTERING) { + ASSERT_RTNL(); + dev->needs_free_netdev = true; + return; + } + netif_free_tx_queues(dev); netif_free_rx_queues(dev); @@ -10750,9 +10659,10 @@ void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) if (head) { list_move_tail(&dev->unreg_list, head); } else { - rollback_registered(dev); - /* Finish processing unregister after unlock */ - net_set_todo(dev); + LIST_HEAD(single); + + list_add(&dev->unreg_list, &single); + unregister_netdevice_many(&single); } } EXPORT_SYMBOL(unregister_netdevice_queue); @@ -10766,14 +10676,100 @@ EXPORT_SYMBOL(unregister_netdevice_queue); */ void unregister_netdevice_many(struct list_head *head) { - struct net_device *dev; + struct net_device *dev, *tmp; + LIST_HEAD(close_head); - if (!list_empty(head)) { - rollback_registered_many(head); - list_for_each_entry(dev, head, unreg_list) - net_set_todo(dev); - list_del(head); + BUG_ON(dev_boot_phase); + ASSERT_RTNL(); + + if (list_empty(head)) + return; + + list_for_each_entry_safe(dev, tmp, head, unreg_list) { + /* Some devices call without registering + * for initialization unwind. Remove those + * devices and proceed with the remaining. + */ + if (dev->reg_state == NETREG_UNINITIALIZED) { + pr_debug("unregister_netdevice: device %s/%p never was registered\n", + dev->name, dev); + + WARN_ON(1); + list_del(&dev->unreg_list); + continue; + } + dev->dismantle = true; + BUG_ON(dev->reg_state != NETREG_REGISTERED); } + + /* If device is running, close it first. */ + list_for_each_entry(dev, head, unreg_list) + list_add_tail(&dev->close_list, &close_head); + dev_close_many(&close_head, true); + + list_for_each_entry(dev, head, unreg_list) { + /* And unlink it from device chain. */ + unlist_netdevice(dev); + + dev->reg_state = NETREG_UNREGISTERING; + } + flush_all_backlogs(); + + synchronize_net(); + + list_for_each_entry(dev, head, unreg_list) { + struct sk_buff *skb = NULL; + + /* Shutdown queueing discipline. */ + dev_shutdown(dev); + + dev_xdp_uninstall(dev); + + /* Notify protocols, that we are about to destroy + * this device. They should clean all the things. + */ + call_netdevice_notifiers(NETDEV_UNREGISTER, dev); + + if (!dev->rtnl_link_ops || + dev->rtnl_link_state == RTNL_LINK_INITIALIZED) + skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, + GFP_KERNEL, NULL, 0); + + /* + * Flush the unicast and multicast chains + */ + dev_uc_flush(dev); + dev_mc_flush(dev); + + netdev_name_node_alt_flush(dev); + netdev_name_node_free(dev->name_node); + + if (dev->netdev_ops->ndo_uninit) + dev->netdev_ops->ndo_uninit(dev); + + if (skb) + rtmsg_ifinfo_send(skb, dev, GFP_KERNEL); + + /* Notifier chain MUST detach us all upper devices. 
*/ + WARN_ON(netdev_has_any_upper_dev(dev)); + WARN_ON(netdev_has_any_lower_dev(dev)); + + /* Remove entries from kobject tree */ + netdev_unregister_kobject(dev); +#ifdef CONFIG_XPS + /* Remove XPS queueing entries */ + netif_reset_xps_queues_gt(dev, 0); +#endif + } + + synchronize_net(); + + list_for_each_entry(dev, head, unreg_list) { + dev_put(dev); + net_set_todo(dev); + } + + list_del(head); } EXPORT_SYMBOL(unregister_netdevice_many); diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 54fb18b4f55e..993420da2930 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include +#include #include #include #include @@ -25,26 +26,6 @@ static int dev_ifname(struct net *net, struct ifreq *ifr) return netdev_get_name(net, ifr->ifr_name, ifr->ifr_ifindex); } -static gifconf_func_t *gifconf_list[NPROTO]; - -/** - * register_gifconf - register a SIOCGIF handler - * @family: Address family - * @gifconf: Function handler - * - * Register protocol dependent address dumping routines. The handler - * that is passed must not be freed or reused until it has been replaced - * by another handler. - */ -int register_gifconf(unsigned int family, gifconf_func_t *gifconf) -{ - if (family >= NPROTO) - return -EINVAL; - gifconf_list[family] = gifconf; - return 0; -} -EXPORT_SYMBOL(register_gifconf); - /* * Perform a SIOCGIFCONF call. This structure will change * size eventually, and there is nothing I can do about it. @@ -57,7 +38,6 @@ int dev_ifconf(struct net *net, struct ifconf *ifc, int size) char __user *pos; int len; int total; - int i; /* * Fetch the caller's info block. @@ -72,19 +52,15 @@ int dev_ifconf(struct net *net, struct ifconf *ifc, int size) total = 0; for_each_netdev(net, dev) { - for (i = 0; i < NPROTO; i++) { - if (gifconf_list[i]) { - int done; - if (!pos) - done = gifconf_list[i](dev, NULL, 0, size); - else - done = gifconf_list[i](dev, pos + total, - len - total, size); - if (done < 0) - return -EFAULT; - total += done; - } - } + int done; + if (!pos) + done = inet_gifconf(dev, NULL, 0, size); + else + done = inet_gifconf(dev, pos + total, + len - total, size); + if (done < 0) + return -EFAULT; + total += done; } /* diff --git a/net/core/devlink.c b/net/core/devlink.c index 646d90f63daf..72047750dcd9 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -3620,7 +3620,7 @@ static int devlink_param_get(struct devlink *devlink, const struct devlink_param *param, struct devlink_param_gset_ctx *ctx) { - if (!param->get) + if (!param->get || devlink->reload_failed) return -EOPNOTSUPP; return param->get(devlink, param->id, ctx); } @@ -3629,7 +3629,7 @@ static int devlink_param_set(struct devlink *devlink, const struct devlink_param *param, struct devlink_param_gset_ctx *ctx) { - if (!param->set) + if (!param->set || devlink->reload_failed) return -EOPNOTSUPP; return param->set(devlink, param->id, ctx); } diff --git a/net/core/filter.c b/net/core/filter.c index 3b52ddd92659..fc629761828f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1212,10 +1212,11 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp) { u32 filter_size = bpf_prog_size(fp->prog->len); + int optmem_max = READ_ONCE(sysctl_optmem_max); /* same check as in sock_kmalloc() */ - if (filter_size <= sysctl_optmem_max && - atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) { + if (filter_size <= optmem_max && + atomic_read(&sk->sk_omem_alloc) + 
filter_size < optmem_max) { atomic_add(filter_size, &sk->sk_omem_alloc); return true; } @@ -1547,7 +1548,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) if (IS_ERR(prog)) return PTR_ERR(prog); - if (bpf_prog_size(prog->len) > sysctl_optmem_max) + if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) err = -ENOMEM; else err = reuseport_attach_prog(sk, prog); @@ -1614,7 +1615,7 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk) } } else { /* BPF_PROG_TYPE_SOCKET_FILTER */ - if (bpf_prog_size(prog->len) > sysctl_optmem_max) { + if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) { err = -ENOMEM; goto err_prog_put; } @@ -1687,7 +1688,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset, if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) return -EINVAL; - if (unlikely(offset > 0xffff)) + if (unlikely(offset > INT_MAX)) return -EFAULT; if (unlikely(bpf_try_make_writable(skb, offset + len))) return -EFAULT; @@ -1722,7 +1723,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset, { void *ptr; - if (unlikely(offset > 0xffff)) + if (unlikely(offset > INT_MAX)) goto err_clear; ptr = skb_header_pointer(skb, offset, len, to); @@ -4695,14 +4696,14 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, /* Only some socketops are supported */ switch (optname) { case SO_RCVBUF: - val = min_t(u32, val, sysctl_rmem_max); + val = min_t(u32, val, READ_ONCE(sysctl_rmem_max)); val = min_t(int, val, INT_MAX / 2); sk->sk_userlocks |= SOCK_RCVBUF_LOCK; WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF)); break; case SO_SNDBUF: - val = min_t(u32, val, sysctl_wmem_max); + val = min_t(u32, val, READ_ONCE(sysctl_wmem_max)); val = min_t(int, val, INT_MAX / 2); sk->sk_userlocks |= SOCK_SNDBUF_LOCK; WRITE_ONCE(sk->sk_sndbuf, @@ -5606,7 +5607,6 @@ static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len if (err) return err; - ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); skb_set_transport_header(skb, sizeof(struct ipv6hdr)); return seg6_lookup_nexthop(skb, NULL, 0); @@ -5964,10 +5964,21 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, ifindex, proto, netns_id, flags); if (sk) { - sk = sk_to_full_sk(sk); - if (!sk_fullsock(sk)) { + struct sock *sk2 = sk_to_full_sk(sk); + + /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk + * sock refcnt is decremented to prevent a request_sock leak. + */ + if (!sk_fullsock(sk2)) + sk2 = NULL; + if (sk2 != sk) { sock_gen_put(sk); - return NULL; + /* Ensure there is no need to bump sk2 refcnt */ + if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) { + WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); + return NULL; + } + sk = sk2; } } @@ -6001,10 +6012,21 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, flags); if (sk) { - sk = sk_to_full_sk(sk); - if (!sk_fullsock(sk)) { + struct sock *sk2 = sk_to_full_sk(sk); + + /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk + * sock refcnt is decremented to prevent a request_sock leak. 
+ */ + if (!sk_fullsock(sk2)) + sk2 = NULL; + if (sk2 != sk) { sock_gen_put(sk); - return NULL; + /* Ensure there is no need to bump sk2 refcnt */ + if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) { + WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); + return NULL; + } + sk = sk2; } } @@ -6468,30 +6490,39 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) return -EINVAL; - if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies) + if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies)) return -EINVAL; if (!th->ack || th->rst || th->syn) return -ENOENT; + if (unlikely(iph_len < sizeof(struct iphdr))) + return -EINVAL; + if (tcp_synq_no_recent_overflow(sk)) return -ENOENT; cookie = ntohl(th->ack_seq) - 1; - switch (sk->sk_family) { - case AF_INET: - if (unlikely(iph_len < sizeof(struct iphdr))) + /* Both struct iphdr and struct ipv6hdr have the version field at the + * same offset so we can cast to the shorter header (struct iphdr). + */ + switch (((struct iphdr *)iph)->version) { + case 4: + if (sk->sk_family == AF_INET6 && ipv6_only_sock(sk)) return -EINVAL; ret = __cookie_v4_check((struct iphdr *)iph, th, cookie); break; #if IS_BUILTIN(CONFIG_IPV6) - case AF_INET6: + case 6: if (unlikely(iph_len < sizeof(struct ipv6hdr))) return -EINVAL; + if (sk->sk_family != AF_INET6) + return -EINVAL; + ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie); break; #endif /* CONFIG_IPV6 */ @@ -6534,7 +6565,7 @@ BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len, if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) return -EINVAL; - if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies) + if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies)) return -ENOENT; if (!th->syn || th->ack || th->fin || th->rst) @@ -7691,6 +7722,7 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); + int field_size; if (off < 0 || off >= sizeof(struct bpf_sock)) return false; @@ -7702,7 +7734,6 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, case offsetof(struct bpf_sock, family): case offsetof(struct bpf_sock, type): case offsetof(struct bpf_sock, protocol): - case offsetof(struct bpf_sock, dst_port): case offsetof(struct bpf_sock, src_port): case offsetof(struct bpf_sock, rx_queue_mapping): case bpf_ctx_range(struct bpf_sock, src_ip4): @@ -7711,6 +7742,14 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): bpf_ctx_record_field_size(info, size_default); return bpf_ctx_narrow_access_ok(off, size, size_default); + case bpf_ctx_range(struct bpf_sock, dst_port): + field_size = size == size_default ? + size_default : sizeof_field(struct bpf_sock, dst_port); + bpf_ctx_record_field_size(info, field_size); + return bpf_ctx_narrow_access_ok(off, size, field_size); + case offsetofend(struct bpf_sock, dst_port) ... 
+ offsetof(struct bpf_sock, dst_ip4) - 1: + return false; } return size == size_default; @@ -10278,6 +10317,7 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type, } const struct bpf_prog_ops sk_lookup_prog_ops = { + .test_run = bpf_prog_test_run_sk_lookup, }; const struct bpf_verifier_ops sk_lookup_verifier_ops = { diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 813c709c61cf..b8d082f55718 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -263,7 +263,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb, key->ct_zone = ct->zone.id; #endif #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) - key->ct_mark = ct->mark; + key->ct_mark = READ_ONCE(ct->mark); #endif cl = nf_ct_labels_find(ct); @@ -1171,6 +1171,7 @@ proto_again: VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; } key_vlan->vlan_tpid = saved_vlan_tpid; + key_vlan->vlan_eth_type = proto; } fdret = FLOW_DISSECT_RET_PROTO_AGAIN; @@ -1484,7 +1485,7 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow) } EXPORT_SYMBOL(flow_get_u32_dst); -/* Sort the source and destination IP (and the ports if the IP are the same), +/* Sort the source and destination IP and the ports, * to have consistent hash within the two directions */ static inline void __flow_hash_consistentify(struct flow_keys *keys) @@ -1493,13 +1494,12 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys) switch (keys->control.addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: - addr_diff = (__force u32)keys->addrs.v4addrs.dst - - (__force u32)keys->addrs.v4addrs.src; - if ((addr_diff < 0) || - (addr_diff == 0 && - ((__force u16)keys->ports.dst < - (__force u16)keys->ports.src))) { + if ((__force u32)keys->addrs.v4addrs.dst < + (__force u32)keys->addrs.v4addrs.src) swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst); + + if ((__force u16)keys->ports.dst < + (__force u16)keys->ports.src) { swap(keys->ports.src, keys->ports.dst); } break; @@ -1507,13 +1507,13 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys) addr_diff = memcmp(&keys->addrs.v6addrs.dst, &keys->addrs.v6addrs.src, sizeof(keys->addrs.v6addrs.dst)); - if ((addr_diff < 0) || - (addr_diff == 0 && - ((__force u16)keys->ports.dst < - (__force u16)keys->ports.src))) { + if (addr_diff < 0) { for (i = 0; i < 4; i++) swap(keys->addrs.v6addrs.src.s6_addr32[i], keys->addrs.v6addrs.dst.s6_addr32[i]); + } + if ((__force u16)keys->ports.dst < + (__force u16)keys->ports.src) { swap(keys->ports.src, keys->ports.dst); } break; diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index e3f0d5906811..8d958290b7d2 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -566,3 +566,9 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, return list_empty(&bo->cb_list) ? 
-EOPNOTSUPP : 0; } EXPORT_SYMBOL(flow_indr_dev_setup_offload); + +bool flow_indr_dev_exists(void) +{ + return !list_empty(&flow_block_indr_dev_list); +} +EXPORT_SYMBOL(flow_indr_dev_exists); diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c index 6eb2e5ec2c50..2f66f3f29563 100644 --- a/net/core/gro_cells.c +++ b/net/core/gro_cells.c @@ -26,7 +26,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb) cell = this_cpu_ptr(gcells->cells); - if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) { + if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) { drop: atomic_long_inc(&dev->rx_dropped); kfree_skb(skb); diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 2f7940bcf715..3fd207fe1284 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -158,10 +158,8 @@ static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb) return dst->lwtstate->orig_output(net, sk, skb); } -static int xmit_check_hhlen(struct sk_buff *skb) +static int xmit_check_hhlen(struct sk_buff *skb, int hh_len) { - int hh_len = skb_dst(skb)->dev->hard_header_len; - if (skb_headroom(skb) < hh_len) { int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb)); @@ -273,6 +271,7 @@ static int bpf_xmit(struct sk_buff *skb) bpf = bpf_lwt_lwtunnel(dst->lwtstate); if (bpf->xmit.prog) { + int hh_len = dst->dev->hard_header_len; __be16 proto = skb->protocol; int ret; @@ -290,7 +289,7 @@ static int bpf_xmit(struct sk_buff *skb) /* If the header was expanded, headroom might be too * small for L2 header to come, expand as needed. */ - ret = xmit_check_hhlen(skb); + ret = xmit_check_hhlen(skb, hh_len); if (unlikely(ret)) return ret; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 52a1c8725337..f6f580e9d282 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -280,11 +280,26 @@ static int neigh_del_timer(struct neighbour *n) return 0; } -static void pneigh_queue_purge(struct sk_buff_head *list) +static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net) { + struct sk_buff_head tmp; + unsigned long flags; struct sk_buff *skb; - while ((skb = skb_dequeue(list)) != NULL) { + skb_queue_head_init(&tmp); + spin_lock_irqsave(&list->lock, flags); + skb = skb_peek(list); + while (skb != NULL) { + struct sk_buff *skb_next = skb_peek_next(skb, list); + if (net == NULL || net_eq(dev_net(skb->dev), net)) { + __skb_unlink(skb, list); + __skb_queue_tail(&tmp, skb); + } + skb = skb_next; + } + spin_unlock_irqrestore(&list->lock, flags); + + while ((skb = __skb_dequeue(&tmp))) { dev_put(skb->dev); kfree_skb(skb); } @@ -358,9 +373,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev, write_lock_bh(&tbl->lock); neigh_flush_dev(tbl, dev, skip_perm); pneigh_ifdown_and_unlock(tbl, dev); - - del_timer_sync(&tbl->proxy_timer); - pneigh_queue_purge(&tbl->proxy_queue); + pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL); + if (skb_queue_empty_lockless(&tbl->proxy_queue)) + del_timer_sync(&tbl->proxy_timer); return 0; } @@ -1743,7 +1758,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl) /* It is not clean... 
Fix it to unload IPv6 module safely */ cancel_delayed_work_sync(&tbl->gc_work); del_timer_sync(&tbl->proxy_timer); - pneigh_queue_purge(&tbl->proxy_queue); + pneigh_queue_purge(&tbl->proxy_queue, NULL); neigh_ifdown(tbl, NULL); if (atomic_read(&tbl->entries)) pr_crit("neighbour leakage\n"); diff --git a/net/core/net-traces.c b/net/core/net-traces.c index 465362a9b55d..ffeb3a682859 100644 --- a/net/core/net-traces.c +++ b/net/core/net-traces.c @@ -58,3 +58,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb); EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll); EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_send_reset); + +EXPORT_TRACEPOINT_SYMBOL_GPL(net_dev_queue); +EXPORT_TRACEPOINT_SYMBOL_GPL(net_dev_xmit); +EXPORT_TRACEPOINT_SYMBOL_GPL(netif_receive_skb); +EXPORT_TRACEPOINT_SYMBOL_GPL(netif_rx); + diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index cbff7d94b993..a3b7d965e9c0 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -135,6 +135,7 @@ static int net_assign_generic(struct net *net, unsigned int id, void *data) static int ops_init(const struct pernet_operations *ops, struct net *net) { + struct net_generic *ng; int err = -ENOMEM; void *data = NULL; @@ -153,7 +154,13 @@ static int ops_init(const struct pernet_operations *ops, struct net *net) if (!err) return 0; + if (ops->id && ops->size) { cleanup: + ng = rcu_dereference_protected(net->gen, + lockdep_is_held(&pernet_ops_rwsem)); + ng->ptr[*ops->id] = NULL; + } + kfree(data); out: diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9ff6d4160dab..3c9c2d6e3b92 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3442,26 +3442,15 @@ replay: dev->ifindex = ifm->ifi_index; - if (ops->newlink) { + if (ops->newlink) err = ops->newlink(link_net ? : net, dev, tb, data, extack); - /* Drivers should call free_netdev() in ->destructor - * and unregister it on failure after registration - * so that device could be finally freed in rtnl_unlock. 
- */ - if (err < 0) { - /* If device is not registered at all, free it now */ - if (dev->reg_state == NETREG_UNINITIALIZED || - dev->reg_state == NETREG_UNREGISTERED) - free_netdev(dev); - goto out; - } - } else { + else err = register_netdevice(dev); - if (err < 0) { - free_netdev(dev); - goto out; - } + if (err < 0) { + free_netdev(dev); + goto out; } + err = rtnl_configure_link(dev, ifm); if (err < 0) goto out_unregister; @@ -3626,13 +3615,24 @@ static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr, bool *changed, struct netlink_ext_ack *extack) { char *alt_ifname; + size_t size; int err; err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack); if (err) return err; - alt_ifname = nla_strdup(attr, GFP_KERNEL); + if (cmd == RTM_NEWLINKPROP) { + size = rtnl_prop_list_size(dev); + size += nla_total_size(ALTIFNAMSIZ); + if (size >= U16_MAX) { + NL_SET_ERR_MSG(extack, + "effective property list too long"); + return -EINVAL; + } + } + + alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT); if (!alt_ifname) return -ENOMEM; diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index b5bc680d4755..189eea1372d5 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -22,6 +22,8 @@ static siphash_key_t net_secret __read_mostly; static siphash_key_t ts_secret __read_mostly; +#define EPHEMERAL_PORT_SHUFFLE_PERIOD (10 * HZ) + static __always_inline void net_secret_init(void) { net_get_random_once(&net_secret, sizeof(net_secret)); @@ -62,7 +64,7 @@ u32 secure_tcpv6_ts_off(const struct net *net, .daddr = *(struct in6_addr *)daddr, }; - if (net->ipv4.sysctl_tcp_timestamps != 1) + if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1) return 0; ts_secret_init(); @@ -94,17 +96,19 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr, } EXPORT_SYMBOL(secure_tcpv6_seq); -u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, +u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport) { const struct { struct in6_addr saddr; struct in6_addr daddr; + unsigned int timeseed; __be16 dport; } __aligned(SIPHASH_ALIGNMENT) combined = { .saddr = *(struct in6_addr *)saddr, .daddr = *(struct in6_addr *)daddr, - .dport = dport + .timeseed = jiffies / EPHEMERAL_PORT_SHUFFLE_PERIOD, + .dport = dport, }; net_secret_init(); return siphash(&combined, offsetofend(typeof(combined), dport), @@ -116,7 +120,7 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral); #ifdef CONFIG_INET u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr) { - if (net->ipv4.sysctl_tcp_timestamps != 1) + if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1) return 0; ts_secret_init(); @@ -142,11 +146,13 @@ u32 secure_tcp_seq(__be32 saddr, __be32 daddr, } EXPORT_SYMBOL_GPL(secure_tcp_seq); -u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) +u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) { net_secret_init(); - return siphash_3u32((__force u32)saddr, (__force u32)daddr, - (__force u16)dport, &net_secret); + return siphash_4u32((__force u32)saddr, (__force u32)daddr, + (__force u16)dport, + jiffies / EPHEMERAL_PORT_SHUFFLE_PERIOD, + &net_secret); } EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral); #endif diff --git a/net/core/skbuff.c b/net/core/skbuff.c index fc36c8eddbf7..382dbdc553bc 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1676,11 +1676,10 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb->head = data; skb->head_frag = 0; skb->data += off; + + 
skb_set_end_offset(skb, size); #ifdef NET_SKBUFF_DATA_USES_OFFSET - skb->end = size; off = nhead; -#else - skb->end = skb->head + size; #endif skb->tail += off; skb_headers_offset_update(skb, nhead); @@ -1728,6 +1727,38 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) } EXPORT_SYMBOL(skb_realloc_headroom); +int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) +{ + unsigned int saved_end_offset, saved_truesize; + struct skb_shared_info *shinfo; + int res; + + saved_end_offset = skb_end_offset(skb); + saved_truesize = skb->truesize; + + res = pskb_expand_head(skb, 0, 0, pri); + if (res) + return res; + + skb->truesize = saved_truesize; + + if (likely(skb_end_offset(skb) == saved_end_offset)) + return 0; + + shinfo = skb_shinfo(skb); + + /* We are about to change back skb->end, + * we need to move skb_shinfo() to its new location. + */ + memmove(skb->head + saved_end_offset, + shinfo, + offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); + + skb_set_end_offset(skb, saved_end_offset); + + return 0; +} + /** * skb_copy_expand - copy and expand sk_buff * @skb: buffer to copy @@ -3801,23 +3832,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, int i = 0; int pos; - if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && - (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { - /* gso_size is untrusted, and we have a frag_list with a linear - * non head_frag head. - * - * (we assume checking the first list_skb member suffices; - * i.e if either of the list_skb members have non head_frag - * head, then the first one has too). - * - * If head_skb's headlen does not fit requested gso_size, it - * means that the frag_list members do NOT terminate on exact - * gso_size boundaries. Hence we cannot perform skb_frag_t page - * sharing. Therefore we must fallback to copying the frag_list - * skbs; we do so by disabling SG. - */ - if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) - features &= ~NETIF_F_SG; + if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && + mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) { + struct sk_buff *check_skb; + + for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { + if (skb_headlen(check_skb) && !check_skb->head_frag) { + /* gso_size is untrusted, and we have a frag_list with + * a linear non head_frag item. + * + * If head_skb's headlen does not fit requested gso_size, + * it means that the frag_list members do NOT terminate + * on exact gso_size boundaries. Hence we cannot perform + * skb_frag_t page sharing. Therefore we must fallback to + * copying the frag_list skbs; we do so by disabling SG. 
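The reworked SKB_GSO_DODGY check above no longer assumes the first frag_list member is representative: it walks the whole chain and only keeps scatter-gather enabled if no member has a linear, non-head_frag head. The shape of that check as a self-contained sketch (illustrative names, not the kernel's types):

#include <stdbool.h>
#include <stddef.h>

struct frag {
    struct frag *next;
    size_t headlen;
    bool head_frag;
};

/* SG page sharing is only safe if *no* list member has a linear,
 * non-head_frag head; checking just the first member (as the old
 * code effectively did) misses offenders further down the chain */
static bool can_share_pages(const struct frag *list)
{
    for (const struct frag *f = list; f; f = f->next)
        if (f->headlen && !f->head_frag)
            return false;
    return true;
}
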
+ */ + features &= ~NETIF_F_SG; + break; + } + } } __skb_push(head_skb, doffset); @@ -3978,9 +4011,8 @@ normal: SKB_GSO_CB(nskb)->csum_start = skb_headroom(nskb) + doffset; } else { - skb_copy_bits(head_skb, offset, - skb_put(nskb, len), - len); + if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) + goto err; } continue; } @@ -4683,7 +4715,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) { bool ret; - if (likely(sysctl_tstamp_allow_data || tsonly)) + if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly)) return true; read_lock_bh(&sk->sk_callback_lock); @@ -5975,11 +6007,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, skb->head = data; skb->data = data; skb->head_frag = 0; -#ifdef NET_SKBUFF_DATA_USES_OFFSET - skb->end = size; -#else - skb->end = skb->head + size; -#endif + skb_set_end_offset(skb, size); skb_set_tail_pointer(skb, skb_headlen(skb)); skb_headers_offset_update(skb, 0); skb->cloned = 0; @@ -6117,11 +6145,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, skb->head = data; skb->head_frag = 0; skb->data = data; -#ifdef NET_SKBUFF_DATA_USES_OFFSET - skb->end = size; -#else - skb->end = skb->head + size; -#endif + skb_set_end_offset(skb, size); skb_reset_tail_pointer(skb); skb_headers_offset_update(skb, 0); skb->cloned = 0; diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 545181a1ae04..bb4fbc60b272 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -612,7 +612,9 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node) sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED); refcount_set(&psock->refcnt, 1); - rcu_assign_sk_user_data_nocopy(sk, psock); + __rcu_assign_sk_user_data_with_flags(sk, psock, + SK_USER_DATA_NOCOPY | + SK_USER_DATA_PSOCK); sock_hold(sk); out: diff --git a/net/core/sock.c b/net/core/sock.c index 7539b119d5ce..0506590b016b 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -888,7 +888,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, * play 'guess the biggest size' games. RCVBUF/SNDBUF * are treated in BSD as hints */ - val = min_t(u32, val, sysctl_wmem_max); + val = min_t(u32, val, READ_ONCE(sysctl_wmem_max)); set_sndbuf: /* Ensure val * 2 fits into an int, to prevent max_t() * from treating it as a negative value. @@ -920,7 +920,7 @@ set_sndbuf: * play 'guess the biggest size' games. RCVBUF/SNDBUF * are treated in BSD as hints */ - __sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max)); + __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max))); break; case SO_RCVBUFFORCE: @@ -2220,7 +2220,7 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */ if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) > - sysctl_optmem_max) + READ_ONCE(sysctl_optmem_max)) return NULL; skb = alloc_skb(size, priority); @@ -2238,8 +2238,10 @@ struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, */ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) { - if ((unsigned int)size <= sysctl_optmem_max && - atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { + int optmem_max = READ_ONCE(sysctl_optmem_max); + + if ((unsigned int)size <= optmem_max && + atomic_read(&sk->sk_omem_alloc) + size < optmem_max) { void *mem; /* First do the add, to avoid the race if kmalloc * might sleep. 
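The sock_omalloc()/sock_kmalloc() hunks above snapshot sysctl_optmem_max into a local before using it twice. Without the snapshot, a concurrent sysctl write landing between the two reads lets the size check and the accounting check compare against different limits. A sketch of the bug and the fix, again with C11 atomics standing in for READ_ONCE():

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int optmem_max = 20480;   /* updated from elsewhere */

/* double-read bug: if the limit changes between the two loads, the
 * two checks can disagree about which limit is in force */
static bool charge_racy(_Atomic int *alloc, int size)
{
    return size <= atomic_load_explicit(&optmem_max, memory_order_relaxed) &&
           atomic_load_explicit(alloc, memory_order_relaxed) + size <
               atomic_load_explicit(&optmem_max, memory_order_relaxed);
}

/* fixed pattern: snapshot once, compare twice against the same value */
static bool charge_snapshot(_Atomic int *alloc, int size)
{
    int max = atomic_load_explicit(&optmem_max, memory_order_relaxed);

    return size <= max &&
           atomic_load_explicit(alloc, memory_order_relaxed) + size < max;
}
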
@@ -2985,8 +2987,8 @@ void sock_init_data(struct socket *sock, struct sock *sk) timer_setup(&sk->sk_timer, NULL, 0); sk->sk_allocation = GFP_KERNEL; - sk->sk_rcvbuf = sysctl_rmem_default; - sk->sk_sndbuf = sysctl_wmem_default; + sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default); + sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); sk->sk_state = TCP_CLOSE; sk_set_socket(sk, sock); @@ -3041,7 +3043,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) #ifdef CONFIG_NET_RX_BUSY_POLL sk->sk_napi_id = 0; - sk->sk_ll_usec = sysctl_net_busy_read; + sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read); #endif sk->sk_max_pacing_rate = ~0UL; diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 4ea5bc65848f..cbf4184fabc9 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -815,13 +815,22 @@ static int sock_map_init_seq_private(void *priv_data, { struct sock_map_seq_info *info = priv_data; + bpf_map_inc_with_uref(aux->map); info->map = aux->map; return 0; } +static void sock_map_fini_seq_private(void *priv_data) +{ + struct sock_map_seq_info *info = priv_data; + + bpf_map_put_with_uref(info->map); +} + static const struct bpf_iter_seq_info sock_map_iter_seq_info = { .seq_ops = &sock_map_seq_ops, .init_seq_private = sock_map_init_seq_private, + .fini_seq_private = sock_map_fini_seq_private, .seq_priv_size = sizeof(struct sock_map_seq_info), }; @@ -1422,18 +1431,27 @@ static const struct seq_operations sock_hash_seq_ops = { }; static int sock_hash_init_seq_private(void *priv_data, - struct bpf_iter_aux_info *aux) + struct bpf_iter_aux_info *aux) { struct sock_hash_seq_info *info = priv_data; + bpf_map_inc_with_uref(aux->map); info->map = aux->map; info->htab = container_of(aux->map, struct bpf_shtab, map); return 0; } +static void sock_hash_fini_seq_private(void *priv_data) +{ + struct sock_hash_seq_info *info = priv_data; + + bpf_map_put_with_uref(info->map); +} + static const struct bpf_iter_seq_info sock_hash_iter_seq_info = { .seq_ops = &sock_hash_seq_ops, .init_seq_private = sock_hash_init_seq_private, + .fini_seq_private = sock_hash_fini_seq_private, .seq_priv_size = sizeof(struct sock_hash_seq_info), }; diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index b065f0a103ed..49f9c2c4ffd5 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -18,6 +18,22 @@ DEFINE_SPINLOCK(reuseport_lock); static DEFINE_IDA(reuseport_ida); +void reuseport_has_conns_set(struct sock *sk) +{ + struct sock_reuseport *reuse; + + if (!rcu_access_pointer(sk->sk_reuseport_cb)) + return; + + spin_lock_bh(&reuseport_lock); + reuse = rcu_dereference_protected(sk->sk_reuseport_cb, + lockdep_is_held(&reuseport_lock)); + if (likely(reuse)) + reuse->has_conns = 1; + spin_unlock_bh(&reuseport_lock); +} +EXPORT_SYMBOL(reuseport_has_conns_set); + static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks) { unsigned int size = sizeof(struct sock_reuseport) + diff --git a/net/core/stream.c b/net/core/stream.c index a166a32b411f..a61130504827 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -159,7 +159,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) *timeo_p = current_timeo; } out: - remove_wait_queue(sk_sleep(sk), &wait); + if (!sock_flag(sk, SOCK_DEAD)) + remove_wait_queue(sk_sleep(sk), &wait); return err; do_error: diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 2e0a4378e778..0dfe9f255ab3 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -235,14 +235,17 @@ static int set_default_qdisc(struct 
ctl_table *table, int write, static int proc_do_dev_weight(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { - int ret; + static DEFINE_MUTEX(dev_weight_mutex); + int ret, weight; + mutex_lock(&dev_weight_mutex); ret = proc_dointvec(table, write, buffer, lenp, ppos); - if (ret != 0) - return ret; - - dev_rx_weight = weight_p * dev_weight_rx_bias; - dev_tx_weight = weight_p * dev_weight_tx_bias; + if (!ret && write) { + weight = READ_ONCE(weight_p); + WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias); + WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias); + } + mutex_unlock(&dev_weight_mutex); return ret; } diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index b0b6e6a4784e..a2a8b952b3c5 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -130,6 +130,8 @@ failure: * This unhashes the socket and releases the local port, if necessary. */ dccp_set_state(sk, DCCP_CLOSED); + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) + inet_reset_saddr(sk); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; @@ -464,7 +466,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, .fl4_dport = dccp_hdr(skb)->dccph_sport, }; - security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); + security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 49f4034bf126..21c61a9c3b15 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -203,7 +203,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req fl6.flowi6_oif = ireq->ir_iif; fl6.fl6_dport = ireq->ir_rmt_port; fl6.fl6_sport = htons(ireq->ir_num); - security_req_classify_flow(req, flowi6_to_flowi(&fl6)); + security_req_classify_flow(req, flowi6_to_flowi_common(&fl6)); rcu_read_lock(); @@ -279,7 +279,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb) fl6.flowi6_oif = inet6_iif(rxskb); fl6.fl6_dport = dccp_hdr(skb)->dccph_dport; fl6.fl6_sport = dccp_hdr(skb)->dccph_sport; - security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); + security_skb_classify_flow(rxskb, flowi6_to_flowi_common(&fl6)); /* sk = NULL, but it is safe for now. RST socket required. 
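The proc_do_dev_weight() hunk above serializes writers on a local mutex so that dev_rx_weight and dev_tx_weight are both derived from the same weight_p snapshot, and publishes each result with WRITE_ONCE() for the lockless readers converted earlier in this patch. A userspace sketch of the same writer discipline (a pthread mutex plus relaxed atomic stores standing in for the kernel primitives):

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t weight_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic int weight = 64;
static _Atomic int rx_weight = 64, tx_weight = 64;

static void set_weight(int w, int rx_bias, int tx_bias)
{
    /* one writer at a time: both derived values come from the same
     * w, and each store is a single atomic publish for readers */
    pthread_mutex_lock(&weight_lock);
    atomic_store_explicit(&weight, w, memory_order_relaxed);
    atomic_store_explicit(&rx_weight, w * rx_bias, memory_order_relaxed);
    atomic_store_explicit(&tx_weight, w * tx_bias, memory_order_relaxed);
    pthread_mutex_unlock(&weight_lock);
}
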
*/ dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); @@ -912,7 +912,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = usin->sin6_port; fl6.fl6_sport = inet->inet_sport; - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); final_p = fl6_update_dst(&fl6, opt, &final); @@ -957,6 +957,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, late_failure: dccp_set_state(sk, DCCP_CLOSED); + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) + inet_reset_saddr(sk); __sk_dst_reset(sk); failure: inet->inet_dport = 0; diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 6d705d90c614..834701f6d816 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -747,11 +747,6 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) lock_sock(sk); - if (dccp_qpolicy_full(sk)) { - rc = -EAGAIN; - goto out_release; - } - timeo = sock_sndtimeo(sk, noblock); /* @@ -770,6 +765,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (skb == NULL) goto out_release; + if (dccp_qpolicy_full(sk)) { + rc = -EAGAIN; + goto out_discard; + } + if (sk->sk_state == DCCP_CLOSED) { rc = -ENOTCONN; goto out_discard; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index dc92a67baea3..7d542eb46172 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -480,8 +480,8 @@ static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gf sk->sk_family = PF_DECnet; sk->sk_protocol = 0; sk->sk_allocation = gfp; - sk->sk_sndbuf = sysctl_decnet_wmem[1]; - sk->sk_rcvbuf = sysctl_decnet_rmem[1]; + sk->sk_sndbuf = READ_ONCE(sysctl_decnet_wmem[1]); + sk->sk_rcvbuf = READ_ONCE(sysctl_decnet_rmem[1]); /* Initialization of DECnet Session Control Port */ scp = DN_SK(sk); diff --git a/net/dsa/port.c b/net/dsa/port.c index 73569c9af3cc..c9d552c4c358 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -721,8 +721,10 @@ int dsa_port_link_register_of(struct dsa_port *dp) if (ds->ops->phylink_mac_link_down) ds->ops->phylink_mac_link_down(ds, port, MLO_AN_FIXED, PHY_INTERFACE_MODE_NA); + of_node_put(phy_np); return dsa_port_phylink_register(dp); } + of_node_put(phy_np); return 0; } diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index 4820dbcedfa2..230ddf45dff0 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -22,7 +22,8 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb, if (!skb->dev) return NULL; - pskb_trim_rcsum(skb, skb->len - len); + if (pskb_trim_rcsum(skb, skb->len - len)) + return NULL; skb->offload_fwd_mark = true; diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index baf4765be6d7..cb9b54a7abd2 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -108,15 +108,15 @@ struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame, struct hsr_port *port) { if (!frame->skb_std) { - if (frame->skb_hsr) { + if (frame->skb_hsr) frame->skb_std = create_stripped_skb_hsr(frame->skb_hsr, frame); - } else { - /* Unexpected */ - WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n", - __FILE__, __LINE__, port->dev->name); + else + netdev_warn_once(port->dev, + "Unexpected frame received in hsr_get_untagged_frame()\n"); + + if (!frame->skb_std) return NULL; - } } return skb_clone(frame->skb_std, GFP_ATOMIC); @@ -303,17 +303,18 @@ static void hsr_deliver_master(struct sk_buff *skb, struct 
net_device *dev, struct hsr_node *node_src) { bool was_multicast_frame; - int res; + int res, recv_len; was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST); hsr_addr_subst_source(node_src, skb); skb_pull(skb, ETH_HLEN); + recv_len = skb->len; res = netif_rx(skb); if (res == NET_RX_DROP) { dev->stats.rx_dropped++; } else { dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + dev->stats.rx_bytes += recv_len; if (was_multicast_frame) dev->stats.multicast++; } diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index c25f7617770c..d4c275e56d82 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -201,8 +201,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len) int err = 0; struct net_device *dev = NULL; - if (len < sizeof(*uaddr)) - return -EINVAL; + err = ieee802154_sockaddr_check_size(uaddr, len); + if (err < 0) + return err; uaddr = (struct sockaddr_ieee802154 *)_uaddr; if (uaddr->family != AF_IEEE802154) @@ -272,6 +273,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) err = -EMSGSIZE; goto out_dev; } + if (!size) { + err = 0; + goto out_dev; + } hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; @@ -494,11 +499,14 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len) ro->bound = 0; - if (len < sizeof(*addr)) + err = ieee802154_sockaddr_check_size(addr, len); + if (err < 0) goto out; - if (addr->family != AF_IEEE802154) + if (addr->family != AF_IEEE802154) { + err = -EINVAL; goto out; + } ieee802154_addr_from_sa(&haddr, &addr->addr); dev = ieee802154_get_dev(sock_net(sk), &haddr); @@ -565,8 +573,9 @@ static int dgram_connect(struct sock *sk, struct sockaddr *uaddr, struct dgram_sock *ro = dgram_sk(sk); int err = 0; - if (len < sizeof(*addr)) - return -EINVAL; + err = ieee802154_sockaddr_check_size(addr, len); + if (err < 0) + return err; if (addr->family != AF_IEEE802154) return -EINVAL; @@ -605,6 +614,7 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) struct ieee802154_mac_cb *cb; struct dgram_sock *ro = dgram_sk(sk); struct ieee802154_addr dst_addr; + DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name); int hlen, tlen; int err; @@ -613,10 +623,20 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) return -EOPNOTSUPP; } - if (!ro->connected && !msg->msg_name) - return -EDESTADDRREQ; - else if (ro->connected && msg->msg_name) - return -EISCONN; + if (msg->msg_name) { + if (ro->connected) + return -EISCONN; + if (msg->msg_namelen < IEEE802154_MIN_NAMELEN) + return -EINVAL; + err = ieee802154_sockaddr_check_size(daddr, msg->msg_namelen); + if (err < 0) + return err; + ieee802154_addr_from_sa(&dst_addr, &daddr->addr); + } else { + if (!ro->connected) + return -EDESTADDRREQ; + dst_addr = ro->dst_addr; + } if (!ro->bound) dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154); @@ -652,16 +672,6 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) cb = mac_cb_init(skb); cb->type = IEEE802154_FC_TYPE_DATA; cb->ackreq = ro->want_ack; - - if (msg->msg_name) { - DECLARE_SOCKADDR(struct sockaddr_ieee802154*, - daddr, msg->msg_name); - - ieee802154_addr_from_sa(&dst_addr, &daddr->addr); - } else { - dst_addr = ro->dst_addr; - } - cb->secen = ro->secen; cb->secen_override = ro->secen_override; cb->seclevel = ro->seclevel; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 87983e70f03f..23b06063e1a5 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -403,6 +403,16 @@ config 
INET_IPCOMP If unsure, say Y. +config INET_TABLE_PERTURB_ORDER + int "INET: Source port perturbation table size (as power of 2)" if EXPERT + default 16 + help + Source port perturbation table size (as power of 2) for + RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm. + + The default is almost always what you want. + Only change this if you know what you are doing. + config INET_XFRM_TUNNEL tristate select INET_TUNNEL diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 482a4538e315..b9f4fe8c14b8 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -158,7 +158,7 @@ void inet_sock_destruct(struct sock *sk) kfree(rcu_dereference_protected(inet->inet_opt, 1)); dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1)); - dst_release(sk->sk_rx_dst); + dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1)); sk_refcnt_debug_dec(sk); } EXPORT_SYMBOL(inet_sock_destruct); @@ -220,7 +220,7 @@ int inet_listen(struct socket *sock, int backlog) * because the socket was in TCP_LISTEN state previously but * was shutdown() rather than close(). */ - tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen; + tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen); if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) && (tcp_fastopen & TFO_SERVER_ENABLE) && !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) { @@ -338,7 +338,7 @@ lookup_protocol: inet->hdrincl = 1; } - if (net->ipv4.sysctl_ip_no_pmtu_disc) + if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) inet->pmtudisc = IP_PMTUDISC_DONT; else inet->pmtudisc = IP_PMTUDISC_WANT; @@ -1021,7 +1021,6 @@ static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon const struct proto_ops inet_stream_ops = { .family = PF_INET, - .flags = PROTO_CMSG_DATA_ONLY, .owner = THIS_MODULE, .release = inet_release, .bind = inet_bind, @@ -1249,7 +1248,7 @@ static int inet_sk_reselect_saddr(struct sock *sk) if (new_saddr == old_saddr) return 0; - if (sock_net(sk)->ipv4.sysctl_ip_dynaddr > 1) { + if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) { pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n", __func__, &old_saddr, &new_saddr); } @@ -1304,7 +1303,7 @@ int inet_sk_rebuild_header(struct sock *sk) * Other protocols have to map its equivalent state to TCP_SYN_SENT. * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme */ - if (!sock_net(sk)->ipv4.sysctl_ip_dynaddr || + if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) || sk->sk_state != TCP_SYN_SENT || (sk->sk_userlocks & SOCK_BINDADDR_LOCK) || (err = inet_sk_reselect_saddr(sk)) != 0) @@ -1730,12 +1729,7 @@ static const struct net_protocol igmp_protocol = { }; #endif -/* thinking of making this const? Don't. - * early_demux can change based on sysctl. - */ -static struct net_protocol tcp_protocol = { - .early_demux = tcp_v4_early_demux, - .early_demux_handler = tcp_v4_early_demux, +static const struct net_protocol tcp_protocol = { .handler = tcp_v4_rcv, .err_handler = tcp_v4_err, .no_policy = 1, @@ -1743,12 +1737,7 @@ static struct net_protocol tcp_protocol = { .icmp_strict_tag_validation = 1, }; -/* thinking of making this const? Don't. - * early_demux can change based on sysctl. 
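The af_inet.c hunks below can finally drop the "thinking of making this const? Don't." warnings because the runtime-mutable early_demux pointers move out of struct net_protocol; with only immutable members left, the tables become const and can live in read-only memory. The pattern in miniature (illustrative types, not the kernel's definitions):

#include <stdatomic.h>

struct proto_ops {
    int (*handler)(int);
};

static int tcp_handler(int x) { return x; }

/* the mutable fast-path hook lives outside the table... */
static _Atomic(int (*)(int)) tcp_early_demux_hook;

/* ...so the ops table itself can be const */
static const struct proto_ops tcp_ops = {
    .handler = tcp_handler,
};
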
- */ -static struct net_protocol udp_protocol = { - .early_demux = udp_v4_early_demux, - .early_demux_handler = udp_v4_early_demux, +static const struct net_protocol udp_protocol = { .handler = udp_rcv, .err_handler = udp_err, .no_policy = 1, diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 922dd73e5740..83a47998c4b1 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1116,13 +1116,18 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev) return err; } -static int arp_invalidate(struct net_device *dev, __be32 ip) +int arp_invalidate(struct net_device *dev, __be32 ip, bool force) { struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev); int err = -ENXIO; struct neigh_table *tbl = &arp_tbl; if (neigh) { + if ((neigh->nud_state & NUD_VALID) && !force) { + neigh_release(neigh); + return 0; + } + if (neigh->nud_state & ~NUD_NOARP) err = neigh_update(neigh, NULL, NUD_FAILED, NEIGH_UPDATE_F_OVERRIDE| @@ -1169,7 +1174,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r, if (!dev) return -EINVAL; } - return arp_invalidate(dev, ip); + return arp_invalidate(dev, ip, true); } /* diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index ca217a6f488f..d4a4160159a9 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -240,7 +240,7 @@ static int cipso_v4_cache_check(const unsigned char *key, struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; - if (!cipso_v4_cache_enabled) + if (!READ_ONCE(cipso_v4_cache_enabled)) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); @@ -297,13 +297,14 @@ static int cipso_v4_cache_check(const unsigned char *key, int cipso_v4_cache_add(const unsigned char *cipso_ptr, const struct netlbl_lsm_secattr *secattr) { + int bkt_size = READ_ONCE(cipso_v4_cache_bucketsize); int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; u32 cipso_ptr_len; - if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) + if (!READ_ONCE(cipso_v4_cache_enabled) || bkt_size <= 0) return 0; cipso_ptr_len = cipso_ptr[1]; @@ -323,7 +324,7 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr, bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); - if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { + if (cipso_v4_cache[bkt].size < bkt_size) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { @@ -1200,7 +1201,8 @@ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ - if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) + if (READ_ONCE(cipso_v4_rbm_optfmt) && ret_val > 0 && + ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; @@ -1604,7 +1606,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. 
*/ - if (cipso_v4_rbm_strictvalid) { + if (READ_ONCE(cipso_v4_rbm_strictvalid)) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 4a8550c49202..112c6e892d30 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -70,7 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len } inet->inet_daddr = fl4->daddr; inet->inet_dport = usin->sin_port; - reuseport_has_conns(sk, true); + reuseport_has_conns_set(sk); sk->sk_state = TCP_ESTABLISHED; sk_set_txhash(sk); inet->inet_id = prandom_u32(); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 148ef484a66c..88b6120878cd 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1244,7 +1244,7 @@ out: return ret; } -static int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size) +int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size) { struct in_device *in_dev = __in_dev_get_rtnl(dev); const struct in_ifaddr *ifa; @@ -2668,23 +2668,27 @@ static __net_init int devinet_init_net(struct net *net) #endif if (!net_eq(net, &init_net)) { - if (IS_ENABLED(CONFIG_SYSCTL) && - sysctl_devconf_inherit_init_net == 3) { + switch (net_inherit_devconf()) { + case 3: /* copy from the current netns */ memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all, sizeof(ipv4_devconf)); memcpy(dflt, current->nsproxy->net_ns->ipv4.devconf_dflt, sizeof(ipv4_devconf_dflt)); - } else if (!IS_ENABLED(CONFIG_SYSCTL) || - sysctl_devconf_inherit_init_net != 2) { - /* inherit == 0 or 1: copy from init_net */ + break; + case 0: + case 1: + /* copy from init_net */ memcpy(all, init_net.ipv4.devconf_all, sizeof(ipv4_devconf)); memcpy(dflt, init_net.ipv4.devconf_dflt, sizeof(ipv4_devconf_dflt)); + break; + case 2: + /* use compiled values */ + break; } - /* else inherit == 2: use compiled values */ } #ifdef CONFIG_SYSCTL @@ -2762,8 +2766,6 @@ void __init devinet_init(void) INIT_HLIST_HEAD(&inet_addr_lst[i]); register_pernet_subsys(&devinet_ops); - - register_gifconf(PF_INET, inet_gifconf); register_netdevice_notifier(&ip_netdev_notifier); queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index d76af5ba5cc2..7010faae6a7e 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -448,7 +448,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * struct page *page; struct sk_buff *trailer; int tailen = esp->tailen; - unsigned int allocsz; /* this is non-NULL only with TCP/UDP Encapsulation */ if (x->encap) { @@ -458,8 +457,8 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * return err; } - allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES); - if (allocsz > ESP_SKB_FRAG_MAXSIZE) + if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE || + ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE) goto cow; if (!skb_cloned(skb)) { diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 3450c9ba2728..84257678160a 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -312,6 +312,9 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_ xo->seq.low += skb_shinfo(skb)->gso_segs; } + if (xo->seq.low < seq) + xo->seq.hi++; + esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32)); ip_hdr(skb)->tot_len = htons(skb->len); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 917ea953dfad..5f786ef662ea 100644 --- a/net/ipv4/fib_frontend.c 
+++ b/net/ipv4/fib_frontend.c @@ -389,7 +389,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, dev_match = dev_match || (res.type == RTN_LOCAL && dev == net->loopback_dev); if (dev_match) { - ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST; + ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK; return ret; } if (no_addr) @@ -401,7 +401,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, ret = 0; if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) { if (res.type == RTN_UNICAST) - ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST; + ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK; } return ret; @@ -830,6 +830,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, return -EINVAL; } + if (!cfg->fc_table) + cfg->fc_table = RT_TABLE_MAIN; + return 0; errout: return err; @@ -1112,9 +1115,11 @@ void fib_add_ifaddr(struct in_ifaddr *ifa) return; /* Add broadcast address, if it is explicitly assigned. */ - if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF)) + if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF)) { fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim, 0); + arp_invalidate(dev, ifa->ifa_broadcast, false); + } if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) && (prefix != addr || ifa->ifa_prefixlen < 32)) { @@ -1130,6 +1135,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa) prim, 0); fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask, 32, prim, 0); + arp_invalidate(dev, prefix | ~mask, false); } } } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 838a876c168c..ab9fcc6231b8 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -423,6 +423,7 @@ static struct fib_info *fib_find_info(struct fib_info *nfi) nfi->fib_prefsrc == fi->fib_prefsrc && nfi->fib_priority == fi->fib_priority && nfi->fib_type == fi->fib_type && + nfi->fib_tb_id == fi->fib_tb_id && memcmp(nfi->fib_metrics, fi->fib_metrics, sizeof(u32) * RTAX_MAX) == 0 && !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) && @@ -887,9 +888,16 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi, return 1; } - if (cfg->fc_oif || cfg->fc_gw_family) { - struct fib_nh *nh = fib_info_nh(fi, 0); + if (fi->nh) { + if (cfg->fc_oif || cfg->fc_gw_family || cfg->fc_mp) + return 1; + return 0; + } + if (cfg->fc_oif || cfg->fc_gw_family) { + struct fib_nh *nh; + + nh = fib_info_nh(fi, 0); if (cfg->fc_encap) { if (fib_encap_match(net, cfg->fc_encap_type, cfg->fc_encap, nh, cfg, extack)) @@ -1224,7 +1232,7 @@ static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh, nh->fib_nh_dev = in_dev->dev; dev_hold(nh->fib_nh_dev); - nh->fib_nh_scope = RT_SCOPE_HOST; + nh->fib_nh_scope = RT_SCOPE_LINK; if (!netif_carrier_ok(nh->fib_nh_dev)) nh->fib_nh_flags |= RTNH_F_LINKDOWN; err = 0; @@ -1826,7 +1834,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, goto nla_put_failure; if (nexthop_is_blackhole(fi->nh)) rtm->rtm_type = RTN_BLACKHOLE; - if (!fi->fib_net->ipv4.sysctl_nexthop_compat_mode) + if (!READ_ONCE(fi->fib_net->ipv4.sysctl_nexthop_compat_mode)) goto offload; } @@ -2227,7 +2235,7 @@ void fib_select_multipath(struct fib_result *res, int hash) } change_nexthops(fi) { - if (net->ipv4.sysctl_fib_multipath_use_neigh) { + if (READ_ONCE(net->ipv4.sysctl_fib_multipath_use_neigh)) { if (!fib_good_nh(nexthop_nh)) continue; if (!first) { diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 
ffc5332f1390..d11fb16234a6 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -497,7 +497,7 @@ static void tnode_free(struct key_vector *tn) tn = container_of(head, struct tnode, rcu)->kv; } - if (tnode_free_size >= sysctl_fib_sync_mem) { + if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) { tnode_free_size = 0; synchronize_rcu(); } @@ -1331,8 +1331,10 @@ int fib_table_insert(struct net *net, struct fib_table *tb, /* The alias was already inserted, so the node must exist. */ l = l ? l : fib_find_node(t, &tp, key); - if (WARN_ON_ONCE(!l)) + if (WARN_ON_ONCE(!l)) { + err = -ENOENT; goto out_free_new_fa; + } if (fib_find_alias(&l->leaf, new_fa->fa_slen, 0, 0, tb->tb_id, true) == new_fa) { diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index b71b836cc7d1..a1aacf5e75a6 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -261,11 +261,12 @@ bool icmp_global_allow(void) spin_lock(&icmp_global.lock); delta = min_t(u32, now - icmp_global.stamp, HZ); if (delta >= HZ / 50) { - incr = sysctl_icmp_msgs_per_sec * delta / HZ ; + incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ; if (incr) WRITE_ONCE(icmp_global.stamp, now); } - credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst); + credit = min_t(u32, icmp_global.credit + incr, + READ_ONCE(sysctl_icmp_msgs_burst)); if (credit) { /* We want to use a credit of one in average, but need to randomize * it for security reasons. @@ -289,7 +290,7 @@ static bool icmpv4_mask_allow(struct net *net, int type, int code) return true; /* Limit if icmp type is enabled in ratemask. */ - if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask)) + if (!((1 << type) & READ_ONCE(net->ipv4.sysctl_icmp_ratemask))) return true; return false; @@ -327,7 +328,8 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, vif = l3mdev_master_ifindex(dst->dev); peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1); - rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit); + rc = inet_peer_xrlim_allow(peer, + READ_ONCE(net->ipv4.sysctl_icmp_ratelimit)); if (peer) inet_putpeer(peer); out: @@ -447,7 +449,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); fl4.flowi4_proto = IPPROTO_ICMP; fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev); - security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); + security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) goto out_unlock; @@ -503,7 +505,7 @@ static struct rtable *icmp_route_lookup(struct net *net, route_lookup_dev = icmp_get_route_lookup_dev(skb_in); fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev); - security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); + security_skb_classify_flow(skb_in, flowi4_to_flowi_common(fl4)); rt = ip_route_output_key_hash(net, fl4, skb_in); if (IS_ERR(rt)) return rt; @@ -885,7 +887,7 @@ static bool icmp_unreach(struct sk_buff *skb) * values please see * Documentation/networking/ip-sysctl.rst */ - switch (net->ipv4.sysctl_ip_no_pmtu_disc) { + switch (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) { default: net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n", &iph->daddr); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 03589a04f9aa..46196b50c9d6 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -467,7 +467,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, if (pmc->multiaddr == IGMP_ALL_HOSTS) return skb; - if (ipv4_is_local_multicast(pmc->multiaddr) && 
!net->ipv4.sysctl_igmp_llm_reports) + if (ipv4_is_local_multicast(pmc->multiaddr) && + !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) return skb; mtu = READ_ONCE(dev->mtu); @@ -593,7 +594,7 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc) if (pmc->multiaddr == IGMP_ALL_HOSTS) continue; if (ipv4_is_local_multicast(pmc->multiaddr) && - !net->ipv4.sysctl_igmp_llm_reports) + !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) continue; spin_lock_bh(&pmc->lock); if (pmc->sfcount[MCAST_EXCLUDE]) @@ -736,7 +737,8 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, if (type == IGMPV3_HOST_MEMBERSHIP_REPORT) return igmpv3_send_report(in_dev, pmc); - if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports) + if (ipv4_is_local_multicast(group) && + !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) return 0; if (type == IGMP_HOST_LEAVE_MESSAGE) @@ -818,7 +820,7 @@ static void igmp_ifc_event(struct in_device *in_dev) struct net *net = dev_net(in_dev->dev); if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) return; - in_dev->mr_ifc_count = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + in_dev->mr_ifc_count = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); igmp_ifc_start_timer(in_dev, 1); } @@ -913,7 +915,8 @@ static bool igmp_heard_report(struct in_device *in_dev, __be32 group) if (group == IGMP_ALL_HOSTS) return false; - if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports) + if (ipv4_is_local_multicast(group) && + !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) return false; rcu_read_lock(); @@ -999,7 +1002,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, * received value was zero, use the default or statically * configured value. */ - in_dev->mr_qrv = ih3->qrv ?: net->ipv4.sysctl_igmp_qrv; + in_dev->mr_qrv = ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL; /* RFC3376, 8.3. 
Query Response Interval: @@ -1038,7 +1041,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, if (im->multiaddr == IGMP_ALL_HOSTS) continue; if (ipv4_is_local_multicast(im->multiaddr) && - !net->ipv4.sysctl_igmp_llm_reports) + !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) continue; spin_lock_bh(&im->lock); if (im->tm_running) @@ -1179,7 +1182,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im, pmc->interface = im->interface; in_dev_hold(in_dev); pmc->multiaddr = im->multiaddr; - pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); pmc->sfmode = im->sfmode; if (pmc->sfmode == MCAST_INCLUDE) { struct ip_sf_list *psf; @@ -1230,9 +1233,11 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) swap(im->tomb, pmc->tomb); swap(im->sources, pmc->sources); for (psf = im->sources; psf; psf = psf->sf_next) - psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + psf->sf_crcount = in_dev->mr_qrv ?: + READ_ONCE(net->ipv4.sysctl_igmp_qrv); } else { - im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + im->crcount = in_dev->mr_qrv ?: + READ_ONCE(net->ipv4.sysctl_igmp_qrv); } in_dev_put(pmc->interface); kfree_pmc(pmc); @@ -1289,7 +1294,8 @@ static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp) #ifdef CONFIG_IP_MULTICAST if (im->multiaddr == IGMP_ALL_HOSTS) return; - if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports) + if (ipv4_is_local_multicast(im->multiaddr) && + !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) return; reporter = im->reporter; @@ -1331,13 +1337,14 @@ static void igmp_group_added(struct ip_mc_list *im) #ifdef CONFIG_IP_MULTICAST if (im->multiaddr == IGMP_ALL_HOSTS) return; - if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports) + if (ipv4_is_local_multicast(im->multiaddr) && + !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) return; if (in_dev->dead) return; - im->unsolicit_count = net->ipv4.sysctl_igmp_qrv; + im->unsolicit_count = READ_ONCE(net->ipv4.sysctl_igmp_qrv); if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { spin_lock_bh(&im->lock); igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY); @@ -1351,7 +1358,7 @@ static void igmp_group_added(struct ip_mc_list *im) * IN() to IN(A). 
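The pattern repeated through these igmp.c hunks is a lockless sysctl read wrapped in READ_ONCE() (and "a ?: b" is GCC's shorthand for "a if non-zero, else b"). READ_ONCE() does not add locking; it tells the compiler the location changes concurrently, so the load may not be torn, fused, or re-issued. A userspace approximation of the same idea with C11 relaxed atomics (the kernel macros are volatile-based; names below mirror the patch but the program is standalone):

#include <stdatomic.h>
#include <stdio.h>

#define WRITE_ONCE(x, val) atomic_store_explicit(&(x), (val), memory_order_relaxed)
#define READ_ONCE(x)       atomic_load_explicit(&(x), memory_order_relaxed)

static _Atomic int sysctl_igmp_qrv;	/* written by a sysctl handler */

/* Reader in the style of igmp_ifc_event(): snapshot once, use once. */
static int pick_qrv(int per_device_qrv)
{
	int qrv = READ_ONCE(sysctl_igmp_qrv);

	/* Without READ_ONCE the compiler could legally load the
	 * sysctl twice here and observe two different values.
	 */
	return per_device_qrv ? per_device_qrv : qrv;
}

int main(void)
{
	WRITE_ONCE(sysctl_igmp_qrv, 2);		/* IGMP default robustness */
	printf("qrv=%d\n", pick_qrv(0));
	return 0;
}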
*/ if (im->sfmode == MCAST_EXCLUDE) - im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + im->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); igmp_ifc_event(in_dev); #endif @@ -1635,7 +1642,7 @@ static void ip_mc_rejoin_groups(struct in_device *in_dev) if (im->multiaddr == IGMP_ALL_HOSTS) continue; if (ipv4_is_local_multicast(im->multiaddr) && - !net->ipv4.sysctl_igmp_llm_reports) + !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports)) continue; /* a failover is happening and switches @@ -1742,7 +1749,7 @@ static void ip_mc_reset(struct in_device *in_dev) in_dev->mr_qi = IGMP_QUERY_INTERVAL; in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL; - in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv; + in_dev->mr_qrv = READ_ONCE(net->ipv4.sysctl_igmp_qrv); } #else static void ip_mc_reset(struct in_device *in_dev) @@ -1876,7 +1883,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode, #ifdef CONFIG_IP_MULTICAST if (psf->sf_oldin && !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) { - psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + psf->sf_crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); psf->sf_next = pmc->tomb; pmc->tomb = psf; rv = 1; @@ -1940,7 +1947,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode, /* filter mode change */ pmc->sfmode = MCAST_INCLUDE; #ifdef CONFIG_IP_MULTICAST - pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); in_dev->mr_ifc_count = pmc->crcount; for (psf = pmc->sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; @@ -2119,7 +2126,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, #ifdef CONFIG_IP_MULTICAST /* else no filters; keep old mode for reports */ - pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv); in_dev->mr_ifc_count = pmc->crcount; for (psf = pmc->sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; @@ -2185,7 +2192,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr, count++; } err = -ENOBUFS; - if (count >= net->ipv4.sysctl_igmp_max_memberships) + if (count >= READ_ONCE(net->ipv4.sysctl_igmp_max_memberships)) goto done; iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL); if (!iml) @@ -2372,7 +2379,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct } /* else, add a new source to the filter */ - if (psl && psl->sl_count >= net->ipv4.sysctl_igmp_max_msf) { + if (psl && psl->sl_count >= READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) { err = -ENOBUFS; goto done; } @@ -2394,9 +2401,10 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct newpsl->sl_addr[i] = psl->sl_addr[i]; /* decrease mem now to avoid the memleak warning */ atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc); - kfree_rcu(psl, rcu); } rcu_assign_pointer(pmc->sflist, newpsl); + if (psl) + kfree_rcu(psl, rcu); psl = newpsl; } rv = 1; /* > 0 for insert logic below if sl_count is 0 */ @@ -2494,11 +2502,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) psl->sl_count, psl->sl_addr, 0); /* decrease mem now to avoid the memleak warning */ atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc); - kfree_rcu(psl, rcu); - } else + } else { (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 0, NULL, 0); + } rcu_assign_pointer(pmc->sflist, newpsl); + if (psl) + kfree_rcu(psl, rcu); pmc->sfmode = msf->imsf_fmode; err = 0; done: diff --git 
a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 1dfa561e8f98..c4303cbc9dc0 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -251,7 +251,7 @@ next_port: goto other_half_scan; } - if (net->ipv4.sysctl_ip_autobind_reuse && !relax) { + if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) { /* We still have a chance to connect to different destinations */ relax = true; goto ports_exhausted; @@ -602,7 +602,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk, (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, ireq->ir_loc_addr, ireq->ir_rmt_port, htons(ireq->ir_num), sk->sk_uid); - security_req_classify_flow(req, flowi4_to_flowi(fl4)); + security_req_classify_flow(req, flowi4_to_flowi_common(fl4)); rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) goto no_route; @@ -640,7 +640,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, ireq->ir_loc_addr, ireq->ir_rmt_port, htons(ireq->ir_num), sk->sk_uid); - security_req_classify_flow(req, flowi4_to_flowi(fl4)); + security_req_classify_flow(req, flowi4_to_flowi_common(fl4)); rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) goto no_route; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index f3fd5c911ed0..2ec518cb601f 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -410,13 +410,11 @@ begin: sk_nulls_for_each_rcu(sk, node, &head->chain) { if (sk->sk_hash != hash) continue; - if (likely(INET_MATCH(sk, net, acookie, - saddr, daddr, ports, dif, sdif))) { + if (likely(INET_MATCH(net, sk, acookie, ports, dif, sdif))) { if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) goto out; - if (unlikely(!INET_MATCH(sk, net, acookie, - saddr, daddr, ports, - dif, sdif))) { + if (unlikely(!INET_MATCH(net, sk, acookie, + ports, dif, sdif))) { sock_gen_put(sk); goto begin; } @@ -465,8 +463,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, if (sk2->sk_hash != hash) continue; - if (likely(INET_MATCH(sk2, net, acookie, - saddr, daddr, ports, dif, sdif))) { + if (likely(INET_MATCH(net, sk2, acookie, ports, dif, sdif))) { if (sk2->sk_state == TCP_TIME_WAIT) { tw = inet_twsk(sk2); if (twsk_unique(sk, sk2, twp)) @@ -504,7 +501,7 @@ not_unique: return -EADDRNOTAVAIL; } -static u32 inet_sk_port_offset(const struct sock *sk) +static u64 inet_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); @@ -532,16 +529,14 @@ static bool inet_ehash_lookup_by_sk(struct sock *sk, if (esk->sk_hash != sk->sk_hash) continue; if (sk->sk_family == AF_INET) { - if (unlikely(INET_MATCH(esk, net, acookie, - sk->sk_daddr, - sk->sk_rcv_saddr, + if (unlikely(INET_MATCH(net, esk, acookie, ports, dif, sdif))) { return true; } } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { - if (unlikely(INET6_MATCH(esk, net, + if (unlikely(inet6_match(net, esk, &sk->sk_v6_daddr, &sk->sk_v6_rcv_saddr, ports, dif, sdif))) { @@ -637,7 +632,9 @@ int __inet_hash(struct sock *sk, struct sock *osk) int err = 0; if (sk->sk_state != TCP_LISTEN) { + local_bh_disable(); inet_ehash_nolisten(sk, osk, NULL); + local_bh_enable(); return 0; } WARN_ON(!sk_unhashed(sk)); @@ -669,50 +666,72 @@ int inet_hash(struct sock *sk) { int err = 0; - if (sk->sk_state != TCP_CLOSE) { - local_bh_disable(); + if (sk->sk_state != TCP_CLOSE) err = __inet_hash(sk, NULL); - local_bh_enable(); - } return err; } EXPORT_SYMBOL_GPL(inet_hash); 
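The ip_mc_source()/ip_mc_msfilter() hunks a little earlier move kfree_rcu() after rcu_assign_pointer() for exactly this reason: an object must be unpublished before it is queued for deferred freeing, otherwise a reader that starts after the kfree_rcu() call but before the pointer swap can still fetch memory whose grace period has already begun. A minimal sketch of the corrected ordering; rcu_assign_pointer() and kfree_rcu() are stand-ins here (a real userspace port would use liburcu):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sflist { int count; };

static _Atomic(struct sflist *) sflist_head;

#define rcu_assign_pointer(p, v) \
	atomic_store_explicit(&(p), (v), memory_order_release)

static void kfree_rcu_stub(struct sflist *p)
{
	free(p);	/* pretend a grace period elapsed first */
}

static void replace_filter(struct sflist *newpsl)
{
	struct sflist *psl = atomic_load_explicit(&sflist_head,
						  memory_order_relaxed);

	rcu_assign_pointer(sflist_head, newpsl);	/* publish first */
	if (psl)
		kfree_rcu_stub(psl);			/* then retire old */
}

int main(void)
{
	struct sflist *a = calloc(1, sizeof(*a));
	struct sflist *b = calloc(1, sizeof(*b));

	replace_filter(a);
	replace_filter(b);	/* a is retired only once b is visible */
	printf("head=%p\n", (void *)atomic_load(&sflist_head));
	free(b);
	return 0;
}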
-void inet_unhash(struct sock *sk) +static void __inet_unhash(struct sock *sk, struct inet_listen_hashbucket *ilb) { - struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; - struct inet_listen_hashbucket *ilb = NULL; - spinlock_t *lock; - if (sk_unhashed(sk)) return; - if (sk->sk_state == TCP_LISTEN) { - ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; - lock = &ilb->lock; - } else { - lock = inet_ehash_lockp(hashinfo, sk->sk_hash); - } - spin_lock_bh(lock); - if (sk_unhashed(sk)) - goto unlock; - if (rcu_access_pointer(sk->sk_reuseport_cb)) reuseport_detach_sock(sk); if (ilb) { + struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; + inet_unhash2(hashinfo, sk); ilb->count--; } __sk_nulls_del_node_init_rcu(sk); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); -unlock: - spin_unlock_bh(lock); +} + +void inet_unhash(struct sock *sk) +{ + struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; + + if (sk_unhashed(sk)) + return; + + if (sk->sk_state == TCP_LISTEN) { + struct inet_listen_hashbucket *ilb; + + ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; + /* Don't disable bottom halves while acquiring the lock to + * avoid circular locking dependency on PREEMPT_RT. + */ + spin_lock(&ilb->lock); + __inet_unhash(sk, ilb); + spin_unlock(&ilb->lock); + } else { + spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash); + + spin_lock_bh(lock); + __inet_unhash(sk, NULL); + spin_unlock_bh(lock); + } } EXPORT_SYMBOL_GPL(inet_unhash); +/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm + * Note that we use 32bit integers (vs RFC 'short integers') + * because 2^16 is not a multiple of num_ephemeral and this + * property might be used by clever attacker. + * + * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though + * attacks were since demonstrated, thus we use 65536 by default instead + * to really give more isolation and privacy, at the expense of 256kB + * of kernel memory. + */ +#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER) +static u32 *table_perturb; + int __inet_hash_connect(struct inet_timewait_death_row *death_row, - struct sock *sk, u32 port_offset, + struct sock *sk, u64 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, struct inet_timewait_sock **)) { @@ -724,8 +743,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct inet_bind_bucket *tb; u32 remaining, offset; int ret, i, low, high; - static u32 hint; int l3mdev; + u32 index; if (port) { head = &hinfo->bhash[inet_bhashfn(net, port, @@ -752,7 +771,13 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, if (likely(remaining > 1)) remaining &= ~1U; - offset = (hint + port_offset) % remaining; + get_random_slow_once(table_perturb, + INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb)); + index = port_offset & (INET_TABLE_PERTURB_SIZE - 1); + + offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32); + offset %= remaining; + /* In first pass we try ports of @low parity. * inet_csk_get_port() does the opposite choice. */ @@ -806,7 +831,13 @@ next_port: return -EADDRNOTAVAIL; ok: - hint += i + 2; + /* Here we want to add a little bit of randomness to the next source + * port that will be chosen. We use a max() with a random here so that + * on low contention the randomness is maximal and on high contention + * it may be inexistent. 
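The table_perturb machinery introduced above (and updated in the "ok:" hunk just below) is RFC 6056 Algorithm 4: a keyed 64-bit hash of the connection tuple selects both a slot in a 2^16-entry perturbation table (65536 u32 entries, the 256 kB the comment mentions) and an extra offset, so different destinations walk independent, unpredictable port sequences. A simplified standalone sketch; the hash below is an illustrative stand-in for the kernel's siphash-based inet_sk_port_offset(), and the fixed "+= 2" omits the random increment the patch adds:

#include <stdint.h>
#include <stdio.h>

#define TABLE_PERTURB_SIZE (1u << 16)

static uint32_t table_perturb[TABLE_PERTURB_SIZE];

static uint64_t fake_port_offset(uint32_t daddr, uint16_t dport)
{
	/* stand-in for a keyed 64-bit hash of (saddr, daddr, dport) */
	uint64_t h = daddr * 0x9E3779B97F4A7C15ull + dport;

	return h ^ (h >> 29);
}

static uint16_t pick_port(uint32_t daddr, uint16_t dport,
			  uint32_t low, uint32_t remaining)
{
	uint64_t port_offset = fake_port_offset(daddr, dport);
	uint32_t index = port_offset & (TABLE_PERTURB_SIZE - 1);
	uint32_t offset = table_perturb[index] + (uint32_t)(port_offset >> 32);

	offset %= remaining;
	/* advance the per-slot state so the next connect() hashing
	 * here starts somewhere else
	 */
	table_perturb[index] += 2;
	return (uint16_t)(low + offset);
}

int main(void)
{
	for (int i = 0; i < 3; i++)	/* ephemeral range 32768..60999 */
		printf("port %u\n", pick_port(0x0a000001, 443, 32768, 28232));
	return 0;
}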
+ */ + i = max_t(int, i, (prandom_u32() & 7) * 2); + WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2); /* Head lock still held and bh's disabled */ inet_bind_hash(sk, tb, port); @@ -829,7 +860,7 @@ ok: int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { - u32 port_offset = 0; + u64 port_offset = 0; if (!inet_sk(sk)->inet_num) port_offset = inet_sk_port_offset(sk); @@ -879,6 +910,12 @@ void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name, low_limit, high_limit); init_hashinfo_lhash2(h); + + /* this one is used for source ports of outgoing connections */ + table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE, + sizeof(*table_perturb), GFP_KERNEL); + if (!table_perturb) + panic("TCP: failed to alloc table_perturb"); } int inet_hashinfo2_init_mod(struct inet_hashinfo *h) diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index ff327a62c9ce..a18668552d33 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -148,16 +148,20 @@ static void inet_peer_gc(struct inet_peer_base *base, struct inet_peer *gc_stack[], unsigned int gc_cnt) { + int peer_threshold, peer_maxttl, peer_minttl; struct inet_peer *p; __u32 delta, ttl; int i; - if (base->total >= inet_peer_threshold) + peer_threshold = READ_ONCE(inet_peer_threshold); + peer_maxttl = READ_ONCE(inet_peer_maxttl); + peer_minttl = READ_ONCE(inet_peer_minttl); + + if (base->total >= peer_threshold) ttl = 0; /* be aggressive */ else - ttl = inet_peer_maxttl - - (inet_peer_maxttl - inet_peer_minttl) / HZ * - base->total / inet_peer_threshold * HZ; + ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ * + base->total / peer_threshold * HZ; for (i = 0; i < gc_cnt; i++) { p = gc_stack[i]; diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 00ec819f949b..29730edda220 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -151,7 +151,7 @@ int ip_forward(struct sk_buff *skb) !skb_sec_path(skb)) ip_rt_send_redirect(skb); - if (net->ipv4.sysctl_ip_fwd_update_priority) + if (READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority)) skb->priority = rt_tos2priority(iph->tos); return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index e4504dd510c6..65ead8a74933 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -454,14 +454,12 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, __be16 proto) { struct ip_tunnel *tunnel = netdev_priv(dev); - - if (tunnel->parms.o_flags & TUNNEL_SEQ) - tunnel->o_seqno++; + __be16 flags = tunnel->parms.o_flags; /* Push GRE header. */ gre_build_header(skb, tunnel->tun_hlen, - tunnel->parms.o_flags, proto, tunnel->parms.o_key, - htonl(tunnel->o_seqno)); + flags, proto, tunnel->parms.o_key, + (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0); ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); } @@ -499,7 +497,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev, (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ); gre_build_header(skb, tunnel_hlen, flags, proto, tunnel_id_to_key32(tun_info->key.tun_id), - (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0); + (flags & TUNNEL_SEQ) ? 
htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0); ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen); @@ -521,7 +519,6 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) int tunnel_hlen; int version; int nhoff; - int thoff; tun_info = skb_tunnel_info(skb); if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) || @@ -555,10 +552,16 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff)) truncate = true; - thoff = skb_transport_header(skb) - skb_mac_header(skb); - if (skb->protocol == htons(ETH_P_IPV6) && - (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)) - truncate = true; + if (skb->protocol == htons(ETH_P_IPV6)) { + int thoff; + + if (skb_transport_header_was_set(skb)) + thoff = skb_transport_header(skb) - skb_mac_header(skb); + else + thoff = nhoff + sizeof(struct ipv6hdr); + if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff) + truncate = true; + } if (version == 1) { erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), @@ -576,7 +579,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) } gre_build_header(skb, 8, TUNNEL_SEQ, - proto, 0, htonl(tunnel->o_seqno++)); + proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno))); ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen); @@ -626,21 +629,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, } if (dev->header_ops) { - const int pull_len = tunnel->hlen + sizeof(struct iphdr); - if (skb_cow_head(skb, 0)) goto free_skb; tnl_params = (const struct iphdr *)skb->data; - if (pull_len > skb_transport_offset(skb)) - goto free_skb; - /* Pull skb since ip_tunnel_xmit() needs skb->data pointing * to gre header. */ - skb_pull(skb, pull_len); + skb_pull(skb, tunnel->hlen + sizeof(struct iphdr)); skb_reset_mac_header(skb); + + if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_start(skb) < skb->data) + goto free_skb; } else { if (skb_cow_head(skb, dev->needed_headroom)) goto free_skb; @@ -1491,24 +1493,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) struct ip_tunnel_parm *p = &t->parms; __be16 o_flags = p->o_flags; - if (t->erspan_ver <= 2) { - if (t->erspan_ver != 0 && !t->collect_md) - o_flags |= TUNNEL_KEY; - - if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver)) - goto nla_put_failure; - - if (t->erspan_ver == 1) { - if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index)) - goto nla_put_failure; - } else if (t->erspan_ver == 2) { - if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir)) - goto nla_put_failure; - if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid)) - goto nla_put_failure; - } - } - if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || nla_put_be16(skb, IFLA_GRE_IFLAGS, gre_tnl_flags_to_gre_flags(p->i_flags)) || @@ -1549,6 +1533,34 @@ nla_put_failure: return -EMSGSIZE; } +static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct ip_tunnel *t = netdev_priv(dev); + + if (t->erspan_ver <= 2) { + if (t->erspan_ver != 0 && !t->collect_md) + t->parms.o_flags |= TUNNEL_KEY; + + if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver)) + goto nla_put_failure; + + if (t->erspan_ver == 1) { + if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index)) + goto nla_put_failure; + } else if (t->erspan_ver == 2) { + if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir)) + goto nla_put_failure; + if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid)) + goto nla_put_failure; + } + } + + return ipgre_fill_info(skb, dev); + +nla_put_failure: + 
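The __gre_xmit()/gre_fb_xmit()/erspan hunks above replace "tunnel->o_seqno++" with atomic_fetch_inc(): two CPUs transmitting on the same tunnel could otherwise read the same counter and emit duplicate (or skipped) GRE sequence numbers. The C11 equivalent of the pattern, as a standalone sketch:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t o_seqno;

static uint32_t next_seqno(int seq_enabled)
{
	/* returns the pre-increment value, like atomic_fetch_inc() */
	return seq_enabled ?
	       atomic_fetch_add_explicit(&o_seqno, 1, memory_order_relaxed) : 0;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("seq=%u\n", next_seqno(1));
	return 0;
}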
return -EMSGSIZE; +} + static void erspan_setup(struct net_device *dev) { struct ip_tunnel *t = netdev_priv(dev); @@ -1627,7 +1639,7 @@ static struct rtnl_link_ops erspan_link_ops __read_mostly = { .changelink = erspan_changelink, .dellink = ip_tunnel_dellink, .get_size = ipgre_get_size, - .fill_info = ipgre_fill_info, + .fill_info = erspan_fill_info, .get_link_net = ip_tunnel_get_link_net, }; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index b0c244af1e4d..f6b3237e88ca 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -309,14 +309,13 @@ static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph, ip_hdr(hint)->tos == iph->tos; } -INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *)); -INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *)); +int tcp_v4_early_demux(struct sk_buff *skb); +int udp_v4_early_demux(struct sk_buff *skb); static int ip_rcv_finish_core(struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *dev, const struct sk_buff *hint) { const struct iphdr *iph = ip_hdr(skb); - int (*edemux)(struct sk_buff *skb); struct rtable *rt; int err; @@ -327,21 +326,29 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, goto drop_error; } - if (net->ipv4.sysctl_ip_early_demux && + if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) && !skb_dst(skb) && !skb->sk && !ip_is_fragment(iph)) { - const struct net_protocol *ipprot; - int protocol = iph->protocol; + switch (iph->protocol) { + case IPPROTO_TCP: + if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) { + tcp_v4_early_demux(skb); - ipprot = rcu_dereference(inet_protos[protocol]); - if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) { - err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux, - udp_v4_early_demux, skb); - if (unlikely(err)) - goto drop_error; - /* must reload iph, skb->head might have changed */ - iph = ip_hdr(skb); + /* must reload iph, skb->head might have changed */ + iph = ip_hdr(skb); + } + break; + case IPPROTO_UDP: + if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) { + err = udp_v4_early_demux(skb); + if (unlikely(err)) + goto drop_error; + + /* must reload iph, skb->head might have changed */ + iph = ip_hdr(skb); + } + break; } } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 5e48b3d3a00d..0dbf950de832 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1712,7 +1712,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, daddr, saddr, tcp_hdr(skb)->source, tcp_hdr(skb)->dest, arg->uid); - security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); + security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) return; @@ -1721,7 +1721,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, sk->sk_protocol = ip_hdr(skb)->protocol; sk->sk_bound_dev_if = arg->bound_dev_if; - sk->sk_sndbuf = sysctl_wmem_default; + sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); ipc.sockc.mark = fl4.flowi4_mark; err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0, &ipc, &rt, MSG_DONTWAIT); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ec6036713e2c..4cc39c62af55 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -773,7 +773,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen) if (optlen < GROUP_FILTER_SIZE(0)) return -EINVAL; - if (optlen > sysctl_optmem_max) + if (optlen > READ_ONCE(sysctl_optmem_max)) return -ENOBUFS; gsf = 
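The ip_rcv_finish_core() hunk above drops the indirect early_demux function pointer in favor of a direct switch on iph->protocol, with each case gated by its own lockless sysctl read; direct calls also avoid a retpoline-era indirect branch. A minimal dispatch of the same shape (names are stand-ins, not the kernel's):

#include <stdio.h>

#define IPPROTO_TCP 6
#define IPPROTO_UDP 17

static int tcp_early_demux_on = 1;	/* sysctl_tcp_early_demux stand-in */
static int udp_early_demux_on = 1;	/* sysctl_udp_early_demux stand-in */

static void tcp_early_demux(void) { puts("tcp demux"); }
static void udp_early_demux(void) { puts("udp demux"); }

static void early_demux(int protocol)
{
	/* direct calls per protocol, sysctl checked at the call site */
	switch (protocol) {
	case IPPROTO_TCP:
		if (tcp_early_demux_on)
			tcp_early_demux();
		break;
	case IPPROTO_UDP:
		if (udp_early_demux_on)
			udp_early_demux();
		break;
	}
}

int main(void)
{
	early_demux(IPPROTO_TCP);
	early_demux(IPPROTO_UDP);
	return 0;
}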
memdup_sockptr(optval, optlen); @@ -783,7 +783,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen) /* numsrc >= (4G-140)/128 overflow in 32 bits */ err = -ENOBUFS; if (gsf->gf_numsrc >= 0x1ffffff || - gsf->gf_numsrc > sock_net(sk)->ipv4.sysctl_igmp_max_msf) + gsf->gf_numsrc > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf)) goto out_free_gsf; err = -EINVAL; @@ -808,7 +808,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, if (optlen < size0) return -EINVAL; - if (optlen > sysctl_optmem_max - 4) + if (optlen > READ_ONCE(sysctl_optmem_max) - 4) return -ENOBUFS; p = kmalloc(optlen + 4, GFP_KERNEL); @@ -832,7 +832,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, /* numsrc >= (4G-140)/128 overflow in 32 bits */ err = -ENOBUFS; - if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf) + if (n > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf)) goto out_free_gsf; err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode, &gf32->gf_group, gf32->gf_slist); @@ -1231,7 +1231,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname, if (optlen < IP_MSFILTER_SIZE(0)) goto e_inval; - if (optlen > sysctl_optmem_max) { + if (optlen > READ_ONCE(sysctl_optmem_max)) { err = -ENOBUFS; break; } @@ -1242,7 +1242,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname, } /* numsrc >= (1G-4) overflow in 32 bits */ if (msf->imsf_numsrc >= 0x3ffffffcU || - msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) { + msf->imsf_numsrc > READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) { kfree(msf); err = -ENOBUFS; break; diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index e25be2d01a7a..4b74c67f13c9 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -410,7 +410,7 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst, u32 mtu = dst_mtu(encap_dst) - headroom; if ((skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) || - (!skb_is_gso(skb) && (skb->len - skb_mac_header_len(skb)) <= mtu)) + (!skb_is_gso(skb) && (skb->len - skb_network_offset(skb)) <= mtu)) return 0; skb_dst_update_pmtu_no_confirm(skb, mtu); diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 1088564d4dbc..77e3b67e8790 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -424,7 +424,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par) switch (ctinfo) { case IP_CT_NEW: - ct->mark = hash; + WRITE_ONCE(ct->mark, hash); break; case IP_CT_RELATED: case IP_CT_RELATED_REPLY: @@ -441,7 +441,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par) #ifdef DEBUG nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); #endif - pr_debug("hash=%u ct_hash=%u ", hash, ct->mark); + pr_debug("hash=%u ct_hash=%u ", hash, READ_ONCE(ct->mark)); if (!clusterip_responsible(cipinfo->config, hash)) { pr_debug("not responsible\n"); return NF_DROP; diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c index bcdb37f86a94..aeb631760eb9 100644 --- a/net/ipv4/netfilter/nft_dup_ipv4.c +++ b/net/ipv4/netfilter/nft_dup_ipv4.c @@ -13,8 +13,8 @@ #include struct nft_dup_ipv4 { - enum nft_registers sreg_addr:8; - enum nft_registers sreg_dev:8; + u8 sreg_addr; + u8 sreg_dev; }; static void nft_dup_ipv4_eval(const struct nft_expr *expr, @@ -40,16 +40,16 @@ static int nft_dup_ipv4_init(const struct nft_ctx *ctx, if (tb[NFTA_DUP_SREG_ADDR] == NULL) return 
-EINVAL; - priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]); - err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in_addr)); + err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr, + sizeof(struct in_addr)); if (err < 0) return err; - if (tb[NFTA_DUP_SREG_DEV] != NULL) { - priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]); - return nft_validate_register_load(priv->sreg_dev, sizeof(int)); - } - return 0; + if (tb[NFTA_DUP_SREG_DEV]) + err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV], + &priv->sreg_dev, sizeof(int)); + + return err; } static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr) diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c index 03df986217b7..9e6f0f1275e2 100644 --- a/net/ipv4/netfilter/nft_fib_ipv4.c +++ b/net/ipv4/netfilter/nft_fib_ipv4.c @@ -83,6 +83,9 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, else oif = NULL; + if (priv->flags & NFTA_FIB_F_IIF) + fl4.flowi4_oif = l3mdev_master_ifindex_rcu(oif); + if (nft_hook(pkt) == NF_INET_PRE_ROUTING && nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { nft_fib_store_result(dest, priv, nft_in(pkt)); diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 4dbc628f8c38..84e817ac3f08 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -882,7 +882,7 @@ static void __remove_nexthop_fib(struct net *net, struct nexthop *nh) /* __ip6_del_rt does a release, so do a hold here */ fib6_info_hold(f6i); ipv6_stub->ip6_del_rt(net, f6i, - !net->ipv4.sysctl_nexthop_compat_mode); + !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode)); } } @@ -1173,7 +1173,8 @@ out: if (!rc) { nh_base_seq_inc(net); nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo); - if (replace_notify && net->ipv4.sysctl_nexthop_compat_mode) + if (replace_notify && + READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode)) nexthop_replace_notify(net, new_nh, &cfg->nlinfo); } @@ -1324,7 +1325,7 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh, if (!err) { nh->nh_flags = fib_nh->fib_nh_flags; fib_info_update_nhc_saddr(net, &fib_nh->nh_common, - fib_nh->fib_nh_scope); + !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1); } else { fib_nh_release(net, fib_nh); } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index e60ca03543a5..1bad851b3fc3 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -305,6 +305,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, struct net *net = sock_net(sk); if (sk->sk_family == AF_INET) { struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; + u32 tb_id = RT_TABLE_LOCAL; int chk_addr_ret; if (addr_len < sizeof(*addr)) @@ -320,8 +321,10 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) chk_addr_ret = RTN_LOCAL; - else - chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); + else { + tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? 
: tb_id; + chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id); + } if ((!inet_can_nonlocal_bind(net, isk) && chk_addr_ret != RTN_LOCAL) || @@ -359,6 +362,14 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, return -ENODEV; } } + + if (!dev && sk->sk_bound_dev_if) { + dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); + if (!dev) { + rcu_read_unlock(); + return -ENODEV; + } + } has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev, scoped); rcu_read_unlock(); @@ -785,7 +796,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl4.fl4_icmp_type = user_icmph.type; fl4.fl4_icmp_code = user_icmph.code; - security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); + security_sk_classify_flow(sk, flowi4_to_flowi_common(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 5d95f80314f9..4899ebe569eb 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -640,7 +640,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) goto done; } - security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); + security_sk_classify_flow(sk, flowi4_to_flowi_common(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c72d0de8bf71..216f76b548ff 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1442,7 +1442,7 @@ u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr) struct fib_info *fi = res->fi; u32 mtu = 0; - if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu || + if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) || fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU)) mtu = fi->fib_mtu; @@ -1776,7 +1776,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, flags |= RTCF_LOCAL; rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST, - IN_DEV_CONF_GET(in_dev, NOPOLICY), false); + IN_DEV_ORCONF(in_dev, NOPOLICY), false); if (!rth) return -ENOBUFS; @@ -1792,6 +1792,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, #endif RT_CACHE_STAT_INC(in_slow_mc); + skb_dst_drop(skb); skb_dst_set(skb, &rth->dst); return 0; } @@ -1892,8 +1893,8 @@ static int __mkroute_input(struct sk_buff *skb, } rth = rt_dst_alloc(out_dev->dev, 0, res->type, - IN_DEV_CONF_GET(in_dev, NOPOLICY), - IN_DEV_CONF_GET(out_dev, NOXFRM)); + IN_DEV_ORCONF(in_dev, NOPOLICY), + IN_DEV_ORCONF(out_dev, NOXFRM)); if (!rth) { err = -ENOBUFS; goto cleanup; @@ -2275,7 +2276,7 @@ local_input: rth = rt_dst_alloc(ip_rt_get_dev(net, res), flags | RTCF_LOCAL, res->type, - IN_DEV_CONF_GET(in_dev, NOPOLICY), false); + IN_DEV_ORCONF(in_dev, NOPOLICY), false); if (!rth) goto e_nobufs; @@ -2498,8 +2499,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res, add: rth = rt_dst_alloc(dev_out, flags, type, - IN_DEV_CONF_GET(in_dev, NOPOLICY), - IN_DEV_CONF_GET(in_dev, NOXFRM)); + IN_DEV_ORCONF(in_dev, NOPOLICY), + IN_DEV_ORCONF(in_dev, NOXFRM)); if (!rth) return ERR_PTR(-ENOBUFS); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 00dc3f943c80..41afc9155f31 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -249,12 +249,12 @@ bool cookie_timestamp_decode(const struct net *net, return true; } - if (!net->ipv4.sysctl_tcp_timestamps) + if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps)) return false; tcp_opt->sack_ok = (options & TS_OPT_SACK) ? 
TCP_SACK_SEEN : 0; - if (tcp_opt->sack_ok && !net->ipv4.sysctl_tcp_sack) + if (tcp_opt->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack)) return false; if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK) @@ -263,7 +263,7 @@ bool cookie_timestamp_decode(const struct net *net, tcp_opt->wscale_ok = 1; tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK; - return net->ipv4.sysctl_tcp_window_scaling != 0; + return READ_ONCE(net->ipv4.sysctl_tcp_window_scaling) != 0; } EXPORT_SYMBOL(cookie_timestamp_decode); @@ -283,6 +283,7 @@ bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt, EXPORT_SYMBOL(cookie_ecn_ok); struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, + const struct tcp_request_sock_ops *af_ops, struct sock *sk, struct sk_buff *skb) { @@ -299,6 +300,10 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, return NULL; treq = tcp_rsk(req); + + /* treq->af_specific might be used to perform TCP_MD5 lookup */ + treq->af_specific = af_ops; + treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield; #if IS_ENABLED(CONFIG_MPTCP) treq->is_mptcp = sk_is_mptcp(sk); @@ -337,7 +342,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) struct flowi4 fl4; u32 tsoff = 0; - if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst) + if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) || + !th->ack || th->rst) goto out; if (tcp_synq_no_recent_overflow(sk)) @@ -366,7 +372,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) goto out; ret = NULL; - req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops, sk, skb); + req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops, + &tcp_request_sock_ipv4_ops, sk, skb); if (!req) goto out; @@ -418,7 +425,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) inet_sk_flowi_flags(sk), opt->srr ? opt->faddr : ireq->ir_rmt_addr, ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid); - security_req_classify_flow(req, flowi4_to_flowi(&fl4)); + security_req_classify_flow(req, flowi4_to_flowi_common(&fl4)); rt = ip_route_output_key(sock_net(sk), &fl4); if (IS_ERR(rt)) { reqsk_free(req); diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 0c039e986372..070fd0670d00 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -95,7 +95,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write, * port limit. */ if ((range[1] < range[0]) || - (range[0] < net->ipv4.sysctl_ip_prot_sock)) + (range[0] < READ_ONCE(net->ipv4.sysctl_ip_prot_sock))) ret = -EINVAL; else set_local_port_range(net, range); @@ -121,7 +121,7 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write, .extra2 = &ip_privileged_port_max, }; - pports = net->ipv4.sysctl_ip_prot_sock; + pports = READ_ONCE(net->ipv4.sysctl_ip_prot_sock); ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); @@ -133,7 +133,7 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write, if (range[0] < pports) ret = -EINVAL; else - net->ipv4.sysctl_ip_prot_sock = pports; + WRITE_ONCE(net->ipv4.sysctl_ip_prot_sock, pports); } return ret; @@ -361,61 +361,6 @@ bad_key: return ret; } -static void proc_configure_early_demux(int enabled, int protocol) -{ - struct net_protocol *ipprot; -#if IS_ENABLED(CONFIG_IPV6) - struct inet6_protocol *ip6prot; -#endif - - rcu_read_lock(); - - ipprot = rcu_dereference(inet_protos[protocol]); - if (ipprot) - ipprot->early_demux = enabled ? 
ipprot->early_demux_handler : - NULL; - -#if IS_ENABLED(CONFIG_IPV6) - ip6prot = rcu_dereference(inet6_protos[protocol]); - if (ip6prot) - ip6prot->early_demux = enabled ? ip6prot->early_demux_handler : - NULL; -#endif - rcu_read_unlock(); -} - -static int proc_tcp_early_demux(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) -{ - int ret = 0; - - ret = proc_dointvec(table, write, buffer, lenp, ppos); - - if (write && !ret) { - int enabled = init_net.ipv4.sysctl_tcp_early_demux; - - proc_configure_early_demux(enabled, IPPROTO_TCP); - } - - return ret; -} - -static int proc_udp_early_demux(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) -{ - int ret = 0; - - ret = proc_dointvec(table, write, buffer, lenp, ppos); - - if (write && !ret) { - int enabled = init_net.ipv4.sysctl_udp_early_demux; - - proc_configure_early_demux(enabled, IPPROTO_UDP); - } - - return ret; -} - static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) @@ -685,14 +630,14 @@ static struct ctl_table ipv4_net_table[] = { .data = &init_net.ipv4.sysctl_udp_early_demux, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_udp_early_demux + .proc_handler = proc_douintvec_minmax, }, { .procname = "tcp_early_demux", .data = &init_net.ipv4.sysctl_tcp_early_demux, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_tcp_early_demux + .proc_handler = proc_douintvec_minmax, }, { .procname = "nexthop_compat_mode", diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index ba28fc514fc1..90c5e6928b5c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -442,7 +442,7 @@ void tcp_init_sock(struct sock *sk) tp->snd_cwnd_clamp = ~0; tp->mss_cache = TCP_MSS_DEFAULT; - tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering; + tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering); tcp_assign_congestion_control(sk); tp->tsoffset = 0; @@ -453,8 +453,8 @@ void tcp_init_sock(struct sock *sk) icsk->icsk_sync_mss = tcp_sync_mss; - WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]); - WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]); + WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1])); + WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1])); sk_sockets_allocated_inc(sk); sk->sk_route_forced_caps = NETIF_F_GSO; @@ -700,7 +700,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb, int size_goal) { return skb->len < size_goal && - sock_net(sk)->ipv4.sysctl_tcp_autocorking && + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) && !tcp_rtx_queue_empty(sk) && refcount_read(&sk->sk_wmem_alloc) > skb->truesize; } @@ -1150,7 +1150,8 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, struct sockaddr *uaddr = msg->msg_name; int err, flags; - if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) || + if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & + TFO_CLIENT_ENABLE) || (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) && uaddr->sa_family == AF_UNSPEC)) return -EOPNOTSUPP; @@ -1713,7 +1714,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val) if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) cap = sk->sk_rcvbuf >> 1; else - cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1; + cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; val = min(val, cap); WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); @@ -2774,6 +2775,8 @@ int tcp_disconnect(struct sock *sk, int flags) tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tp->snd_cwnd = TCP_INIT_CWND; tp->snd_cwnd_cnt = 0; + tp->is_cwnd_limited = 0; + tp->max_packets_out = 0; tp->window_clamp = 0; tp->delivered = 0; tp->delivered_ce = 0; @@ -2792,8 +2795,7 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); __sk_dst_reset(sk); - dst_release(sk->sk_rx_dst); - sk->sk_rx_dst = NULL; + dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); tcp_saved_syn_free(tp); tp->compressed_ack = 0; tp->segs_in = 0; @@ -3268,7 +3270,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname, case TCP_REPAIR_OPTIONS: if (!tp->repair) err = -EINVAL; - else if (sk->sk_state == TCP_ESTABLISHED) + else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent) err = tcp_repair_options_est(sk, optval, optlen); else err = -EPERM; @@ -3369,7 +3371,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname, case TCP_FASTOPEN_CONNECT: if (val > 1 || val < 0) { err = -EINVAL; - } else if (net->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) { + } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) & + TFO_CLIENT_ENABLE) { if (sk->sk_state == TCP_CLOSE) tp->fastopen_connect = val; else @@ -3706,7 +3709,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, case TCP_LINGER2: val = tp->linger2; if (val >= 0) - val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ; + val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ; break; case TCP_DEFER_ACCEPT: val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, @@ -4018,12 +4021,16 @@ static void __tcp_alloc_md5sig_pool(void) * to memory. See smp_rmb() in tcp_get_md5sig_pool() */ smp_wmb(); - tcp_md5sig_pool_populated = true; + /* Paired with READ_ONCE() from tcp_alloc_md5sig_pool() + * and tcp_get_md5sig_pool(). 
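The tcp_disconnect() hunk above folds "dst_release(sk->sk_rx_dst); sk->sk_rx_dst = NULL;" into a single dst_release(xchg(...)): the atomic swap guarantees exactly one path owns the old pointer, so the reference can never be dropped twice. A C11 sketch of the pattern (struct and refcounting here are simplified stand-ins):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dst { int refcnt; };

static _Atomic(struct dst *) sk_rx_dst;

static void dst_release(struct dst *dst)
{
	if (dst && --dst->refcnt == 0)
		free(dst);
}

static void clear_rx_dst(void)
{
	/* whoever sees the non-NULL value owns the reference */
	dst_release(atomic_exchange(&sk_rx_dst, NULL));
}

int main(void)
{
	struct dst *d = calloc(1, sizeof(*d));

	d->refcnt = 1;
	atomic_store(&sk_rx_dst, d);
	clear_rx_dst();
	clear_rx_dst();		/* second swap sees NULL: no double free */
	puts("ok");
	return 0;
}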
+ */ + WRITE_ONCE(tcp_md5sig_pool_populated, true); } bool tcp_alloc_md5sig_pool(void) { - if (unlikely(!tcp_md5sig_pool_populated)) { + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) { mutex_lock(&tcp_md5sig_mutex); if (!tcp_md5sig_pool_populated) { @@ -4034,7 +4041,8 @@ bool tcp_alloc_md5sig_pool(void) mutex_unlock(&tcp_md5sig_mutex); } - return tcp_md5sig_pool_populated; + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + return READ_ONCE(tcp_md5sig_pool_populated); } EXPORT_SYMBOL(tcp_alloc_md5sig_pool); @@ -4050,7 +4058,8 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) { local_bh_disable(); - if (tcp_md5sig_pool_populated) { + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + if (READ_ONCE(tcp_md5sig_pool_populated)) { /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ smp_rmb(); return this_cpu_ptr(&tcp_md5sig_pool); diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index eaf2308c355a..809ee0f32d59 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -315,7 +315,7 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, { bool cork = false, enospc = sk_msg_full(msg); struct sock *sk_redir; - u32 tosend, delta = 0; + u32 tosend, origsize, sent, delta = 0; u32 eval = __SK_NONE; int ret; @@ -370,10 +370,12 @@ more_data: cork = true; psock->cork = NULL; } - sk_msg_return(sk, msg, msg->sg.size); + sk_msg_return(sk, msg, tosend); release_sock(sk); + origsize = msg->sg.size; ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags); + sent = origsize - msg->sg.size; if (eval == __SK_REDIRECT) sock_put(sk_redir); @@ -412,7 +414,7 @@ more_data: msg->sg.data[msg->sg.start].page_link && msg->sg.data[msg->sg.start].length) { if (eval == __SK_REDIRECT) - sk_mem_charge(sk, msg->sg.size); + sk_mem_charge(sk, tosend - sent); goto more_data; } } diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c index 709d23801823..56dede4b59d9 100644 --- a/net/ipv4/tcp_cdg.c +++ b/net/ipv4/tcp_cdg.c @@ -375,6 +375,7 @@ static void tcp_cdg_init(struct sock *sk) struct cdg *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); + ca->gradients = NULL; /* We silently fall back to window = 1 if allocation fails. 
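The tcp_md5sig_pool hunks above publish once-initialized state behind a "populated" flag: the kernel pairs smp_wmb() + WRITE_ONCE() on the writer with READ_ONCE() + smp_rmb() on the reader, so the flag can only become visible after the pool contents. In C11 terms the same guarantee is a release store paired with an acquire load; a standalone sketch (the mutex and re-check of the real slow path are elided):

#include <stdatomic.h>
#include <stdio.h>

static int pool_data;			/* the guarded state */
static atomic_bool pool_populated;

static void alloc_pool(void)
{
	if (!atomic_load_explicit(&pool_populated, memory_order_acquire)) {
		/* the real code takes a mutex and re-checks here */
		pool_data = 42;
		atomic_store_explicit(&pool_populated, true,
				      memory_order_release);	/* publish */
	}
}

static int get_pool(void)
{
	if (atomic_load_explicit(&pool_populated, memory_order_acquire))
		return pool_data;	/* guaranteed fully initialized */
	return -1;
}

int main(void)
{
	alloc_pool();
	printf("pool=%d\n", get_pool());
	return 0;
}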
*/ if (window > 1) ca->gradients = kcalloc(window, sizeof(ca->gradients[0]), @@ -388,6 +389,7 @@ static void tcp_cdg_release(struct sock *sk) struct cdg *ca = inet_csk_ca(sk); kfree(ca->gradients); + ca->gradients = NULL; } static struct tcp_congestion_ops tcp_cdg __read_mostly = { diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 107111984384..39fb037ce5f3 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -349,7 +349,7 @@ static bool tcp_fastopen_no_cookie(const struct sock *sk, const struct dst_entry *dst, int flag) { - return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) || + return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) || tcp_sk(sk)->fastopen_no_cookie || (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE)); } @@ -364,7 +364,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, const struct dst_entry *dst) { bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1; - int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen; + int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen); struct tcp_fastopen_cookie valid_foc = { .len = -1 }; struct sock *child; int ret = 0; @@ -506,7 +506,7 @@ void tcp_fastopen_active_disable(struct sock *sk) { struct net *net = sock_net(sk); - if (!sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout) + if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout)) return; /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */ @@ -527,7 +527,8 @@ void tcp_fastopen_active_disable(struct sock *sk) */ bool tcp_fastopen_active_should_disable(struct sock *sk) { - unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout; + unsigned int tfo_bh_timeout = + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout); unsigned long timeout; int tfo_da_times; int multiplier; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 06ec2887a466..4a3926cae468 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -426,7 +426,7 @@ static void tcp_sndbuf_expand(struct sock *sk) if (sk->sk_sndbuf < sndmem) WRITE_ONCE(sk->sk_sndbuf, - min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2])); + min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2]))); } /* 2. Tuning advertised window (window_clamp, rcv_ssthresh) @@ -455,12 +455,13 @@ static void tcp_sndbuf_expand(struct sock *sk) */ /* Slow part of check#2. */ -static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) +static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb, + unsigned int skbtruesize) { struct tcp_sock *tp = tcp_sk(sk); /* Optimize this! */ - int truesize = tcp_win_from_space(sk, skb->truesize) >> 1; - int window = tcp_win_from_space(sk, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; + int truesize = tcp_win_from_space(sk, skbtruesize) >> 1; + int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1; while (tp->rcv_ssthresh <= window) { if (truesize <= skb->len) @@ -472,7 +473,27 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) return 0; } -static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) +/* Even if skb appears to have a bad len/truesize ratio, TCP coalescing + * can play nice with us, as sk_buff and skb->head might be either + * freed or shared with up to MAX_SKB_FRAGS segments. 
+ * Only give a boost to drivers using page frag(s) to hold the frame(s), + * and if no payload was pulled in skb->head before reaching us. + */ +static u32 truesize_adjust(bool adjust, const struct sk_buff *skb) +{ + u32 truesize = skb->truesize; + + if (adjust && !skb_headlen(skb)) { + truesize -= SKB_TRUESIZE(skb_end_offset(skb)); + /* paranoid check, some drivers might be buggy */ + if (unlikely((int)truesize < (int)skb->len)) + truesize = skb->truesize; + } + return truesize; +} + +static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb, + bool adjust) { struct tcp_sock *tp = tcp_sk(sk); int room; @@ -481,15 +502,16 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) /* Check #1 */ if (room > 0 && !tcp_under_memory_pressure(sk)) { + unsigned int truesize = truesize_adjust(adjust, skb); int incr; /* Check #2. Increase window, if skb with such overhead * will fit to rcvbuf in future. */ - if (tcp_win_from_space(sk, skb->truesize) <= skb->len) + if (tcp_win_from_space(sk, truesize) <= skb->len) incr = 2 * tp->advmss; else - incr = __tcp_grow_window(sk, skb); + incr = __tcp_grow_window(sk, skb, truesize); if (incr) { incr = max_t(int, incr, 2 * skb->len); @@ -504,7 +526,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) */ static void tcp_init_buffer_space(struct sock *sk) { - int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win; + int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win); struct tcp_sock *tp = tcp_sk(sk); int maxwin; @@ -544,16 +566,17 @@ static void tcp_clamp_window(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); struct net *net = sock_net(sk); + int rmem2; icsk->icsk_ack.quick = 0; + rmem2 = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]); - if (sk->sk_rcvbuf < net->ipv4.sysctl_tcp_rmem[2] && + if (sk->sk_rcvbuf < rmem2 && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && !tcp_under_memory_pressure(sk) && sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) { WRITE_ONCE(sk->sk_rcvbuf, - min(atomic_read(&sk->sk_rmem_alloc), - net->ipv4.sysctl_tcp_rmem[2])); + min(atomic_read(&sk->sk_rmem_alloc), rmem2)); } if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss); @@ -694,7 +717,7 @@ void tcp_rcv_space_adjust(struct sock *sk) * */ - if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf && + if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { int rcvmem, rcvbuf; u64 rcvwin, grow; @@ -715,7 +738,7 @@ void tcp_rcv_space_adjust(struct sock *sk) do_div(rcvwin, tp->advmss); rcvbuf = min_t(u64, rcvwin * rcvmem, - sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); if (rcvbuf > sk->sk_rcvbuf) { WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); @@ -783,7 +806,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) tcp_ecn_check_ce(sk, skb); if (skb->len >= 128) - tcp_grow_window(sk, skb); + tcp_grow_window(sk, skb, true); } /* Called to compute a smoothed rtt estimate. The data fed to this @@ -1012,7 +1035,7 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq, tp->undo_marker ? tp->undo_retrans : 0); #endif tp->reordering = min_t(u32, (metric + mss - 1) / mss, - sock_net(sk)->ipv4.sysctl_tcp_max_reordering); + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)); } /* This exciting event is worth to be remembered. 
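truesize_adjust() above gives receive-window growth a fairer memory charge: when a driver kept the whole frame in page frags and skb->head is empty, the unused head allocation is subtracted before deciding whether the skb "deserves" a window boost. A standalone sketch of that logic; the struct fields are simplified stand-ins for skb->truesize, skb_headlen() and SKB_TRUESIZE(skb_end_offset()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_skb {
	uint32_t truesize;	/* total memory charged */
	uint32_t len;		/* payload bytes */
	uint32_t headlen;	/* bytes in the linear area */
	uint32_t head_alloc;	/* size of the (maybe unused) head buffer */
};

static uint32_t truesize_adjust(bool adjust, const struct fake_skb *skb)
{
	uint32_t truesize = skb->truesize;

	if (adjust && !skb->headlen) {
		truesize -= skb->head_alloc;
		/* paranoid check, mirrored from the patch */
		if ((int32_t)truesize < (int32_t)skb->len)
			truesize = skb->truesize;
	}
	return truesize;
}

int main(void)
{
	struct fake_skb skb = {
		.truesize = 4096 + 768, .len = 1448,
		.headlen = 0, .head_alloc = 768,
	};

	printf("charged %u, effective %u\n",
	       skb.truesize, truesize_adjust(true, &skb));
	return 0;
}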
8) */ @@ -1991,7 +2014,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend) return; tp->reordering = min_t(u32, tp->packets_out + addend, - sock_net(sk)->ipv4.sysctl_tcp_max_reordering); + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)); tp->reord_seen++; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER); } @@ -2056,7 +2079,8 @@ static inline void tcp_init_undo(struct tcp_sock *tp) static bool tcp_is_rack(const struct sock *sk) { - return sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION; + return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) & + TCP_RACK_LOSS_DETECTION; } /* If we detect SACK reneging, forget all SACK information @@ -2100,6 +2124,7 @@ void tcp_enter_loss(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; + u8 reordering; tcp_timeout_mark_lost(sk); @@ -2120,10 +2145,12 @@ void tcp_enter_loss(struct sock *sk) /* Timeout in disordered state after receiving substantial DUPACKs * suggests that the degree of reordering is over-estimated. */ + reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering); if (icsk->icsk_ca_state <= TCP_CA_Disorder && - tp->sacked_out >= net->ipv4.sysctl_tcp_reordering) + tp->sacked_out >= reordering) tp->reordering = min_t(unsigned int, tp->reordering, - net->ipv4.sysctl_tcp_reordering); + reordering); + tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; tcp_ecn_queue_cwr(tp); @@ -2132,7 +2159,7 @@ void tcp_enter_loss(struct sock *sk) * loss recovery is underway except recurring timeout(s) on * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing */ - tp->frto = net->ipv4.sysctl_tcp_frto && + tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) && (new_recovery || icsk->icsk_retransmits) && !inet_csk(sk)->icsk_mtup.probe_size; } @@ -2149,7 +2176,8 @@ void tcp_enter_loss(struct sock *sk) */ static bool tcp_check_sack_reneging(struct sock *sk, int flag) { - if (flag & FLAG_SACK_RENEGING) { + if (flag & FLAG_SACK_RENEGING && + flag & FLAG_SND_UNA_ADVANCED) { struct tcp_sock *tp = tcp_sk(sk); unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4), msecs_to_jiffies(10)); @@ -2470,6 +2498,21 @@ static inline bool tcp_may_undo(const struct tcp_sock *tp) return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); } +static bool tcp_is_non_sack_preventing_reopen(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { + /* Hold old state until something *above* high_seq + * is ACKed. For Reno it is MUST to prevent false + * fast retransmits (RFC2582). SACK TCP is safe. */ + if (!tcp_any_retrans_done(sk)) + tp->retrans_stamp = 0; + return true; + } + return false; +} + /* People celebrate: "We love our President!" */ static bool tcp_try_undo_recovery(struct sock *sk) { @@ -2492,14 +2535,8 @@ static bool tcp_try_undo_recovery(struct sock *sk) } else if (tp->rack.reo_wnd_persist) { tp->rack.reo_wnd_persist--; } - if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { - /* Hold old state until something *above* high_seq - * is ACKed. For Reno it is MUST to prevent false - * fast retransmits (RFC2582). SACK TCP is safe. 
*/ - if (!tcp_any_retrans_done(sk)) - tp->retrans_stamp = 0; + if (tcp_is_non_sack_preventing_reopen(sk)) return true; - } tcp_set_ca_state(sk, TCP_CA_Open); tp->is_sack_reneg = 0; return false; @@ -2535,6 +2572,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); inet_csk(sk)->icsk_retransmits = 0; + if (tcp_is_non_sack_preventing_reopen(sk)) + return true; if (frto_undo || tcp_is_sack(tp)) { tcp_set_ca_state(sk, TCP_CA_Open); tp->is_sack_reneg = 0; @@ -2668,12 +2707,15 @@ static void tcp_mtup_probe_success(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); + u64 val; - /* FIXME: breaks with very large cwnd */ tp->prior_ssthresh = tcp_current_ssthresh(sk); - tp->snd_cwnd = tp->snd_cwnd * - tcp_mss_to_mtu(sk, tp->mss_cache) / - icsk->icsk_mtup.probe_size; + + val = (u64)tp->snd_cwnd * tcp_mss_to_mtu(sk, tp->mss_cache); + do_div(val, icsk->icsk_mtup.probe_size); + WARN_ON_ONCE((u32)val != val); + tp->snd_cwnd = max_t(u32, 1U, val); + tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_jiffies32; tp->snd_ssthresh = tcp_current_ssthresh(sk); @@ -2998,7 +3040,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una, static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag) { - u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ; + u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ; struct tcp_sock *tp = tcp_sk(sk); if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) { @@ -3409,7 +3451,8 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag) * new SACK or ECE mark may first advance cwnd here and later reduce * cwnd in tcp_fastretrans_alert() based on more states. */ - if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering) + if (tcp_sk(sk)->reordering > + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering)) return flag & FLAG_FORWARD_PROGRESS; return flag & FLAG_DATA_ACKED; @@ -3521,7 +3564,8 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx, if (*last_oow_ack_time) { s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time); - if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) { + if (0 <= elapsed && + elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) { NET_INC_STATS(net, mib_idx); return true; /* rate-limited: don't send yet! */ } @@ -3568,11 +3612,11 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) /* Then check host-wide RFC 5961 rate limit. 
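The tcp_mtup_probe_success() hunk above fixes the "FIXME: breaks with very large cwnd": the old 32-bit "snd_cwnd * mss_to_mtu / probe_size" could wrap before the divide, collapsing cwnd. The patch widens the product to u64 via do_div() and clamps the result to at least 1. A standalone demonstration with illustrative numbers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t snd_cwnd = 4u * 1024 * 1024;	/* huge cwnd, in packets */
	uint32_t mtu = 1500, probe_size = 1600;

	/* old expression: the u32 product wraps at 2^32 */
	uint32_t wrong = snd_cwnd * mtu / probe_size;

	/* patched expression: 64-bit product, then divide */
	uint64_t val = (uint64_t)snd_cwnd * mtu;
	uint32_t right = (uint32_t)(val / probe_size);

	if (right < 1)
		right = 1;	/* the patch clamps with max_t(u32, 1U, val) */

	printf("32-bit: %" PRIu32 ", 64-bit: %" PRIu32 "\n", wrong, right);
	return 0;
}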
*/ now = jiffies / HZ; - if (now != challenge_timestamp) { - u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit; + if (now != READ_ONCE(challenge_timestamp)) { + u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit); u32 half = (ack_limit + 1) >> 1; - challenge_timestamp = now; + WRITE_ONCE(challenge_timestamp, now); WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit)); } count = READ_ONCE(challenge_count); @@ -3815,7 +3859,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_process_tlp_ack(sk, ack, flag); if (tcp_ack_is_dubious(sk, flag)) { - if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) { + if (!(flag & (FLAG_SND_UNA_ADVANCED | + FLAG_NOT_DUP | FLAG_DSACKING_ACK))) { num_dupack = 1; /* Consider if pure acks were aggregated in tcp_add_backlog() */ if (!(flag & FLAG_DATA)) @@ -4000,7 +4045,7 @@ void tcp_parse_options(const struct net *net, break; case TCPOPT_WINDOW: if (opsize == TCPOLEN_WINDOW && th->syn && - !estab && net->ipv4.sysctl_tcp_window_scaling) { + !estab && READ_ONCE(net->ipv4.sysctl_tcp_window_scaling)) { __u8 snd_wscale = *(__u8 *)ptr; opt_rx->wscale_ok = 1; if (snd_wscale > TCP_MAX_WSCALE) { @@ -4016,7 +4061,7 @@ void tcp_parse_options(const struct net *net, case TCPOPT_TIMESTAMP: if ((opsize == TCPOLEN_TIMESTAMP) && ((estab && opt_rx->tstamp_ok) || - (!estab && net->ipv4.sysctl_tcp_timestamps))) { + (!estab && READ_ONCE(net->ipv4.sysctl_tcp_timestamps)))) { opt_rx->saw_tstamp = 1; opt_rx->rcv_tsval = get_unaligned_be32(ptr); opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); @@ -4024,7 +4069,7 @@ void tcp_parse_options(const struct net *net, break; case TCPOPT_SACK_PERM: if (opsize == TCPOLEN_SACK_PERM && th->syn && - !estab && net->ipv4.sysctl_tcp_sack) { + !estab && READ_ONCE(net->ipv4.sysctl_tcp_sack)) { opt_rx->sack_ok = TCP_SACK_SEEN; tcp_sack_reset(opt_rx); } @@ -4359,7 +4404,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) { struct tcp_sock *tp = tcp_sk(sk); - if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) { + if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { int mib_idx; if (before(seq, tp->rcv_nxt)) @@ -4406,7 +4451,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); - if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) { + if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; tcp_rcv_spurious_retrans(sk, skb); @@ -4753,7 +4798,7 @@ coalesce_done: * and trigger fast retransmit. */ if (tcp_is_sack(tp)) - tcp_grow_window(sk, skb); + tcp_grow_window(sk, skb, true); kfree_skb_partial(skb, fragstolen); skb = NULL; goto add_sack; @@ -4841,7 +4886,7 @@ end: * and trigger fast retransmit. 
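/*
 * The tcp_send_challenge_ack() hunk above wraps the shared
 * challenge_timestamp/challenge_count pair in READ_ONCE()/WRITE_ONCE()
 * because no lock protects them.  The rate limiter it implements,
 * restated with illustrative demo_* names: once per second a fresh
 * budget of [half, half + limit) ACKs is drawn, then consumed.
 */
static u32 demo_stamp;	/* last refill, in seconds */
static u32 demo_count;	/* remaining ACK budget */

static bool demo_challenge_ack_allowed(u32 now, u32 limit)
{
	u32 count, half = (limit + 1) >> 1;

	if (now != READ_ONCE(demo_stamp)) {
		WRITE_ONCE(demo_stamp, now);
		WRITE_ONCE(demo_count, half + prandom_u32_max(limit));
	}
	count = READ_ONCE(demo_count);
	if (!count)
		return false;
	WRITE_ONCE(demo_count, count - 1);
	return true;
}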
*/ if (tcp_is_sack(tp)) - tcp_grow_window(sk, skb); + tcp_grow_window(sk, skb, false); skb_condense(skb); skb_set_owner_r(skb, sk); } @@ -5372,7 +5417,17 @@ static void tcp_new_space(struct sock *sk) sk->sk_write_space(sk); } -static void tcp_check_space(struct sock *sk) +/* Caller made space either from: + * 1) Freeing skbs in rtx queues (after tp->snd_una has advanced) + * 2) Sent skbs from output queue (and thus advancing tp->snd_nxt) + * + * We might be able to generate EPOLLOUT to the application if: + * 1) Space consumed in output/rtx queues is below sk->sk_sndbuf/2 + * 2) notsent amount (tp->write_seq - tp->snd_nxt) became + * small enough that tcp_stream_memory_free() decides it + * is time to generate EPOLLOUT. + */ +void tcp_check_space(struct sock *sk) { /* pairs with tcp_poll() */ smp_mb(); @@ -5422,7 +5477,7 @@ send_now: } if (!tcp_is_sack(tp) || - tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr) + tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)) goto send_now; if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) { @@ -5443,11 +5498,12 @@ send_now: if (tp->srtt_us && tp->srtt_us < rtt) rtt = tp->srtt_us; - delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns, + delay = min_t(unsigned long, + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns), rtt * (NSEC_PER_USEC >> 3)/20); sock_hold(sk); hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay), - sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns, + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns), HRTIMER_MODE_REL_PINNED_SOFT); } @@ -5475,7 +5531,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) struct tcp_sock *tp = tcp_sk(sk); u32 ptr = ntohs(th->urg_ptr); - if (ptr && !sock_net(sk)->ipv4.sysctl_tcp_stdurg) + if (ptr && !READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_stdurg)) ptr--; ptr += ntohl(th->seq); @@ -5724,7 +5780,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) trace_tcp_probe(sk, skb); tcp_mstamp_refresh(tp); - if (unlikely(!sk->sk_rx_dst)) + if (unlikely(!rcu_access_pointer(sk->sk_rx_dst))) inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* * Header prediction. @@ -6671,11 +6727,14 @@ static bool tcp_syn_flood_action(const struct sock *sk, const char *proto) { struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; const char *msg = "Dropping request"; - bool want_cookie = false; struct net *net = sock_net(sk); + bool want_cookie = false; + u8 syncookies; + + syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies); #ifdef CONFIG_SYN_COOKIES - if (net->ipv4.sysctl_tcp_syncookies) { + if (syncookies) { msg = "Sending cookies"; want_cookie = true; __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); @@ -6683,8 +6742,7 @@ static bool tcp_syn_flood_action(const struct sock *sk, const char *proto) #endif __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); - if (!queue->synflood_warned && - net->ipv4.sysctl_tcp_syncookies != 2 && + if (!queue->synflood_warned && syncookies != 2 && xchg(&queue->synflood_warned, 1) == 0) net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. 
Check SNMP counters.\n", proto, sk->sk_num, msg); @@ -6733,7 +6791,7 @@ u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops, struct tcp_sock *tp = tcp_sk(sk); u16 mss; - if (sock_net(sk)->ipv4.sysctl_tcp_syncookies != 2 && + if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) != 2 && !inet_csk_reqsk_queue_is_full(sk)) return 0; @@ -6767,13 +6825,15 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, bool want_cookie = false; struct dst_entry *dst; struct flowi fl; + u8 syncookies; + + syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies); /* TW buckets are converted to open requests without * limitations, they conserve resources and peer is * evidently real one. */ - if ((net->ipv4.sysctl_tcp_syncookies == 2 || - inet_csk_reqsk_queue_is_full(sk)) && !isn) { + if ((syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) && !isn) { want_cookie = tcp_syn_flood_action(sk, rsk_ops->slab_name); if (!want_cookie) goto drop; @@ -6827,10 +6887,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, goto drop_and_free; if (!want_cookie && !isn) { + int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog); + /* Kill the following clause, if you dislike this way. */ - if (!net->ipv4.sysctl_tcp_syncookies && - (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < - (net->ipv4.sysctl_max_syn_backlog >> 2)) && + if (!syncookies && + (max_syn_backlog - inet_csk_reqsk_queue_len(sk) < + (max_syn_backlog >> 2)) && !tcp_peer_is_proven(req, dst)) { /* Without syncookies last quarter of * backlog is filled with destinations, diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 017cd666387f..8bd7b1ec3b6a 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -106,10 +106,10 @@ static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb) int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) { + int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse); const struct inet_timewait_sock *tw = inet_twsk(sktw); const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw); struct tcp_sock *tp = tcp_sk(sk); - int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse; if (reuse == 2) { /* Still does not detect *everything* that goes through @@ -322,6 +322,8 @@ failure: * if necessary. */ tcp_set_state(sk, TCP_CLOSE); + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) + inet_reset_saddr(sk); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; @@ -983,7 +985,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, if (skb) { __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); - tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ? + tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ? (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) | (inet_sk(sk)->tos & INET_ECN_MASK) : inet_sk(sk)->tos; @@ -1558,7 +1560,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, /* Set ToS of the new socket based upon the value of incoming SYN. * ECT bits are set later in tcp_init_transfer(). 
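/*
 * The tcp_conn_request() hunk above snapshots sysctl_max_syn_backlog
 * into a local so that both uses see one consistent value.  The
 * heuristic it guards, factored out with an illustrative name: with
 * syncookies off, unproven peers are refused once less than a quarter
 * of the SYN backlog remains.
 */
static bool demo_syn_backlog_low(int max_backlog, int queue_len)
{
	return max_backlog - queue_len < (max_backlog >> 2);
}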
*/ - if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) + if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)) newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK; if (!dst) { @@ -1670,15 +1672,18 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) struct sock *rsk; if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ - struct dst_entry *dst = sk->sk_rx_dst; + struct dst_entry *dst; + + dst = rcu_dereference_protected(sk->sk_rx_dst, + lockdep_sock_is_held(sk)); sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || !dst->ops->check(dst, 0)) { + RCU_INIT_POINTER(sk->sk_rx_dst, NULL); dst_release(dst); - sk->sk_rx_dst = NULL; } } tcp_rcv_established(sk, skb); @@ -1753,7 +1758,7 @@ int tcp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { - struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); + struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); @@ -1767,8 +1772,7 @@ int tcp_v4_early_demux(struct sk_buff *skb) bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) { - u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf); - u32 tail_gso_size, tail_gso_segs; + u32 limit, tail_gso_size, tail_gso_segs; struct skb_shared_info *shinfo; const struct tcphdr *th; struct tcphdr *thtail; @@ -1871,11 +1875,13 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) __skb_push(skb, hdrlen); no_coalesce: + limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1); + /* Only socket owner can try to collapse/prune rx queues * to reduce memory overhead, so add a little headroom here. * Few sockets backlog are possibly concurrently non empty. */ - limit += 64*1024; + limit += 64 * 1024; if (unlikely(sk_add_backlog(sk, skb, limit))) { bh_unlock_sock(sk); @@ -1980,7 +1986,8 @@ process: struct sock *nsk; sk = req->rsk_listener; - if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) { + if (unlikely(!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb) || + tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; @@ -2019,6 +2026,7 @@ process: } goto discard_and_relse; } + nf_reset_ct(skb); if (nsk == sk) { reqsk_put(req); tcp_v4_restore_cb(skb); @@ -2160,7 +2168,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) struct dst_entry *dst = skb_dst(skb); if (dst && dst_hold_safe(dst)) { - sk->sk_rx_dst = dst; + rcu_assign_pointer(sk->sk_rx_dst, dst); inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; } } diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 6b27c481fe18..f3ca6eea2ca3 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -329,7 +329,7 @@ void tcp_update_metrics(struct sock *sk) int m; sk_dst_confirm(sk); - if (net->ipv4.sysctl_tcp_nometrics_save || !dst) + if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst) return; rcu_read_lock(); @@ -385,7 +385,7 @@ void tcp_update_metrics(struct sock *sk) if (tcp_in_initial_slowstart(tp)) { /* Slow start still did not finish. */ - if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save && + if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) && !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) { val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH); if (val && (tp->snd_cwnd >> 1) > val) @@ -401,7 +401,7 @@ void tcp_update_metrics(struct sock *sk) } else if (!tcp_in_slow_start(tp) && icsk->icsk_ca_state == TCP_CA_Open) { /* Cong. avoidance phase, cwnd is reliable. 
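/*
 * The hunks above convert sk->sk_rx_dst into an __rcu pointer.  The
 * access rules such a field obeys, shown on a hypothetical structure
 * (assumes <linux/rcupdate.h>); this is a sketch, not the socket code:
 */
struct demo_cache {
	struct dst_entry __rcu *dst;
};

static void demo_publish(struct demo_cache *c, struct dst_entry *dst)
{
	rcu_assign_pointer(c->dst, dst);	/* store with release barrier */
}

static void demo_invalidate(struct demo_cache *c)
{
	RCU_INIT_POINTER(c->dst, NULL);		/* NULL store needs no barrier */
}

static struct dst_entry *demo_fetch(struct demo_cache *c)
{
	/* caller must be inside an RCU read-side critical section */
	return rcu_dereference(c->dst);
}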
*/ - if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save && + if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) && !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) tcp_metric_set(tm, TCP_METRIC_SSTHRESH, max(tp->snd_cwnd >> 1, tp->snd_ssthresh)); @@ -418,7 +418,7 @@ void tcp_update_metrics(struct sock *sk) tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_ssthresh) >> 1); } - if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save && + if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) && !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) { val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH); if (val && tp->snd_ssthresh > val) @@ -428,7 +428,8 @@ void tcp_update_metrics(struct sock *sk) if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) { val = tcp_metric_get(tm, TCP_METRIC_REORDERING); if (val < tp->reordering && - tp->reordering != net->ipv4.sysctl_tcp_reordering) + tp->reordering != + READ_ONCE(net->ipv4.sysctl_tcp_reordering)) tcp_metric_set(tm, TCP_METRIC_REORDERING, tp->reordering); } @@ -462,7 +463,7 @@ void tcp_init_metrics(struct sock *sk) if (tcp_metric_locked(tm, TCP_METRIC_CWND)) tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND); - val = net->ipv4.sysctl_tcp_no_ssthresh_metrics_save ? + val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ? 0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH); if (val) { tp->snd_ssthresh = val; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index f0f67b25c97a..e42312321191 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -180,7 +180,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, * Oh well... nobody has a sufficient solution to this * protocol bug yet. */ - if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) { + if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) { kill: inet_twsk_deschedule_put(tw); return TCP_TW_SUCCESS; @@ -538,7 +538,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, newtp->tsoffset = treq->ts_off; #ifdef CONFIG_TCP_MD5SIG newtp->md5sig_info = NULL; /*XXX*/ - if (newtp->af_specific->md5_lookup(sk, newsk)) + if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req))) newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; #endif if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7fce54a29802..e5f8c8d6b985 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -82,6 +82,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT, tcp_skb_pcount(skb)); + tcp_check_space(sk); } /* SND.NXT, if window was not shrunk or the amount of shrunk was less than one @@ -166,16 +167,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp, if (tcp_packets_in_flight(tp) == 0) tcp_ca_event(sk, CA_EVENT_TX_START); - /* If this is the first data packet sent in response to the - * previous received data, - * and it is a reply for ato after last received packet, - * increase pingpong count. - */ - if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) && - (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) - inet_csk_inc_pingpong_cnt(sk); - tp->lsndtime = now; + + /* If it is a reply for ato after last received + * packet, enter pingpong mode. + */ + if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) + inet_csk_enter_pingpong_mode(sk); } /* Account for an ACK we sent. 
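/*
 * The tcp_event_data_sent() change above reduces the ping-pong
 * heuristic to a single test: data sent within one ACK timeout (ato)
 * of the last receive marks the flow as interactive.  The wrap-safe
 * jiffies comparison, isolated with an illustrative name:
 */
static bool demo_reply_within_ato(u32 now, u32 lrcvtime, u32 ato)
{
	/* unsigned subtraction handles jiffies wraparound */
	return (u32)(now - lrcvtime) < ato;
}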
*/ @@ -240,8 +238,8 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, *rcv_wscale = 0; if (wscale_ok) { /* Set window scaling on max possible window */ - space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); - space = max_t(u32, space, sysctl_rmem_max); + space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); + space = max_t(u32, space, READ_ONCE(sysctl_rmem_max)); space = min_t(u32, space, *window_clamp); *rcv_wscale = clamp_t(int, ilog2(space) - 15, 0, TCP_MAX_WSCALE); @@ -788,18 +786,18 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, opts->mss = tcp_advertise_mss(sk); remaining -= TCPOLEN_MSS_ALIGNED; - if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) { + if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps) && !*md5)) { opts->options |= OPTION_TS; opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; opts->tsecr = tp->rx_opt.ts_recent; remaining -= TCPOLEN_TSTAMP_ALIGNED; } - if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) { + if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) { opts->ws = tp->rx_opt.rcv_wscale; opts->options |= OPTION_WSCALE; remaining -= TCPOLEN_WSCALE_ALIGNED; } - if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) { + if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) { opts->options |= OPTION_SACK_ADVERTISE; if (unlikely(!(OPTION_TS & opts->options))) remaining -= TCPOLEN_SACKPERM_ALIGNED; @@ -1719,7 +1717,8 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) mss_now -= icsk->icsk_ext_hdr_len; /* Then reserve room for full set of TCP options and 8 bytes of data */ - mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss); + mss_now = max(mss_now, + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss)); return mss_now; } @@ -1762,10 +1761,10 @@ void tcp_mtup_init(struct sock *sk) struct inet_connection_sock *icsk = inet_csk(sk); struct net *net = sock_net(sk); - icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1; + icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1; icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + icsk->icsk_af_ops->net_header_len; - icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss); + icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss)); icsk->icsk_mtup.probe_size = 0; if (icsk->icsk_mtup.enabled) icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; @@ -1877,15 +1876,20 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; struct tcp_sock *tp = tcp_sk(sk); - /* Track the maximum number of outstanding packets in each - * window, and remember whether we were cwnd-limited then. + /* Track the strongest available signal of the degree to which the cwnd + * is fully utilized. If cwnd-limited then remember that fact for the + * current window. If not cwnd-limited then track the maximum number of + * outstanding packets in the current window. (If cwnd-limited then we + * chose to not update tp->max_packets_out to avoid an extra else + * clause with no functional impact.) 
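/*
 * The tcp_select_initial_window() hunk above reads tcp_rmem[2] and
 * sysctl_rmem_max with READ_ONCE().  The window-scale selection those
 * values feed, sketched in isolation: pick the smallest shift that
 * lets the largest receive buffer fit a 16-bit window field, i.e.
 * 2^(15 + wscale) must cover the buffer size.
 */
static int demo_pick_rcv_wscale(u32 space)
{
	return clamp_t(int, ilog2(space) - 15, 0, TCP_MAX_WSCALE);
}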
*/ - if (!before(tp->snd_una, tp->max_packets_seq) || - tp->packets_out > tp->max_packets_out || - is_cwnd_limited) { - tp->max_packets_out = tp->packets_out; - tp->max_packets_seq = tp->snd_nxt; + if (!before(tp->snd_una, tp->cwnd_usage_seq) || + is_cwnd_limited || + (!tp->is_cwnd_limited && + tp->packets_out > tp->max_packets_out)) { tp->is_cwnd_limited = is_cwnd_limited; + tp->max_packets_out = tp->packets_out; + tp->cwnd_usage_seq = tp->snd_nxt; } if (tcp_is_cwnd_limited(sk)) { @@ -1897,7 +1901,7 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) if (tp->packets_out > tp->snd_cwnd_used) tp->snd_cwnd_used = tp->packets_out; - if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle && + if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) && (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && !ca_ops->cong_control) tcp_cwnd_application_limited(sk); @@ -1985,7 +1989,7 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) min_tso = ca_ops->min_tso_segs ? ca_ops->min_tso_segs(sk) : - sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs; + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); tso_segs = tcp_tso_autosize(sk, mss_now, min_tso); return min_t(u32, tso_segs, sk->sk_gso_max_segs); @@ -2276,7 +2280,7 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk) u32 interval; s32 delta; - interval = net->ipv4.sysctl_tcp_probe_interval; + interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval); delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; if (unlikely(delta >= interval * HZ)) { int mss = tcp_current_mss(sk); @@ -2358,7 +2362,7 @@ static int tcp_mtu_probe(struct sock *sk) * probing process by not resetting search range to its original. */ if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || - interval < net->ipv4.sysctl_tcp_probe_threshold) { + interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) { /* Check whether enough time has elapsed for * another round of probing. */ @@ -2500,7 +2504,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift)); if (sk->sk_pacing_status == SK_PACING_NONE) limit = min_t(unsigned long, limit, - sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes); + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes)); limit <<= factor; if (static_branch_unlikely(&tcp_tx_delay_enabled) && @@ -2733,7 +2737,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) if (rcu_access_pointer(tp->fastopen_rsk)) return false; - early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans; + early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans); /* Schedule a loss probe in 2*RTT for SACK capable connections * not in loss recovery, that are either limited by cwnd or application. 
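/*
 * The tcp_cwnd_validate() rewrite above, restated on a hypothetical
 * structure so the invariant is visible: one measurement spans the
 * window [snd_una, cwnd_usage_seq]; within it the "cwnd limited"
 * verdict can only be set, never cleared, and max_packets_out stops
 * rising once the verdict is set.  demo_* names are illustrative.
 */
static inline bool demo_seq_before(u32 a, u32 b)
{
	return (s32)(a - b) < 0;	/* wrap-safe sequence compare */
}

struct demo_cwnd_usage {
	u32 snd_una, snd_nxt, packets_out;
	u32 max_packets_out, cwnd_usage_seq;
	bool is_cwnd_limited;
};

static void demo_cwnd_usage_update(struct demo_cwnd_usage *u, bool limited)
{
	if (!demo_seq_before(u->snd_una, u->cwnd_usage_seq) ||
	    limited ||
	    (!u->is_cwnd_limited && u->packets_out > u->max_packets_out)) {
		u->is_cwnd_limited = limited;
		u->max_packets_out = u->packets_out;
		u->cwnd_usage_seq = u->snd_nxt;
	}
}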
*/ @@ -3098,7 +3102,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, struct sk_buff *skb = to, *tmp; bool first = true; - if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse) + if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)) return; if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) return; @@ -3138,7 +3142,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) struct tcp_sock *tp = tcp_sk(sk); unsigned int cur_mss; int diff, len, err; - + int avail_wnd; /* Inconclusive MTU probe */ if (icsk->icsk_mtup.probe_size) @@ -3168,17 +3172,25 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) return -EHOSTUNREACH; /* Routing failure or similar. */ cur_mss = tcp_current_mss(sk); + avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; /* If receiver has shrunk his window, and skb is out of * new window, do not retransmit it. The exception is the * case, when window is shrunk to zero. In this case - * our retransmit serves as a zero window probe. + * our retransmit of one segment serves as a zero window probe. */ - if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && - TCP_SKB_CB(skb)->seq != tp->snd_una) - return -EAGAIN; + if (avail_wnd <= 0) { + if (TCP_SKB_CB(skb)->seq != tp->snd_una) + return -EAGAIN; + avail_wnd = cur_mss; + } len = cur_mss * segs; + if (len > avail_wnd) { + len = rounddown(avail_wnd, cur_mss); + if (!len) + len = avail_wnd; + } if (skb->len > len) { if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len, cur_mss, GFP_ATOMIC)) @@ -3192,8 +3204,9 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) diff -= tcp_skb_pcount(skb); if (diff) tcp_adjust_pcount(sk, skb, diff); - if (skb->len < cur_mss) - tcp_retrans_try_collapse(sk, skb, cur_mss); + avail_wnd = min_t(int, avail_wnd, cur_mss); + if (skb->len < avail_wnd) + tcp_retrans_try_collapse(sk, skb, avail_wnd); } /* RFC3168, section 6.1.1.1. ECN fallback */ @@ -3364,11 +3377,12 @@ void tcp_xmit_retransmit_queue(struct sock *sk) */ void sk_forced_mem_schedule(struct sock *sk, int size) { - int amt; + int delta, amt; - if (size <= sk->sk_forward_alloc) + delta = size - sk->sk_forward_alloc; + if (delta <= 0) return; - amt = sk_mem_pages(size); + amt = sk_mem_pages(delta); sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; sk_memory_allocated_add(sk, amt); @@ -3646,7 +3660,7 @@ static void tcp_connect_init(struct sock *sk) * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. */ tp->tcp_header_len = sizeof(struct tcphdr); - if (sock_net(sk)->ipv4.sysctl_tcp_timestamps) + if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps)) tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED; #ifdef CONFIG_TCP_MD5SIG @@ -3682,7 +3696,7 @@ static void tcp_connect_init(struct sock *sk) tp->advmss - (tp->rx_opt.ts_recent_stamp ? 
tp->tcp_header_len - sizeof(struct tcphdr) : 0), &tp->rcv_wnd, &tp->window_clamp, - sock_net(sk)->ipv4.sysctl_tcp_window_scaling, + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling), &rcv_wscale, rcv_wnd); @@ -4090,7 +4104,7 @@ void tcp_send_probe0(struct sock *sk) icsk->icsk_probes_out++; if (err <= 0) { - if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2) + if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2)) icsk->icsk_backoff++; timeout = tcp_probe0_when(sk, TCP_RTO_MAX); } else { @@ -4114,8 +4128,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL, NULL); if (!res) { - __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); if (unlikely(tcp_passive_fastopen(sk))) tcp_sk(sk)->total_retrans++; trace_tcp_retransmit_synack(sk, req); diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index 31fc178f42c0..21fc9859d421 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -19,7 +19,8 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk) return 0; if (tp->sacked_out >= tp->reordering && - !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH)) + !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) & + TCP_RACK_NO_DUPTHRESH)) return 0; } @@ -190,7 +191,8 @@ void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs) { struct tcp_sock *tp = tcp_sk(sk); - if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND || + if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) & + TCP_RACK_STATIC_REO_WND) || !rs->prior_delivered) return; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 4ef08079ccfa..888683f2ff3e 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -143,7 +143,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset) */ static int tcp_orphan_retries(struct sock *sk, bool alive) { - int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */ + int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */ /* We know from an ICMP that something is wrong. */ if (sk->sk_err_soft && !alive) @@ -163,7 +163,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) int mss; /* Black hole detection */ - if (!net->ipv4.sysctl_tcp_mtu_probing) + if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing)) return; if (!icsk->icsk_mtup.enabled) { @@ -171,9 +171,9 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; } else { mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1; - mss = min(net->ipv4.sysctl_tcp_base_mss, mss); - mss = max(mss, net->ipv4.sysctl_tcp_mtu_probe_floor); - mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss); + mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss); + mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor)); + mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss)); icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); } tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); @@ -242,14 +242,14 @@ static int tcp_write_timeout(struct sock *sk) retry_until = icsk->icsk_syn_retries ? 
: net->ipv4.sysctl_tcp_syn_retries; expired = icsk->icsk_retransmits >= retry_until; } else { - if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) { + if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) { /* Black hole detection */ tcp_mtu_probing(icsk, sk); __dst_negative_advice(sk); } - retry_until = net->ipv4.sysctl_tcp_retries2; + retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2); if (sock_flag(sk, SOCK_DEAD)) { const bool alive = icsk->icsk_rto < TCP_RTO_MAX; @@ -380,7 +380,7 @@ static void tcp_probe_timer(struct sock *sk) msecs_to_jiffies(icsk->icsk_user_timeout)) goto abort; - max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2; + max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); if (sock_flag(sk, SOCK_DEAD)) { const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX; @@ -574,7 +574,7 @@ out_reset_timer: * linear-timeout retransmissions into a black hole */ if (sk->sk_state == TCP_ESTABLISHED && - (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) && + (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) && tcp_stream_is_thin(tp) && icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) { icsk->icsk_backoff = 0; @@ -585,7 +585,7 @@ out_reset_timer: } inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX); - if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0)) + if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0)) __sk_dst_reset(sk); out:; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index da857876e321..3c1283c478d9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -447,7 +447,7 @@ static struct sock *udp4_lib_lookup2(struct net *net, result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); /* Fall back to scoring if group has connections */ - if (result && !reuseport_has_conns(sk, false)) + if (result && !reuseport_has_conns(sk)) return result; result = result ? 
: sk; @@ -1206,7 +1206,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) faddr, saddr, dport, inet->inet_sport, sk->sk_uid); - security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); @@ -2196,7 +2196,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old; if (dst_hold_safe(dst)) { - old = xchg(&sk->sk_rx_dst, dst); + old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst); dst_release(old); return old != dst; } @@ -2386,7 +2386,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, struct dst_entry *dst = skb_dst(skb); int ret; - if (unlikely(sk->sk_rx_dst != dst)) + if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) udp_sk_rx_dst_set(sk, dst); ret = udp_unicast_rcv_skb(sk, skb, uh); @@ -2493,8 +2493,7 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net, struct sock *sk; udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { - if (INET_MATCH(sk, net, acookie, rmt_addr, - loc_addr, ports, dif, sdif)) + if (INET_MATCH(net, sk, acookie, ports, dif, sdif)) return sk; /* Only check first socket in chain */ break; @@ -2545,7 +2544,7 @@ int udp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = READ_ONCE(sk->sk_rx_dst); + dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c index ea595c8549c7..cfd46222ef91 100644 --- a/net/ipv4/xfrm4_protocol.c +++ b/net/ipv4/xfrm4_protocol.c @@ -307,4 +307,3 @@ void __init xfrm4_protocol_init(void) { xfrm_input_register_afinfo(&xfrm4_input_afinfo); } -EXPORT_SYMBOL(xfrm4_protocol_init); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ebf4866e10bc..be75bf8b98c2 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -546,7 +546,7 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex, #ifdef CONFIG_IPV6_MROUTE if ((all || type == NETCONFA_MC_FORWARDING) && nla_put_s32(skb, NETCONFA_MC_FORWARDING, - devconf->mc_forwarding) < 0) + atomic_read(&devconf->mc_forwarding)) < 0) goto nla_put_failure; #endif if ((all || type == NETCONFA_PROXY_NEIGH) && @@ -793,6 +793,7 @@ static void dev_forward_change(struct inet6_dev *idev) { struct net_device *dev; struct inet6_ifaddr *ifa; + LIST_HEAD(tmp_addr_list); if (!idev) return; @@ -811,14 +812,24 @@ static void dev_forward_change(struct inet6_dev *idev) } } + read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { if (ifa->flags&IFA_F_TENTATIVE) continue; + list_add_tail(&ifa->if_list_aux, &tmp_addr_list); + } + read_unlock_bh(&idev->lock); + + while (!list_empty(&tmp_addr_list)) { + ifa = list_first_entry(&tmp_addr_list, + struct inet6_ifaddr, if_list_aux); + list_del(&ifa->if_list_aux); if (idev->cnf.forwarding) addrconf_join_anycast(ifa); else addrconf_leave_anycast(ifa); } + inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, NETCONFA_FORWARDING, dev->ifindex, &idev->cnf); @@ -1095,10 +1106,6 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg, goto out; } - if (net->ipv6.devconf_all->disable_policy || - idev->cnf.disable_policy) - f6i->dst_nopolicy = true; - neigh_parms_data_state_setall(idev->nd_parms); ifa->addr = *cfg->pfx; @@ -3739,7 +3746,8 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister) unsigned long event = unregister ? 
NETDEV_UNREGISTER : NETDEV_DOWN; struct net *net = dev_net(dev); struct inet6_dev *idev; - struct inet6_ifaddr *ifa, *tmp; + struct inet6_ifaddr *ifa; + LIST_HEAD(tmp_addr_list); bool keep_addr = false; bool was_ready; int state, i; @@ -3831,16 +3839,23 @@ restart: write_lock_bh(&idev->lock); } - list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { + list_for_each_entry(ifa, &idev->addr_list, if_list) + list_add_tail(&ifa->if_list_aux, &tmp_addr_list); + write_unlock_bh(&idev->lock); + + while (!list_empty(&tmp_addr_list)) { struct fib6_info *rt = NULL; bool keep; + ifa = list_first_entry(&tmp_addr_list, + struct inet6_ifaddr, if_list_aux); + list_del(&ifa->if_list_aux); + addrconf_del_dad_work(ifa); keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) && !addr_is_local(&ifa->addr); - write_unlock_bh(&idev->lock); spin_lock_bh(&ifa->lock); if (keep) { @@ -3871,15 +3886,14 @@ restart: addrconf_leave_solict(ifa->idev, &ifa->addr); } - write_lock_bh(&idev->lock); if (!keep) { + write_lock_bh(&idev->lock); list_del_rcu(&ifa->if_list); + write_unlock_bh(&idev->lock); in6_ifa_put(ifa); } } - write_unlock_bh(&idev->lock); - /* Step 5: Discard anycast and multicast list */ if (unregister) { ipv6_ac_destroy_dev(idev); @@ -4211,7 +4225,8 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, send_rs = send_mld && ipv6_accept_ra(ifp->idev) && ifp->idev->cnf.rtr_solicits != 0 && - (dev->flags&IFF_LOOPBACK) == 0; + (dev->flags & IFF_LOOPBACK) == 0 && + (dev->type != ARPHRD_TUNNEL); read_unlock_bh(&ifp->idev->lock); /* While dad is in progress mld report's source address is in6_addrany. @@ -5544,7 +5559,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE - array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding; + array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding); #endif array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; @@ -7063,9 +7078,8 @@ static int __net_init addrconf_init_net(struct net *net) if (!dflt) goto err_alloc_dflt; - if (IS_ENABLED(CONFIG_SYSCTL) && - !net_eq(net, &init_net)) { - switch (sysctl_devconf_inherit_init_net) { + if (!net_eq(net, &init_net)) { + switch (net_inherit_devconf()) { case 1: /* copy from init_net */ memcpy(all, init_net.ipv6.devconf_all, sizeof(ipv6_devconf)); diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 8a22486cf270..17ac45aa7194 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -437,6 +437,7 @@ static void ip6addrlbl_putmsg(struct nlmsghdr *nlh, { struct ifaddrlblmsg *ifal = nlmsg_data(nlh); ifal->ifal_family = AF_INET6; + ifal->__ifal_reserved = 0; ifal->ifal_prefixlen = prefixlen; ifal->ifal_flags = 0; ifal->ifal_index = ifindex; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index c6db41b5063f..bf3dfe2d12cb 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -225,7 +225,7 @@ lookup_protocol: inet->mc_list = NULL; inet->rcv_tos = 0; - if (net->ipv4.sysctl_ip_no_pmtu_disc) + if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) inet->pmtudisc = IP_PMTUDISC_DONT; else inet->pmtudisc = IP_PMTUDISC_WANT; @@ -663,7 +663,6 @@ int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, const struct proto_ops inet6_stream_ops = { .family = PF_INET6, - .flags = PROTO_CMSG_DATA_ONLY, .owner = THIS_MODULE, .release = inet6_release, .bind = inet6_bind, @@ -821,7 +820,7 @@ int inet6_sk_rebuild_header(struct sock *sk) fl6.fl6_dport = 
inet->inet_dport; fl6.fl6_sport = inet->inet_sport; fl6.flowi6_uid = sk->sk_uid; - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); rcu_read_lock(); final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index cc8ad7ddecda..f4559e5bc84b 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -60,7 +60,7 @@ static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk) if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) fl6->flowi6_oif = np->mcast_oif; - security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); + security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); } int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr) @@ -256,7 +256,7 @@ ipv4_connected: goto out; } - reuseport_has_conns(sk, true); + reuseport_has_conns_set(sk); sk->sk_state = TCP_ESTABLISHED; sk_set_txhash(sk); out: diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 61c913b75c9e..13d5b2c872f4 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -483,7 +483,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info struct page *page; struct sk_buff *trailer; int tailen = esp->tailen; - unsigned int allocsz; if (x->encap) { int err = esp6_output_encap(x, skb, esp); @@ -492,8 +491,8 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info return err; } - allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES); - if (allocsz > ESP_SKB_FRAG_MAXSIZE) + if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE || + ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE) goto cow; if (!skb_cloned(skb)) { diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 1c3f02d05d2b..7608be04d0f5 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -343,6 +343,9 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features xo->seq.low += skb_shinfo(skb)->gso_segs; } + if (xo->seq.low < seq) + xo->seq.hi++; + esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32)); len = skb->len - sizeof(struct ipv6hdr); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index cbab41d557b2..fd1f896115c1 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -573,7 +573,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, fl6.fl6_icmp_code = code; fl6.flowi6_uid = sock_net_uid(net, NULL); fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL); - security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); + security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6)); np = inet6_sk(sk); @@ -755,7 +755,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; fl6.flowi6_mark = mark; fl6.flowi6_uid = sock_net_uid(net, NULL); - security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); + security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6)); local_bh_disable(); sk = icmpv6_xmit_lock(net); @@ -1008,7 +1008,7 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, fl6->fl6_icmp_type = type; fl6->fl6_icmp_code = 0; fl6->flowi6_oif = oif; - security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); + security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); } static void __net_exit icmpv6_sk_exit(struct net *net) diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index e315526fa244..5a9f4d722f35 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -46,7 +46,7 @@ struct dst_entry 
*inet6_csk_route_req(const struct sock *sk, fl6->fl6_dport = ireq->ir_rmt_port; fl6->fl6_sport = htons(ireq->ir_num); fl6->flowi6_uid = sk->sk_uid; - security_req_classify_flow(req, flowi6_to_flowi(fl6)); + security_req_classify_flow(req, flowi6_to_flowi_common(fl6)); dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (IS_ERR(dst)) @@ -95,7 +95,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk, fl6->fl6_sport = inet->inet_sport; fl6->fl6_dport = inet->inet_dport; fl6->flowi6_uid = sk->sk_uid; - security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); + security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); rcu_read_lock(); final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 67c9114835c8..b4a5e01e1201 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -71,12 +71,12 @@ begin: sk_nulls_for_each_rcu(sk, node, &head->chain) { if (sk->sk_hash != hash) continue; - if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif, sdif)) + if (!inet6_match(net, sk, saddr, daddr, ports, dif, sdif)) continue; if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) goto out; - if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif, sdif))) { + if (unlikely(!inet6_match(net, sk, saddr, daddr, ports, dif, sdif))) { sock_gen_put(sk); goto begin; } @@ -269,7 +269,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, if (sk2->sk_hash != hash) continue; - if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, + if (likely(inet6_match(net, sk2, saddr, daddr, ports, dif, sdif))) { if (sk2->sk_state == TCP_TIME_WAIT) { tw = inet_twsk(sk2); @@ -308,7 +308,7 @@ not_unique: return -EADDRNOTAVAIL; } -static u32 inet6_sk_port_offset(const struct sock *sk) +static u64 inet6_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); @@ -320,7 +320,7 @@ static u32 inet6_sk_port_offset(const struct sock *sk) int inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { - u32 port_offset = 0; + u64 port_offset = 0; if (!inet_sk(sk)->inet_num) port_offset = inet6_sk_port_offset(sk); @@ -333,11 +333,8 @@ int inet6_hash(struct sock *sk) { int err = 0; - if (sk->sk_state != TCP_CLOSE) { - local_bh_disable(); + if (sk->sk_state != TCP_CLOSE) err = __inet_hash(sk, NULL); - local_bh_enable(); - } return err; } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 9a0263f25232..0010f9e54f13 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -724,6 +724,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb, { struct ip6_tnl *tunnel = netdev_priv(dev); __be16 protocol; + __be16 flags; if (dev->type == ARPHRD_ETHER) IPCB(skb)->flags = 0; @@ -733,16 +734,13 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb, else fl6->daddr = tunnel->parms.raddr; - if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen)) - return -ENOMEM; - /* Push GRE header. */ protocol = (dev->type == ARPHRD_ETHER) ? 
htons(ETH_P_TEB) : proto; if (tunnel->parms.collect_md) { struct ip_tunnel_info *tun_info; const struct ip_tunnel_key *key; - __be16 flags; + int tun_hlen; tun_info = skb_tunnel_info_txcheck(skb); if (IS_ERR(tun_info) || @@ -760,21 +758,27 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb, dsfield = key->tos; flags = key->tun_flags & (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ); - tunnel->tun_hlen = gre_calc_hlen(flags); + tun_hlen = gre_calc_hlen(flags); - gre_build_header(skb, tunnel->tun_hlen, + if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen)) + return -ENOMEM; + + gre_build_header(skb, tun_hlen, flags, protocol, tunnel_id_to_key32(tun_info->key.tun_id), - (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) + (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0); } else { - if (tunnel->parms.o_flags & TUNNEL_SEQ) - tunnel->o_seqno++; + if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen)) + return -ENOMEM; - gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, + flags = tunnel->parms.o_flags; + + gre_build_header(skb, tunnel->tun_hlen, flags, protocol, tunnel->parms.o_key, - htonl(tunnel->o_seqno)); + (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) + : 0); } return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, @@ -940,7 +944,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, __be16 proto; __u32 mtu; int nhoff; - int thoff; if (!pskb_inet_may_pull(skb)) goto tx_err; @@ -961,10 +964,16 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff)) truncate = true; - thoff = skb_transport_header(skb) - skb_mac_header(skb); - if (skb->protocol == htons(ETH_P_IPV6) && - (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)) - truncate = true; + if (skb->protocol == htons(ETH_P_IPV6)) { + int thoff; + + if (skb_transport_header_was_set(skb)) + thoff = skb_transport_header(skb) - skb_mac_header(skb); + else + thoff = nhoff + sizeof(struct ipv6hdr); + if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff) + truncate = true; + } if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen)) goto tx_err; @@ -1052,7 +1061,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, /* Push GRE header. */ proto = (t->parms.erspan_ver == 1) ? 
htons(ETH_P_ERSPAN) : htons(ETH_P_ERSPAN2); - gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++)); + gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno))); /* TooBig packet may have updated dst->dev's mtu */ if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) @@ -1144,14 +1153,16 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu, dev->needed_headroom = dst_len; if (set_mtu) { - dev->mtu = rt->dst.dev->mtu - t_hlen; - if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) - dev->mtu -= 8; - if (dev->type == ARPHRD_ETHER) - dev->mtu -= ETH_HLEN; + int mtu = rt->dst.dev->mtu - t_hlen; - if (dev->mtu < IPV6_MIN_MTU) - dev->mtu = IPV6_MIN_MTU; + if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) + mtu -= 8; + if (dev->type == ARPHRD_ETHER) + mtu -= ETH_HLEN; + + if (mtu < IPV6_MIN_MTU) + mtu = IPV6_MIN_MTU; + WRITE_ONCE(dev->mtu, mtu); } } ip6_rt_put(rt); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 06d60662717d..4eb9fbfdce33 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -44,21 +44,25 @@ #include #include -INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *)); -INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *)); +void udp_v6_early_demux(struct sk_buff *); +void tcp_v6_early_demux(struct sk_buff *); static void ip6_rcv_finish_core(struct net *net, struct sock *sk, struct sk_buff *skb) { - void (*edemux)(struct sk_buff *skb); - - if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { - const struct inet6_protocol *ipprot; - - ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]); - if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) - INDIRECT_CALL_2(edemux, tcp_v6_early_demux, - udp_v6_early_demux, skb); + if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) && + !skb_dst(skb) && !skb->sk) { + switch (ipv6_hdr(skb)->nexthdr) { + case IPPROTO_TCP: + if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) + tcp_v6_early_demux(skb); + break; + case IPPROTO_UDP: + if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) + udp_v6_early_demux(skb); + break; + } } + if (!skb_valid_dst(skb)) ip6_route_input(skb); } @@ -509,7 +513,7 @@ int ip6_mc_input(struct sk_buff *skb) /* * IPv6 multicast router mode is now supported ;) */ - if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && + if (atomic_read(&dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding) && !(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 2aa39ce7093d..e427f5040a08 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -508,7 +508,7 @@ int ip6_forward(struct sk_buff *skb) goto drop; if (!net->ipv6.devconf_all->disable_policy && - !idev->cnf.disable_policy && + (!idev || !idev->cnf.disable_policy) && !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); goto drop; @@ -919,6 +919,9 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, if (err < 0) goto fail; + /* We prevent @rt from being freed. */ + rcu_read_lock(); + for (;;) { /* Prepare header of the next frame, * before previous one went down. 
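/*
 * The GRE hunks above replace "tunnel->o_seqno++" with
 * atomic_fetch_inc() because transmit can run concurrently on several
 * CPUs.  The idiom in isolation, with an illustrative name:
 */
static atomic_t demo_seqno = ATOMIC_INIT(0);

static __be32 demo_next_seqno(void)
{
	/* returns the pre-increment value, exactly as o_seqno++ did */
	return htonl(atomic_fetch_inc(&demo_seqno));
}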
*/ @@ -942,6 +945,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, if (err == 0) { IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGOKS); + rcu_read_unlock(); return 0; } @@ -949,6 +953,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGFAILS); + rcu_read_unlock(); return err; slow_path_clean: @@ -1313,8 +1318,7 @@ struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb, fl6.daddr = info->key.u.ipv6.dst; fl6.saddr = info->key.u.ipv6.src; prio = info->key.tos; - fl6.flowlabel = ip6_make_flowinfo(RT_TOS(prio), - info->key.label); + fl6.flowlabel = ip6_make_flowinfo(prio, info->key.label); dst = ipv6_stub->ipv6_dst_lookup_flow(net, sock->sk, &fl6, NULL); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 3a2741569b84..0d4cab94c5dd 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1476,8 +1476,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) struct net_device *tdev = NULL; struct __ip6_tnl_parm *p = &t->parms; struct flowi6 *fl6 = &t->fl.u.ip6; - unsigned int mtu; int t_hlen; + int mtu; memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); @@ -1524,12 +1524,13 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) dev->hard_header_len = tdev->hard_header_len + t_hlen; mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU); - dev->mtu = mtu - t_hlen; + mtu = mtu - t_hlen; if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) - dev->mtu -= 8; + mtu -= 8; - if (dev->mtu < IPV6_MIN_MTU) - dev->mtu = IPV6_MIN_MTU; + if (mtu < IPV6_MIN_MTU) + mtu = IPV6_MIN_MTU; + WRITE_ONCE(dev->mtu, mtu); } } } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 41cb348a7c3c..5f0ac47acc74 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -740,7 +740,7 @@ static int mif6_delete(struct mr_table *mrt, int vifi, int notify, in6_dev = __in6_dev_get(dev); if (in6_dev) { - in6_dev->cnf.mc_forwarding--; + atomic_dec(&in6_dev->cnf.mc_forwarding); inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, dev->ifindex, &in6_dev->cnf); @@ -908,7 +908,7 @@ static int mif6_add(struct net *net, struct mr_table *mrt, in6_dev = __in6_dev_get(dev); if (in6_dev) { - in6_dev->cnf.mc_forwarding++; + atomic_inc(&in6_dev->cnf.mc_forwarding); inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, dev->ifindex, &in6_dev->cnf); @@ -1558,7 +1558,7 @@ static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk) } else { rcu_assign_pointer(mrt->mroute_sk, sk); sock_set_flag(sk, SOCK_RCU_FREE); - net->ipv6.devconf_all->mc_forwarding++; + atomic_inc(&net->ipv6.devconf_all->mc_forwarding); } write_unlock_bh(&mrt_lock); @@ -1591,7 +1591,7 @@ int ip6mr_sk_done(struct sock *sk) * so the RCU grace period before sk freeing * is guaranteed by sk_destruct() */ - net->ipv6.devconf_all->mc_forwarding--; + atomic_dec(&net->ipv6.devconf_all->mc_forwarding); write_unlock_bh(&mrt_lock); inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 43a894bf9a1b..2017257cb278 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -208,7 +208,7 @@ static int ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval, if (optlen < GROUP_FILTER_SIZE(0)) return -EINVAL; - if (optlen > sysctl_optmem_max) + if (optlen > READ_ONCE(sysctl_optmem_max)) return -ENOBUFS; gsf = 
 			memdup_sockptr(optval, optlen);
@@ -242,7 +242,7 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
 	if (optlen < size0)
 		return -EINVAL;
-	if (optlen > sysctl_optmem_max - 4)
+	if (optlen > READ_ONCE(sysctl_optmem_max) - 4)
 		return -ENOBUFS;
 
 	p = kmalloc(optlen + 4, GFP_KERNEL);
@@ -417,6 +417,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		rtnl_lock();
 	lock_sock(sk);
 
+	/* Another thread has converted the socket into IPv4 with
+	 * IPV6_ADDRFORM concurrently.
+	 */
+	if (unlikely(sk->sk_family != AF_INET6))
+		goto unlock;
+
 	switch (optname) {
 
 	case IPV6_ADDRFORM:
@@ -976,6 +982,7 @@ done:
 		break;
 	}
 
+unlock:
 	release_sock(sk);
 	if (needs_rtnl)
 		rtnl_unlock();
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 4aef6baaa55e..bf95513736c9 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -179,7 +179,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
 	fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
 	fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
-	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
+	security_skb_classify_flow(oldskb, flowi6_to_flowi_common(&fl6));
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (dst->error) {
 		dst_release(dst);
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
index 8b5193efb1f1..3a00d95e964e 100644
--- a/net/ipv6/netfilter/nft_dup_ipv6.c
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -13,8 +13,8 @@
 #include <net/netfilter/ipv6/nf_dup_ipv6.h>
 
 struct nft_dup_ipv6 {
-	enum nft_registers	sreg_addr:8;
-	enum nft_registers	sreg_dev:8;
+	u8	sreg_addr;
+	u8	sreg_dev;
 };
 
 static void nft_dup_ipv6_eval(const struct nft_expr *expr,
@@ -38,16 +38,16 @@ static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
 	if (tb[NFTA_DUP_SREG_ADDR] == NULL)
 		return -EINVAL;
 
-	priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
-	err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in6_addr));
+	err = nft_parse_register_load(tb[NFTA_DUP_SREG_ADDR], &priv->sreg_addr,
+				      sizeof(struct in6_addr));
 	if (err < 0)
 		return err;
 
-	if (tb[NFTA_DUP_SREG_DEV] != NULL) {
-		priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
-		return nft_validate_register_load(priv->sreg_dev, sizeof(int));
-	}
-	return 0;
+	if (tb[NFTA_DUP_SREG_DEV])
+		err = nft_parse_register_load(tb[NFTA_DUP_SREG_DEV],
+					      &priv->sreg_dev, sizeof(int));
+
+	return err;
 }
 
 static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index 92f3235fa287..602743f6dcee 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -37,6 +37,9 @@ static int nft_fib6_flowi_init(struct flowi6 *fl6, const struct nft_fib *priv,
 	if (ipv6_addr_type(&fl6->daddr) & IPV6_ADDR_LINKLOCAL) {
 		lookup_flags |= RT6_LOOKUP_F_IFACE;
 		fl6->flowi6_oif = get_ifindex(dev ? dev : pkt->skb->dev);
+	} else if ((priv->flags & NFTA_FIB_F_IIF) &&
+		   (netif_is_l3_master(dev) || netif_is_l3_slave(dev))) {
+		fl6->flowi6_oif = dev->ifindex;
 	}
 
 	if (ipv6_addr_type(&fl6->saddr) & IPV6_ADDR_UNICAST)
@@ -193,7 +196,8 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 	if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL))
 		goto put_rt_err;
 
-	if (oif && oif != rt->rt6i_idev->dev)
+	if (oif && oif != rt->rt6i_idev->dev &&
+	    l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) != oif->ifindex)
 		goto put_rt_err;
 
 	nft_fib_store_result(dest, priv, rt->rt6i_idev->dev);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 6caa062f68e7..135e3a060caa 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -22,6 +22,11 @@
 #include <linux/proc_fs.h>
 #include <net/ping.h>
 
+static void ping_v6_destroy(struct sock *sk)
+{
+	inet6_destroy_sock(sk);
+}
+
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
 static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg,
 				 int len, int *addr_len)
@@ -111,7 +116,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	fl6.flowi6_uid = sk->sk_uid;
 	fl6.fl6_icmp_type = user_icmph.icmp6_type;
 	fl6.fl6_icmp_code = user_icmph.icmp6_code;
-	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
 
 	ipcm6_init_sk(&ipc6, np);
 	ipc6.sockc.mark = sk->sk_mark;
@@ -166,6 +171,7 @@ struct proto pingv6_prot = {
 	.owner = THIS_MODULE,
 	.init = ping_init_sock,
 	.close = ping_close,
+	.destroy = ping_v6_destroy,
 	.connect = ip6_datagram_connect_v6_only,
 	.disconnect = __udp_disconnect,
 	.setsockopt = ipv6_setsockopt,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 38349054e361..31eb54e92b3f 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -915,7 +915,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		fl6.flowi6_oif = np->mcast_oif;
 	else if (!fl6.flowi6_oif)
 		fl6.flowi6_oif = np->ucast_oif;
-	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
 
 	if (hdrincl)
 		fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 40a68c61c80b..2b9053eea78c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4356,7 +4356,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
 	struct inet6_dev *idev;
 	int type;
 
-	if (netif_is_l3_master(skb->dev) &&
+	if (netif_is_l3_master(skb->dev) ||
 	    dst->dev == net->loopback_dev)
 		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
 	else
@@ -4436,8 +4436,15 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
 	}
 
 	f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
-	if (!IS_ERR(f6i))
+	if (!IS_ERR(f6i)) {
 		f6i->dst_nocount = true;
+
+		if (!anycast &&
+		    (net->ipv6.devconf_all->disable_policy ||
+		     idev->cnf.disable_policy))
+			f6i->dst_nopolicy = true;
+	}
+
 	return f6i;
 }
@@ -5591,7 +5598,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
 		if (nexthop_is_blackhole(rt->nh))
 			rtm->rtm_type = RTN_BLACKHOLE;
 
-		if (net->ipv4.sysctl_nexthop_compat_mode &&
+		if (READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode) &&
 		    rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
 			goto nla_put_failure;
 
@@ -6355,10 +6362,16 @@ static void __net_exit ip6_route_net_exit(struct net *net)
 static int __net_init ip6_route_net_init_late(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-	proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
-			sizeof(struct ipv6_route_iter));
-	proc_create_net_single("rt6_stats", 0444, net->proc_net,
-			       rt6_stats_seq_show, NULL);
+	if (!proc_create_net("ipv6_route", 0, net->proc_net,
+			     &ipv6_route_seq_ops,
+			     sizeof(struct ipv6_route_iter)))
+		return -ENOMEM;
+
+	if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
+				    rt6_stats_seq_show, NULL)) {
+		remove_proc_entry("ipv6_route", net->proc_net);
+		return -ENOMEM;
+	}
#endif
 	return 0;
 }
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index d2f8138e5a73..2278c0234c49 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -135,6 +135,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info)
 		goto out_unlock;
 	}
 
+	if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) {
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
 	if (hinfo) {
 		err = seg6_hmac_info_del(net, hmackeyid);
 		if (err)
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index 85dddfe3a2c6..552bce1fdfb9 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -400,7 +400,6 @@ int __init seg6_hmac_init(void)
 {
 	return seg6_hmac_init_algo();
 }
-EXPORT_SYMBOL(seg6_hmac_init);
 
 int __net_init seg6_hmac_net_init(struct net *net)
 {
@@ -410,7 +409,6 @@ int __net_init seg6_hmac_net_init(struct net *net)
 
 	return 0;
 }
-EXPORT_SYMBOL(seg6_hmac_net_init);
 
 void seg6_hmac_exit(void)
 {
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 4d4399c5c5ea..40ac23242c37 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -188,6 +188,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
 	}
#endif
 
+	hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+
 	skb_postpush_rcsum(skb, hdr, tot_len);
 
 	return 0;
@@ -240,6 +242,8 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
 	}
#endif
 
+	hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+
 	skb_postpush_rcsum(skb, hdr, sizeof(struct ipv6hdr) + hdrlen);
 
 	return 0;
@@ -301,7 +305,6 @@ static int seg6_do_srh(struct sk_buff *skb)
 		break;
 	}
 
-	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 
 	return 0;
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index eba23279912d..11f7da4139f6 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -435,7 +435,6 @@ static int input_action_end_b6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
 	if (err)
 		goto drop;
 
-	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 
 	seg6_lookup_nexthop(skb, NULL, 0);
@@ -467,7 +466,6 @@ static int input_action_end_b6_encap(struct sk_buff *skb,
 	if (err)
 		goto drop;
 
-	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 
 	seg6_lookup_nexthop(skb, NULL, 0);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index bab0e99f6e35..1ce486a9bc07 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -321,9 +321,7 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ifreq *ifr)
 	    kcalloc(cmax, sizeof(*kp), GFP_KERNEL | __GFP_NOWARN) :
 	    NULL;
 
-	rcu_read_lock();
-
-	ca = t->prl_count < cmax ? t->prl_count : cmax;
+	ca = min(t->prl_count, cmax);
 
 	if (!kp) {
 		/* We don't try hard to allocate much memory for
@@ -338,7 +336,7 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ifreq *ifr)
 		}
 	}
 
-	c = 0;
+	rcu_read_lock();
 	for_each_prl_rcu(t->prl) {
 		if (c >= cmax)
 			break;
@@ -350,7 +348,7 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ifreq *ifr)
 		if (kprl.addr != htonl(INADDR_ANY))
 			break;
 	}
-out:
+
 	rcu_read_unlock();
 
 	len = sizeof(*kp) * c;
@@ -359,7 +357,7 @@ out:
 		ret = -EFAULT;
 
 	kfree(kp);
-
+out:
 	return ret;
 }
@@ -1125,10 +1123,12 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
 
 	if (tdev && !netif_is_l3_master(tdev)) {
 		int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+		int mtu;
 
-		dev->mtu = tdev->mtu - t_hlen;
-		if (dev->mtu < IPV6_MIN_MTU)
-			dev->mtu = IPV6_MIN_MTU;
+		mtu = tdev->mtu - t_hlen;
+		if (mtu < IPV6_MIN_MTU)
+			mtu = IPV6_MIN_MTU;
+		WRITE_ONCE(dev->mtu, mtu);
 	}
 }
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 9b6cae1e49d9..12ae817aaf2e 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -141,7 +141,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	__u8 rcv_wscale;
 	u32 tsoff = 0;
 
-	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
+	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||
+	    !th->ack || th->rst)
 		goto out;
 
 	if (tcp_synq_no_recent_overflow(sk))
@@ -170,7 +171,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 		goto out;
 
 	ret = NULL;
-	req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops, sk, skb);
+	req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops,
+				     &tcp_request_sock_ipv6_ops, sk, skb);
 	if (!req)
 		goto out;
@@ -233,7 +235,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 		fl6.fl6_dport = ireq->ir_rmt_port;
 		fl6.fl6_sport = inet_sk(sk)->inet_sport;
 		fl6.flowi6_uid = sk->sk_uid;
-		security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+		security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
 
 		dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 		if (IS_ERR(dst))
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index df33145b876c..c599e14be414 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -107,7 +107,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
 	if (dst && dst_hold_safe(dst)) {
 		const struct rt6_info *rt = (const struct rt6_info *)dst;
 
-		sk->sk_rx_dst = dst;
+		rcu_assign_pointer(sk->sk_rx_dst, dst);
 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 		tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 	}
@@ -278,7 +278,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 	final_p = fl6_update_dst(&fl6, opt, &final);
 
-	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
 
 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 	if (IS_ERR(dst)) {
@@ -339,6 +339,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
 late_failure:
 	tcp_set_state(sk, TCP_CLOSE);
+	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
+		inet_reset_saddr(sk);
 failure:
 	inet->inet_dport = 0;
 	sk->sk_route_caps = 0;
@@ -542,7 +544,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 		if (np->repflow && ireq->pktopts)
 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
-		tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+		tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
 				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
 				(np->tclass & INET_ECN_MASK) :
 				np->tclass;
@@ -975,7 +977,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 	fl6.fl6_dport = t1->dest;
 	fl6.fl6_sport = t1->source;
 	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
-	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
 
 	/* Pass a socket to ip6_dst_lookup either it is for RST
 	 * Underlying function will use this to retrieve the network
@@ -1344,7 +1346,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	/* Set ToS of the new socket based upon the value of incoming SYN.
 	 * ECT bits are set later in tcp_init_transfer().
 	 */
-	if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
 		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
 
 	/* Clone native IPv6 options from listening socket (if any)
@@ -1482,15 +1484,18 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-		struct dst_entry *dst = sk->sk_rx_dst;
+		struct dst_entry *dst;
+
+		dst = rcu_dereference_protected(sk->sk_rx_dst,
+						lockdep_sock_is_held(sk));
 
 		sock_rps_save_rxhash(sk, skb);
 		sk_mark_napi_id(sk, skb);
 		if (dst) {
 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
 			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
+				RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
 				dst_release(dst);
-				sk->sk_rx_dst = NULL;
 			}
 		}
 
@@ -1815,7 +1820,7 @@ do_time_wait:
 	goto discard_it;
 }
 
-INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
+void tcp_v6_early_demux(struct sk_buff *skb)
 {
 	const struct ipv6hdr *hdr;
 	const struct tcphdr *th;
@@ -1842,7 +1847,7 @@ INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
 		skb->sk = sk;
 		skb->destructor = sock_edemux;
 		if (sk_fullsock(sk)) {
-			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
 
 			if (dst)
 				dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
@@ -2166,12 +2171,7 @@ struct proto tcpv6_prot = {
 };
 EXPORT_SYMBOL_GPL(tcpv6_prot);
 
-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct inet6_protocol tcpv6_protocol = {
-	.early_demux		= tcp_v6_early_demux,
-	.early_demux_handler	= tcp_v6_early_demux,
+static const struct inet6_protocol tcpv6_protocol = {
 	.handler	= tcp_v6_rcv,
 	.err_handler	= tcp_v6_err,
 	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 10760164a80f..1805cc5f7418 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -179,7 +179,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 			result = lookup_reuseport(net, sk, skb,
 						  saddr, sport, daddr, hnum);
 			/* Fall back to scoring if group has connections */
-			if (result && !reuseport_has_conns(sk, false))
+			if (result && !reuseport_has_conns(sk))
 				return result;
 
 			result = result ? : sk;
@@ -941,7 +941,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		struct dst_entry *dst = skb_dst(skb);
 		int ret;
 
-		if (unlikely(sk->sk_rx_dst != dst))
+		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
 			udp6_sk_rx_dst_set(sk, dst);
 
 		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
@@ -1019,7 +1019,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
 		if (sk->sk_state == TCP_ESTABLISHED &&
-		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
+		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
 			return sk;
 		/* Only check first socket in chain */
 		break;
@@ -1027,7 +1027,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
 	return NULL;
 }
 
-INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
+void udp_v6_early_demux(struct sk_buff *skb)
 {
 	struct net *net = dev_net(skb->dev);
 	const struct udphdr *uh;
@@ -1055,7 +1055,7 @@ INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
 		skb->sk = sk;
 		skb->destructor = sock_efree;
-		dst = READ_ONCE(sk->sk_rx_dst);
+		dst = rcu_dereference(sk->sk_rx_dst);
 
 		if (dst)
 			dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
@@ -1497,7 +1497,7 @@ do_udp_sendmsg:
 	} else if (!fl6.flowi6_oif)
 		fl6.flowi6_oif = np->ucast_oif;
 
-	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
 
 	if (ipc6.tclass < 0)
 		ipc6.tclass = np->tclass;
@@ -1640,12 +1640,7 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
 	return ipv6_getsockopt(sk, level, optname, optval, optlen);
 }
 
-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct inet6_protocol udpv6_protocol = {
-	.early_demux		= udp_v6_early_demux,
-	.early_demux_handler	= udp_v6_early_demux,
+static const struct inet6_protocol udpv6_protocol = {
 	.handler	= udpv6_rcv,
 	.err_handler	= udpv6_err,
 	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index af7a4b8b1e9c..247296e3294b 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -289,9 +289,13 @@ int __init xfrm6_init(void)
 	if (ret)
 		goto out_state;
 
-	register_pernet_subsys(&xfrm6_net_ops);
+	ret = register_pernet_subsys(&xfrm6_net_ops);
+	if (ret)
+		goto out_protocol;
 out:
 	return ret;
+out_protocol:
+	xfrm6_protocol_fini();
 out_state:
 	xfrm6_state_fini();
 out_policy:
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 56dad9565bc9..32b516ab9c47 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -161,7 +161,8 @@ static void kcm_rcv_ready(struct kcm_sock *kcm)
 	/* Buffer limit is okay now, add to ready list */
 	list_add_tail(&kcm->wait_rx_list,
 		      &kcm->mux->kcm_rx_waiters);
-	kcm->rx_wait = true;
+	/* paired with lockless reads in kcm_rfree() */
+	WRITE_ONCE(kcm->rx_wait, true);
 }
 
 static void kcm_rfree(struct sk_buff *skb)
@@ -177,7 +178,7 @@ static void kcm_rfree(struct sk_buff *skb)
 	/* For reading rx_wait and rx_psock without holding lock */
 	smp_mb__after_atomic();
 
-	if (!kcm->rx_wait && !kcm->rx_psock &&
+	if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
 	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
 		spin_lock_bh(&mux->rx_lock);
 		kcm_rcv_ready(kcm);
@@ -220,7 +221,7 @@ static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
 	struct sk_buff *skb;
 	struct kcm_sock *kcm;
 
-	while ((skb = __skb_dequeue(head))) {
+	while ((skb = skb_dequeue(head))) {
 		/* Reset destructor to avoid calling kcm_rcv_ready */
 		skb->destructor = sock_rfree;
 		skb_orphan(skb);
@@ -236,7 +237,8 @@ try_again:
 		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
 			/* Should mean socket buffer full */
 			list_del(&kcm->wait_rx_list);
-			kcm->rx_wait = false;
+			/* paired with lockless reads in kcm_rfree() */
+			WRITE_ONCE(kcm->rx_wait, false);
 
 			/* Commit rx_wait to read in kcm_free */
 			smp_wmb();
@@ -279,10 +281,12 @@ static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
 	kcm = list_first_entry(&mux->kcm_rx_waiters,
 			       struct kcm_sock, wait_rx_list);
 	list_del(&kcm->wait_rx_list);
-	kcm->rx_wait = false;
+	/* paired with lockless reads in kcm_rfree() */
+	WRITE_ONCE(kcm->rx_wait, false);
 
 	psock->rx_kcm = kcm;
-	kcm->rx_psock = psock;
+	/* paired with lockless reads in kcm_rfree() */
+	WRITE_ONCE(kcm->rx_psock, psock);
 
 	spin_unlock_bh(&mux->rx_lock);
 
@@ -309,7 +313,8 @@ static void unreserve_rx_kcm(struct kcm_psock *psock,
 	spin_lock_bh(&mux->rx_lock);
 
 	psock->rx_kcm = NULL;
-	kcm->rx_psock = NULL;
+	/* paired with lockless reads in kcm_rfree() */
+	WRITE_ONCE(kcm->rx_psock, NULL);
 
 	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
 	 * kcm_rfree
@@ -1079,53 +1084,18 @@ out_error:
 	return err;
 }
 
-static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
-				     long timeo, int *err)
-{
-	struct sk_buff *skb;
-
-	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
-		if (sk->sk_err) {
-			*err = sock_error(sk);
-			return NULL;
-		}
-
-		if (sock_flag(sk, SOCK_DONE))
-			return NULL;
-
-		if ((flags & MSG_DONTWAIT) || !timeo) {
-			*err = -EAGAIN;
-			return NULL;
-		}
-
-		sk_wait_data(sk, &timeo, NULL);
-
-		/* Handle signals */
-		if (signal_pending(current)) {
-			*err = sock_intr_errno(timeo);
-			return NULL;
-		}
-	}
-
-	return skb;
-}
-
 static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
 		       size_t len, int flags)
 {
+	int noblock = flags & MSG_DONTWAIT;
 	struct sock *sk = sock->sk;
 	struct kcm_sock *kcm = kcm_sk(sk);
 	int err = 0;
-	long timeo;
 	struct strp_msg *stm;
 	int copied = 0;
 	struct sk_buff *skb;
 
-	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
-
-	lock_sock(sk);
-
-	skb = kcm_wait_data(sk, flags, timeo, &err);
+	skb = skb_recv_datagram(sk, flags, noblock, &err);
 	if (!skb)
 		goto out;
 
@@ -1156,14 +1126,11 @@ msg_finished:
 			/* Finished with message */
 			msg->msg_flags |= MSG_EOR;
 			KCM_STATS_INCR(kcm->stats.rx_msgs);
-			skb_unlink(skb, &sk->sk_receive_queue);
-			kfree_skb(skb);
 		}
 	}
 
out:
-	release_sock(sk);
-
+	skb_free_datagram(sk, skb);
 	return copied ? : err;
 }
 
@@ -1171,9 +1138,9 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
 			       struct pipe_inode_info *pipe, size_t len,
 			       unsigned int flags)
 {
+	int noblock = flags & MSG_DONTWAIT;
 	struct sock *sk = sock->sk;
 	struct kcm_sock *kcm = kcm_sk(sk);
-	long timeo;
 	struct strp_msg *stm;
 	int err = 0;
 	ssize_t copied;
@@ -1181,11 +1148,7 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
 
 	/* Only support splice for SOCKSEQPACKET */
 
-	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
-
-	lock_sock(sk);
-
-	skb = kcm_wait_data(sk, flags, timeo, &err);
+	skb = skb_recv_datagram(sk, flags, noblock, &err);
 	if (!skb)
 		goto err_out;
 
@@ -1213,13 +1176,11 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
 	 * finish reading the message.
 	 */
 
-	release_sock(sk);
-
+	skb_free_datagram(sk, skb);
 	return copied;
 
err_out:
-	release_sock(sk);
-
+	skb_free_datagram(sk, skb);
 	return err;
 }
 
@@ -1239,7 +1200,8 @@ static void kcm_recv_disable(struct kcm_sock *kcm)
 	if (!kcm->rx_psock) {
 		if (kcm->rx_wait) {
 			list_del(&kcm->wait_rx_list);
-			kcm->rx_wait = false;
+			/* paired with lockless reads in kcm_rfree() */
+			WRITE_ONCE(kcm->rx_wait, false);
 		}
 
 		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
@@ -1411,12 +1373,6 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
 	psock->sk = csk;
 	psock->bpf_prog = prog;
 
-	err = strp_init(&psock->strp, csk, &cb);
-	if (err) {
-		kmem_cache_free(kcm_psockp, psock);
-		goto out;
-	}
-
 	write_lock_bh(&csk->sk_callback_lock);
 
 	/* Check if sk_user_data is aready by KCM or someone else.
@@ -1424,13 +1380,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
 	 */
 	if (csk->sk_user_data) {
 		write_unlock_bh(&csk->sk_callback_lock);
-		strp_stop(&psock->strp);
-		strp_done(&psock->strp);
 		kmem_cache_free(kcm_psockp, psock);
 		err = -EALREADY;
 		goto out;
 	}
 
+	err = strp_init(&psock->strp, csk, &cb);
+	if (err) {
+		write_unlock_bh(&csk->sk_callback_lock);
+		kmem_cache_free(kcm_psockp, psock);
+		goto out;
+	}
+
 	psock->save_data_ready = csk->sk_data_ready;
 	psock->save_write_space = csk->sk_write_space;
 	psock->save_state_change = csk->sk_state_change;
@@ -1793,7 +1754,8 @@ static void kcm_done(struct kcm_sock *kcm)
 
 	if (kcm->rx_wait) {
 		list_del(&kcm->wait_rx_list);
-		kcm->rx_wait = false;
+		/* paired with lockless reads in kcm_rfree() */
+		WRITE_ONCE(kcm->rx_wait, false);
 	}
 	/* Move any pending receive messages to other kcm sockets */
 	requeue_rx_msgs(mux, &sk->sk_receive_queue);
@@ -1838,10 +1800,10 @@ static int kcm_release(struct socket *sock)
 	kcm = kcm_sk(sk);
 	mux = kcm->mux;
 
+	lock_sock(sk);
 	sock_orphan(sk);
 	kfree_skb(kcm->seq_skb);
 
-	lock_sock(sk);
 	/* Purge queue under lock to avoid race condition with tx_work trying
 	 * to act when queue is nonempty. If tx_work runs after this point
 	 * it will just return.
diff --git a/net/key/af_key.c b/net/key/af_key.c
index bd9b5c573b5a..8bc7d399987b 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1701,9 +1701,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
 		pfk->registered |= (1<<hdr->sadb_msg_satype);
 	}
 
+	mutex_lock(&pfkey_mutex);
 	xfrm_probe_algs();
 
 	supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
+	mutex_unlock(&pfkey_mutex);
+
 	if (!supp_skb) {
 		if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
 			pfk->registered &= ~(1<<hdr->sadb_msg_satype);
@@ -2830,6 +2833,10 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
 	void *ext_hdrs[SADB_EXT_MAX];
 	int err;
 
+	/* Non-zero return value of pfkey_broadcast() does not always signal
+	 * an error and even on an actual error we may still want to process
+	 * the message so rather ignore the return value.
+	 */
 	pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
 			BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
 
@@ -2938,9 +2945,10 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
 	return sz + sizeof(struct sadb_prop);
 }
 
-static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
+static int dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
 {
 	struct sadb_prop *p;
+	int sz = 0;
 	int i;
 
 	p = skb_put(skb, sizeof(struct sadb_prop));
@@ -2968,13 +2976,17 @@ static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
 			c->sadb_comb_soft_addtime = 20*60*60;
 			c->sadb_comb_hard_usetime = 8*60*60;
 			c->sadb_comb_soft_usetime = 7*60*60;
+			sz += sizeof(*c);
 		}
 	}
+
+	return sz + sizeof(*p);
 }
 
-static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
+static int dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
 {
 	struct sadb_prop *p;
+	int sz = 0;
 	int i, k;
 
 	p = skb_put(skb, sizeof(struct sadb_prop));
@@ -3016,8 +3028,11 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
 			c->sadb_comb_soft_addtime = 20*60*60;
 			c->sadb_comb_hard_usetime = 8*60*60;
 			c->sadb_comb_soft_usetime = 7*60*60;
+			sz += sizeof(*c);
 		}
 	}
+
+	return sz + sizeof(*p);
 }
 
 static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c)
@@ -3147,6 +3162,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
 	struct sadb_x_sec_ctx *sec_ctx;
 	struct xfrm_sec_ctx *xfrm_ctx;
 	int ctx_size = 0;
+	int alg_size = 0;
 
 	sockaddr_size = pfkey_sockaddr_size(x->props.family);
 	if (!sockaddr_size)
@@ -3158,16 +3174,16 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
 		sizeof(struct sadb_x_policy);
 
 	if (x->id.proto == IPPROTO_AH)
-		size += count_ah_combs(t);
+		alg_size = count_ah_combs(t);
 	else if (x->id.proto == IPPROTO_ESP)
-		size += count_esp_combs(t);
+		alg_size = count_esp_combs(t);
 
 	if ((xfrm_ctx = x->security)) {
 		ctx_size = PFKEY_ALIGN8(xfrm_ctx->ctx_len);
 		size += sizeof(struct sadb_x_sec_ctx) + ctx_size;
 	}
 
-	skb = alloc_skb(size + 16, GFP_ATOMIC);
+	skb = alloc_skb(size + alg_size + 16, GFP_ATOMIC);
 	if (skb == NULL)
 		return -ENOMEM;
 
@@ -3221,10 +3237,13 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
 	pol->sadb_x_policy_priority = xp->priority;
 
 	/* Set sadb_comb's. */
+	alg_size = 0;
 	if (x->id.proto == IPPROTO_AH)
-		dump_ah_combs(skb, t);
+		alg_size = dump_ah_combs(skb, t);
 	else if (x->id.proto == IPPROTO_ESP)
-		dump_esp_combs(skb, t);
+		alg_size = dump_esp_combs(skb, t);
+
+	hdr->sadb_msg_len += alg_size / 8;
 
 	/* security context */
 	if (xfrm_ctx) {
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 561b6d67ab8b..dc8987ed08ad 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1480,11 +1480,15 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
 	tunnel->l2tp_net = net;
 	pn = l2tp_pernet(net);
 
+	sk = sock->sk;
+	sock_hold(sk);
+	tunnel->sock = sk;
+
 	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
 	list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
 		if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
 			spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-
+			sock_put(sk);
 			ret = -EEXIST;
 			goto err_sock;
 		}
@@ -1492,10 +1496,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
 	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
 	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
 
-	sk = sock->sk;
-	sock_hold(sk);
-	tunnel->sock = sk;
-
 	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
 		struct udp_tunnel_sock_cfg udp_cfg = {
 			.sk_user_data = tunnel,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index e5e5036257b0..d54dbd01d86f 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -502,14 +502,15 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	struct ipcm6_cookie ipc6;
 	int addr_len = msg->msg_namelen;
 	int transhdrlen = 4; /* zero session-id */
-	int ulen = len + transhdrlen;
+	int ulen;
 	int err;
 
 	/* Rough check on arithmetic overflow,
 	 * better check is made in ip6_append_data().
 	 */
-	if (len > INT_MAX)
+	if (len > INT_MAX - transhdrlen)
 		return -EMSGSIZE;
+	ulen = len + transhdrlen;
 
 	/* Mirror BSD error message compatibility */
 	if (msg->msg_flags & MSG_OOB)
@@ -606,7 +607,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	else if (!fl6.flowi6_oif)
 		fl6.flowi6_oif = np->ucast_oif;
 
-	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
 
 	if (ipc6.tclass < 0)
 		ipc6.tclass = np->tclass;
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index 864326f150e2..f2c3a61ad134 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -147,7 +147,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
 
 	dev = dev_get_by_index_rcu(net, ifindex);
 	while (dev && !netif_is_l3_master(dev))
-		dev = netdev_master_upper_dev_get(dev);
+		dev = netdev_master_upper_dev_get_rcu(dev);
 
 	return dev ? dev->ifindex : 0;
 }
diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c
index 26d2f8ba7029..758ef63669e7 100644
--- a/net/mac80211/airtime.c
+++ b/net/mac80211/airtime.c
@@ -457,6 +457,9 @@ static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
 		    (status->encoding == RX_ENC_HE && streams > 8)))
 		return 0;
 
+	if (idx >= MCS_GROUP_RATES)
+		return 0;
+
 	duration = airtime_mcs_groups[group].duration[idx];
 	duration <<= airtime_mcs_groups[group].shift;
 	*overhead = 36 + (streams << 2);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 8010967a6874..c6a7f1c99abc 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3357,9 +3357,6 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
 	case NL80211_IFTYPE_MESH_POINT: {
 		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
-		if (params->chandef.width != sdata->vif.bss_conf.chandef.width)
-			return -EINVAL;
-
 		/* changes into another band are not supported */
 		if (sdata->vif.bss_conf.chandef.chan->band !=
 		    params->chandef.chan->band)
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 8f48aff74c7b..5639a71257e4 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1652,12 +1652,9 @@ int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata)
 
 	if (new_ctx->replace_state == IEEE80211_CHANCTX_REPLACE_NONE) {
 		if (old_ctx)
-			err = ieee80211_vif_use_reserved_reassign(sdata);
-		else
-			err = ieee80211_vif_use_reserved_assign(sdata);
+			return ieee80211_vif_use_reserved_reassign(sdata);
 
-		if (err)
-			return err;
+		return ieee80211_vif_use_reserved_assign(sdata);
 	}
 
 	/*
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a7ac53a2f00d..78ae58ec397a 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -541,6 +541,10 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
 
 	sdata_assert_lock(sdata);
 
+	/* When not connected/joined, sending CSA doesn't make sense. */
+	if (ifibss->state != IEEE80211_IBSS_MLME_JOINED)
+		return -ENOLINK;
+
 	/* update cfg80211 bss information with the new channel */
 	if (!is_zero_ether_addr(ifibss->bssid)) {
 		cbss = cfg80211_get_bss(sdata->local->hw.wiphy,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index fe8f586886b4..63499db5c63d 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1103,6 +1103,9 @@ struct tpt_led_trigger {
 *	a scan complete for an aborted scan.
 * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
 *	cancelled.
+ * @SCAN_BEACON_WAIT: Set whenever we're passive scanning because of radar/no-IR
+ *	and could send a probe request after receiving a beacon.
+ * @SCAN_BEACON_DONE: Beacon received, we can now send a probe request
 */
 enum {
 	SCAN_SW_SCANNING,
@@ -1111,6 +1114,8 @@ enum {
 	SCAN_COMPLETED,
 	SCAN_ABORTED,
 	SCAN_HW_CANCELLED,
+	SCAN_BEACON_WAIT,
+	SCAN_BEACON_DONE,
 };
 
 /**
@@ -1480,7 +1485,6 @@ struct ieee802_11_elems {
 	const u8 *supp_rates;
 	const u8 *ds_params;
 	const struct ieee80211_tim_ie *tim;
-	const u8 *challenge;
 	const u8 *rsn;
 	const u8 *rsnx;
 	const u8 *erp_info;
@@ -1533,7 +1537,6 @@ struct ieee802_11_elems {
 	u8 ssid_len;
 	u8 supp_rates_len;
 	u8 tim_len;
-	u8 challenge_len;
 	u8 rsn_len;
 	u8 rsnx_len;
 	u8 ext_supp_rates_len;
@@ -1548,6 +1551,8 @@ struct ieee802_11_elems {
 	u8 country_elem_len;
 	u8 bssid_index_len;
 
+	void *nontx_profile;
+
 	/* whether a parse error occurred while retrieving these elements */
 	bool parse_error;
 };
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 73893025922f..ae90ac3be59a 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1349,8 +1349,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	ieee80211_led_exit(local);
 	destroy_workqueue(local->workqueue);
 fail_workqueue:
-	if (local->wiphy_ciphers_allocated)
+	if (local->wiphy_ciphers_allocated) {
 		kfree(local->hw.wiphy->cipher_suites);
+		local->wiphy_ciphers_allocated = false;
+	}
 	kfree(local->int_scan_req);
 	return result;
 }
@@ -1420,8 +1422,10 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
 	mutex_destroy(&local->iflist_mtx);
 	mutex_destroy(&local->mtx);
 
-	if (local->wiphy_ciphers_allocated)
+	if (local->wiphy_ciphers_allocated) {
 		kfree(local->hw.wiphy->cipher_suites);
+		local->wiphy_ciphers_allocated = false;
+	}
 
 	idr_for_each(&local->ack_status_frames,
 		     ieee80211_free_ack_frame, NULL);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 870c8eafef92..c2b051e0610a 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -718,7 +718,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
 void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
 			     struct sk_buff *skb)
 {
-	kfree_skb(skb);
+	ieee80211_free_txskb(&sdata->local->hw, skb);
 	sdata->u.mesh.mshstats.dropped_frames_no_route++;
 }
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0dba353d3f8f..c52b8eb7fb8a 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2899,14 +2899,14 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
+	const struct element *challenge;
 	u8 *pos;
-	struct ieee802_11_elems elems;
 	u32 tx_flags = 0;
 
 	pos = mgmt->u.auth.variable;
-	ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
-			       mgmt->bssid, auth_data->bss->bssid);
-	if (!elems.challenge)
+	challenge = cfg80211_find_elem(WLAN_EID_CHALLENGE, pos,
+				       len - (pos - (u8 *)mgmt));
+	if (!challenge)
 		return;
 	auth_data->expected_transaction = 4;
 	drv_mgd_prepare_tx(sdata->local, sdata, 0);
@@ -2914,7 +2914,8 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
 		tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
 			   IEEE80211_TX_INTFL_MLME_CONN_TX;
 	ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0,
-			    elems.challenge - 2, elems.challenge_len + 2,
+			    (void *)challenge,
+			    challenge->datalen + sizeof(*challenge),
 			    auth_data->bss->bssid, auth_data->bss->bssid,
 			    auth_data->key, auth_data->key_len,
 			    auth_data->key_idx, tx_flags);
@@ -3299,7 +3300,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 	}
 
 	capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
 	ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, elems,
-			       mgmt->bssid, assoc_data->bss->bssid);
+			       mgmt->bssid, NULL);
 
 	if (elems->aid_resp)
 		aid = le16_to_cpu(elems->aid_resp->aid);
@@ -3393,6 +3394,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 			sdata_info(sdata,
 				   "AP bug: VHT operation missing from AssocResp\n");
 	}
+	kfree(bss_elems.nontx_profile);
 }
 
 /*
@@ -3528,6 +3530,12 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 		       cbss->transmitted_bss->bssid);
 		bss_conf->bssid_indicator = cbss->max_bssid_indicator;
 		bss_conf->bssid_index = cbss->bssid_index;
+	} else {
+		bss_conf->nontransmitted = false;
+		memset(bss_conf->transmitter_bssid, 0,
+		       sizeof(bss_conf->transmitter_bssid));
+		bss_conf->bssid_indicator = 0;
+		bss_conf->bssid_index = 0;
 	}
 
 	/*
@@ -3701,7 +3709,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
 		return;
 
 	ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
-			       mgmt->bssid, assoc_data->bss->bssid);
+			       mgmt->bssid, NULL);
 
 	if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
 	    elems.timeout_int &&
@@ -4038,6 +4046,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 		ifmgd->assoc_data->timeout = jiffies;
 		ifmgd->assoc_data->timeout_started = true;
 		run_again(sdata, ifmgd->assoc_data->timeout);
+		kfree(elems.nontx_profile);
 		return;
 	}
 
@@ -4215,7 +4224,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 		ieee80211_report_disconnect(sdata, deauth_buf,
 					    sizeof(deauth_buf), true,
 					    WLAN_REASON_DEAUTH_LEAVING);
-		return;
+		goto free;
 	}
 
 	if (sta && elems.opmode_notif)
@@ -4230,6 +4239,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 				       elems.cisco_dtpc_elem);
 
 	ieee80211_bss_info_change_notify(sdata, changed);
+free:
+	kfree(elems.nontx_profile);
 }
 
 void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1e7614abd947..97a63b940482 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1387,8 +1387,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
 		goto dont_reorder;
 
 	/* not part of a BA session */
-	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
-	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+	if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
 		goto dont_reorder;
 
 	/* new, potentially un-ordered, ampdu frame - process it */
@@ -1976,10 +1975,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
 
 		if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS ||
 		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
-		    NUM_DEFAULT_BEACON_KEYS) {
-			cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
-						     skb->data,
-						     skb->len);
+		    NUM_DEFAULT_BEACON_KEYS) {
+			if (rx->sdata->dev)
+				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
+							     skb->data,
+							     skb->len);
 			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
 		}
 
@@ -2127,7 +2127,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
 	/* either the frame has been decrypted or will be dropped */
 	status->flag |= RX_FLAG_DECRYPTED;
 
-	if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE))
+	if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE &&
+		     rx->sdata->dev))
 		cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
 					     skb->data, skb->len);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 6b50cb5e0e3c..b241ff8c015a 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -227,6 +227,8 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
 						rx_status, beacon);
 	}
 
+	kfree(elems.nontx_profile);
+
 	return bss;
 }
 
@@ -277,6 +279,16 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
 	if (likely(!sdata1 && !sdata2))
 		return;
 
+	if (test_and_clear_bit(SCAN_BEACON_WAIT, &local->scanning)) {
+		/*
+		 * we were passive scanning because of radar/no-IR, but
+		 * the beacon/proberesp rx gives us an opportunity to upgrade
+		 * to active scan
+		 */
+		set_bit(SCAN_BEACON_DONE, &local->scanning);
+		ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+	}
+
 	if (ieee80211_is_probe_resp(mgmt->frame_control)) {
 		struct cfg80211_scan_request *scan_req;
 		struct cfg80211_sched_scan_request *sched_scan_req;
@@ -451,16 +463,19 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
 	scan_req = rcu_dereference_protected(local->scan_req,
 					     lockdep_is_held(&local->mtx));
 
-	if (scan_req != local->int_scan_req) {
-		local->scan_info.aborted = aborted;
-		cfg80211_scan_done(scan_req, &local->scan_info);
-	}
 	RCU_INIT_POINTER(local->scan_req, NULL);
 	RCU_INIT_POINTER(local->scan_sdata, NULL);
 
 	local->scanning = 0;
 	local->scan_chandef.chan = NULL;
 
+	synchronize_rcu();
+
+	if (scan_req != local->int_scan_req) {
+		local->scan_info.aborted = aborted;
+		cfg80211_scan_done(scan_req, &local->scan_info);
+	}
+
 	/* Set power back to normal operating levels. */
 	ieee80211_hw_config(local, 0);
 
@@ -783,6 +798,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
 						IEEE80211_CHAN_RADAR)) ||
 		    !req->n_ssids) {
 			next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
+			if (req->n_ssids)
+				set_bit(SCAN_BEACON_WAIT, &local->scanning);
 		} else {
 			ieee80211_scan_state_send_probe(local, &next_delay);
 			next_delay = IEEE80211_CHANNEL_TIME;
@@ -994,6 +1011,8 @@ set_channel:
 	    !scan_req->n_ssids) {
 		*next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
 		local->next_scan_state = SCAN_DECISION;
+		if (scan_req->n_ssids)
+			set_bit(SCAN_BEACON_WAIT, &local->scanning);
 		return;
 	}
 
@@ -1086,6 +1105,8 @@ void ieee80211_scan_work(struct work_struct *work)
 			goto out;
 	}
 
+	clear_bit(SCAN_BEACON_WAIT, &local->scanning);
+
 	/*
 	 * as long as no delay is required advance immediately
 	 * without scheduling a new work
@@ -1096,6 +1117,10 @@ void ieee80211_scan_work(struct work_struct *work)
 			goto out_complete;
 		}
 
+		if (test_and_clear_bit(SCAN_BEACON_DONE, &local->scanning) &&
+		    local->next_scan_state == SCAN_DECISION)
+			local->next_scan_state = SCAN_SEND_PROBE;
+
 		switch (local->next_scan_state) {
 		case SCAN_DECISION:
 			/* if no more bands/channels left, complete scan */
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index e18c3855f616..cee39ae52245 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -645,13 +645,13 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 	/* check if STA exists already */
 	if (sta_info_get_bss(sdata, sta->sta.addr)) {
 		err = -EEXIST;
-		goto out_err;
+		goto out_cleanup;
 	}
 
 	sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
 	if (!sinfo) {
 		err = -ENOMEM;
-		goto out_err;
+		goto out_cleanup;
 	}
 
 	local->num_sta++;
@@ -707,8 +707,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 out_drop_sta:
 	local->num_sta--;
 	synchronize_net();
+ out_cleanup:
 	cleanup_single_sta(sta);
- out_err:
 	mutex_unlock(&local->sta_mtx);
 	kfree(sinfo);
 	rcu_read_lock();
@@ -2175,9 +2175,9 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
 	u64 value;
 
 	do {
-		start = u64_stats_fetch_begin(&rxstats->syncp);
+		start = u64_stats_fetch_begin_irq(&rxstats->syncp);
 		value = rxstats->msdu[tid];
-	} while (u64_stats_fetch_retry(&rxstats->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
 
 	return value;
 }
@@ -2241,9 +2241,9 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
 	u64 value;
 
 	do {
-		start = u64_stats_fetch_begin(&rxstats->syncp);
+		start = u64_stats_fetch_begin_irq(&rxstats->syncp);
 		value = rxstats->bytes;
-	} while (u64_stats_fetch_retry(&rxstats->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
 
 	return value;
 }
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index a1f129292ad8..7fa6efa8b83c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1124,10 +1124,6 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
 			} else
 				elem_parse_failed = true;
 			break;
-		case WLAN_EID_CHALLENGE:
-			elems->challenge = pos;
-			elems->challenge_len = elen;
-			break;
 		case WLAN_EID_VENDOR_SPECIFIC:
 			if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
 			    pos[2] == 0xf2) {
@@ -1409,6 +1405,8 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
 	for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) {
 		if (elem->datalen < 2)
 			continue;
+		if (elem->data[0] < 1 || elem->data[0] > 8)
+			continue;
 
 		for_each_element(sub, elem->data + 1, elem->datalen - 1) {
 			u8 new_bssid[ETH_ALEN];
@@ -1485,6 +1483,11 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
 			cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
 					       nontransmitted_profile,
 					       nontransmitted_profile_len);
+		if (!nontransmitted_profile_len) {
+			nontransmitted_profile_len = 0;
+			kfree(nontransmitted_profile);
+			nontransmitted_profile = NULL;
+		}
 	}
 
 	crc = _ieee802_11_parse_elems_crc(start, len, action, elems, filter,
@@ -1514,7 +1517,7 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
 	    offsetofend(struct ieee80211_bssid_index, dtim_count))
 		elems->dtim_count = elems->bssid_index->dtim_count;
 
-	kfree(nontransmitted_profile);
+	elems->nontx_profile = nontransmitted_profile;
 
 	return crc;
 }
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 2fb99325135a..b9404b056087 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -145,8 +145,8 @@ u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
 	bool qos;
 
 	/* all mesh/ocb stations are required to support WME */
-	if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
-	    sdata->vif.type == NL80211_IFTYPE_OCB)
+	if (sta && (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
+		    sdata->vif.type == NL80211_IFTYPE_OCB))
 		qos = true;
 	else if (sta)
 		qos = sta->sta.wme;
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 1cf5ac09edcb..a08240fe68a7 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -661,6 +661,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
 	sdata->dev = ndev;
 	sdata->wpan_dev.wpan_phy = local->hw.phy;
 	sdata->local = local;
+	INIT_LIST_HEAD(&sdata->wpan_dev.list);
 
 	/* setup type-dependent data */
 	ret = ieee802154_setup_sdata(sdata, type);
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index b8ce84618a55..726b47a4611b 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -44,7 +44,7 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
 
 	switch (mac_cb(skb)->dest.mode) {
 	case IEEE802154_ADDR_NONE:
-		if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE)
+		if (hdr->source.mode != IEEE802154_ADDR_NONE)
 			/* FIXME: check if we are PAN coordinator */
 			skb->pkt_type = PACKET_OTHERHOST;
 		else
@@ -132,7 +132,7 @@ static int
 ieee802154_parse_frame_start(struct sk_buff *skb, struct ieee802154_hdr *hdr)
 {
 	int hlen;
-	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
+	struct ieee802154_mac_cb *cb = mac_cb(skb);
 
 	skb_reset_mac_header(skb);
 
@@ -294,8 +294,9 @@ void
 ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
 {
 	struct ieee802154_local *local = hw_to_local(hw);
+	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
 
-	mac_cb(skb)->lqi = lqi;
+	cb->lqi = lqi;
 	skb->pkt_type = IEEE802154_RX_MSG;
 	skb_queue_tail(&local->skb_queue, skb);
 	tasklet_schedule(&local->tasklet);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 9c047c148a11..72398149e4d4 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1078,9 +1078,9 @@ static void mpls_get_stats(struct mpls_dev *mdev,
 		p = per_cpu_ptr(mdev->stats, i);
 		do {
-			start = u64_stats_fetch_begin(&p->syncp);
+			start = u64_stats_fetch_begin_irq(&p->syncp);
 			local = p->stats;
-		} while (u64_stats_fetch_retry(&p->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 
 		stats->rx_packets	+= local.rx_packets;
 		stats->rx_bytes		+= local.rx_bytes;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 8123c79e2791..e61c85873ea2 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1421,7 +1421,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
 	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
 		goto new_measure;
 
-	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
 		int rcvmem, rcvbuf;
 		u64 rcvwin, grow;
@@ -1439,7 +1439,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
 
 		do_div(rcvwin, advmss);
 		rcvbuf = min_t(u64, rcvwin * rcvmem,
-			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+			       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
 
 		if (rcvbuf > sk->sk_rcvbuf) {
 			u32 window_clamp;
@@ -1872,8 +1872,8 @@ static int mptcp_init_sock(struct sock *sk)
 		return ret;
 
 	sk_sockets_allocated_inc(sk);
-	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
-	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
+	sk->sk_rcvbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
+	sk->sk_sndbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
 
 	return 0;
 }
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 56c53318a107..fac2717993d7 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -118,7 +118,6 @@ config NF_CONNTRACK_ZONES
 
 config NF_CONNTRACK_PROCFS
 	bool "Supply CT list in procfs (OBSOLETE)"
-	default y
 	depends on PROC_FS
 	help
	  This option enables for the list of known conntrack entries
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 2b19189a930f..c17a7dda0163 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -963,20 +963,9 @@ static struct nlmsghdr *
 start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags,
 	  enum ipset_cmd cmd)
 {
-	struct nlmsghdr *nlh;
-	struct nfgenmsg *nfmsg;
-
-	nlh = nlmsg_put(skb, portid, seq, nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd),
-			sizeof(*nfmsg), flags);
-	if (!nlh)
-		return NULL;
-
-	nfmsg = nlmsg_data(nlh);
-	nfmsg->nfgen_family = NFPROTO_IPV4;
-	nfmsg->version = NFNETLINK_V0;
-	nfmsg->res_id = 0;
-
-	return nlh;
+	return nfnl_msg_put(skb, portid, seq,
+			    nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd), flags,
+			    NFPROTO_IPV4, NFNETLINK_V0, 0);
 }
 
 /* Create a set */
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 5d6d68eaf6a9..d7a81b2250e7 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -131,8 +131,11 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
 		if (ret)
 			return ret;
-		if (ip > ip_to)
+		if (ip > ip_to) {
+			if (ip_to == 0)
+				return -IPSET_ERR_HASH_ELEM;
 			swap(ip, ip_to);
+		}
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
@@ -143,18 +146,20 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
-	if (retried) {
+	/* 64bit division is not allowed on 32bit */
+	if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE)
+		return -ERANGE;
+
+	if (retried)
 		ip = ntohl(h->next.ip);
-		e.ip = htonl(ip);
-	}
 	for (; ip <= ip_to;) {
+		e.ip = htonl(ip);
 		ret = adtfn(set, &e, &ext, &ext, flags);
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
 
 		ip += hosts;
-		e.ip = htonl(ip);
-		if (e.ip == 0)
+		if (ip == 0)
 			return 0;
 
 		ret = 0;
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
index aba1df617d6e..eefce34a34f0 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -120,6 +120,8 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
 	e.mark &= h->markmask;
+	if (e.mark == 0 && e.ip == 0)
+		return -IPSET_ERR_HASH_ELEM;
 
 	if (adt == IPSET_TEST ||
 	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) {
@@ -132,8 +134,11 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
 		if (ret)
 			return ret;
-		if (ip > ip_to)
+		if (ip > ip_to) {
+			if (e.mark == 0 && ip_to == 0)
+				return -IPSET_ERR_HASH_ELEM;
 			swap(ip, ip_to);
+		}
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
@@ -142,6 +147,9 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ip_set_mask_from_to(ip, ip_to, cidr);
 	}
 
+	if (((u64)ip_to - ip + 1) > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried)
 		ip = ntohl(h->next.ip);
 	for (; ip <= ip_to; ip++) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 1ff228717e29..4a54e9e8ae59 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -172,6 +172,9 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
 			swap(port, port_to);
 	}
 
+	if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried)
 		ip = ntohl(h->next.ip);
 	for (; ip <= ip_to; ip++) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index fa88afd812fa..09737de5ecc3 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -179,6 +179,9 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
 			swap(port, port_to);
 	}
 
+	if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried)
 		ip = ntohl(h->next.ip);
 	for (; ip <= ip_to; ip++) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index eef6ecfcb409..02685371a682 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -252,6 +252,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 			swap(port, port_to);
 	}
 
+	if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	ip2_to = ip2_from;
 	if (tb[IPSET_ATTR_IP2_TO]) {
 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 136cf0781d3a..9d1beaacb973 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -139,7 +139,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net4_elem e = { .cidr = HOST_MASK };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 ip = 0, ip_to = 0;
+	u32 ip = 0, ip_to = 0, ipn, n = 0;
 	int ret;
 
 	if (tb[IPSET_ATTR_LINENO])
@@ -187,6 +187,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 		if (ip + UINT_MAX == ip_to)
 			return -IPSET_ERR_HASH_RANGE;
 	}
+	ipn = ip;
+	do {
+		ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+		n++;
+	} while (ipn++ < ip_to);
+
+	if (n > IPSET_MAX_RANGE)
+		return -ERANGE;
+
 	if (retried)
 		ip = ntohl(h->next.ip);
 	do {
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index be5e95a0d876..c3ada9c63fa3 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -201,7 +201,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 ip = 0, ip_to = 0;
+	u32 ip = 0, ip_to = 0, ipn, n = 0;
 	int ret;
 
 	if (tb[IPSET_ATTR_LINENO])
@@ -255,6 +255,14 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 	} else {
 		ip_set_mask_from_to(ip, ip_to, e.cidr);
 	}
+	ipn = ip;
+	do {
+		ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
+		n++;
+	} while (ipn++ < ip_to);
+
+	if (n > IPSET_MAX_RANGE)
+		return -ERANGE;
 
 	if (retried)
 		ip = ntohl(h->next.ip);
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index da4ef910b12d..b1411bc91a40 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -167,7 +167,8 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	struct hash_netnet4_elem e = { };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 ip = 0, ip_to = 0;
-	u32 ip2 = 0, ip2_from = 0, ip2_to = 0;
+	u32 ip2 = 0, ip2_from = 0, ip2_to = 0, ipn;
+	u64 n = 0, m = 0;
 	int ret;
 
 	if (tb[IPSET_ATTR_LINENO])
@@ -243,6 +244,19 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	} else {
 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
 	}
+	ipn = ip;
+	do {
+		ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
+		n++;
+	} while (ipn++ < ip_to);
+	ipn = ip2_from;
+	do {
+		ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
+		m++;
+	} while (ipn++ < ip2_to);
+
+	if (n*m > IPSET_MAX_RANGE)
+		return -ERANGE;
 
 	if (retried) {
 		ip = ntohl(h->next.ip[0]);
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 34448df80fb9..d26d13528fe8 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -157,7 +157,8 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 port, port_to, p = 0, ip = 0, ip_to = 0;
0, ipn; + u64 n = 0; bool with_ports = false; u8 cidr; int ret; @@ -234,6 +235,14 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], } else { ip_set_mask_from_to(ip, ip_to, e.cidr + 1); } + ipn = ip; + do { + ipn = ip_set_range_to_cidr(ipn, ip_to, &cidr); + n++; + } while (ipn++ < ip_to); + + if (n*(port_to - port + 1) > IPSET_MAX_RANGE) + return -ERANGE; if (retried) { ip = ntohl(h->next.ip); diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c index 934c1712cba8..6446f4fccc72 100644 --- a/net/netfilter/ipset/ip_set_hash_netportnet.c +++ b/net/netfilter/ipset/ip_set_hash_netportnet.c @@ -181,7 +181,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], struct hash_netportnet4_elem e = { }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, p = 0, port, port_to; - u32 ip2_from = 0, ip2_to = 0, ip2; + u32 ip2_from = 0, ip2_to = 0, ip2, ipn; + u64 n = 0, m = 0; bool with_ports = false; int ret; @@ -283,6 +284,19 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], } else { ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]); } + ipn = ip; + do { + ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]); + n++; + } while (ipn++ < ip_to); + ipn = ip2_from; + do { + ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]); + m++; + } while (ipn++ < ip2_to); + + if (n*m*(port_to - port + 1) > IPSET_MAX_RANGE) + return -ERANGE; if (retried) { ip = ntohl(h->next.ip[0]); diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c index f9b16f2b2219..fdacbc3c15be 100644 --- a/net/netfilter/ipvs/ip_vs_app.c +++ b/net/netfilter/ipvs/ip_vs_app.c @@ -599,13 +599,19 @@ static const struct seq_operations ip_vs_app_seq_ops = { int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs) { INIT_LIST_HEAD(&ipvs->app_list); - proc_create_net("ip_vs_app", 0, ipvs->net->proc_net, &ip_vs_app_seq_ops, - sizeof(struct seq_net_private)); +#ifdef CONFIG_PROC_FS + if (!proc_create_net("ip_vs_app", 0, ipvs->net->proc_net, + &ip_vs_app_seq_ops, + sizeof(struct seq_net_private))) + return -ENOMEM; +#endif return 0; } void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs) { unregister_ip_vs_app(ipvs, NULL /* all */); +#ifdef CONFIG_PROC_FS remove_proc_entry("ip_vs_app", ipvs->net->proc_net); +#endif } diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 2c467c422dc6..cb6d68220c26 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -1265,8 +1265,8 @@ static inline int todrop_entry(struct ip_vs_conn *cp) * The drop rate array needs tuning for real environments. * Called from timer bh only => no locking */ - static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; - static char todrop_counter[9] = {0}; + static const signed char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; + static signed char todrop_counter[9] = {0}; int i; /* if the conn entry hasn't lasted for 60 seconds, don't drop it. 
@@ -1447,20 +1447,36 @@ int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs) { atomic_set(&ipvs->conn_count, 0); - proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net, - &ip_vs_conn_seq_ops, sizeof(struct ip_vs_iter_state)); - proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net, - &ip_vs_conn_sync_seq_ops, - sizeof(struct ip_vs_iter_state)); +#ifdef CONFIG_PROC_FS + if (!proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net, + &ip_vs_conn_seq_ops, + sizeof(struct ip_vs_iter_state))) + goto err_conn; + + if (!proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net, + &ip_vs_conn_sync_seq_ops, + sizeof(struct ip_vs_iter_state))) + goto err_conn_sync; +#endif + return 0; + +#ifdef CONFIG_PROC_FS +err_conn_sync: + remove_proc_entry("ip_vs_conn", ipvs->net->proc_net); +err_conn: + return -ENOMEM; +#endif } void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs) { /* flush all the connection entries first */ ip_vs_conn_flush(ipvs); +#ifdef CONFIG_PROC_FS remove_proc_entry("ip_vs_conn", ipvs->net->proc_net); remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net); +#endif } int __init ip_vs_conn_init(void) @@ -1495,7 +1511,7 @@ int __init ip_vs_conn_init(void) pr_info("Connection hash table configured " "(size=%d, memory=%ldKbytes)\n", ip_vs_conn_tab_size, - (long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024); + (long)(ip_vs_conn_tab_size*sizeof(*ip_vs_conn_tab))/1024); IP_VS_DBG(0, "Each connection entry needs %zd bytes at least\n", sizeof(struct ip_vs_conn)); diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 16b48064f715..daab857c52a8 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1280,12 +1280,12 @@ static void set_sock_size(struct sock *sk, int mode, int val) lock_sock(sk); if (mode) { val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2, - sysctl_wmem_max); + READ_ONCE(sysctl_wmem_max)); sk->sk_sndbuf = val * 2; sk->sk_userlocks |= SOCK_SNDBUF_LOCK; } else { val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2, - sysctl_rmem_max); + READ_ONCE(sysctl_rmem_max)); sk->sk_rcvbuf = val * 2; sk->sk_userlocks |= SOCK_RCVBUF_LOCK; } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 79a61929c390..b74f6e5e507e 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1599,7 +1599,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, } #ifdef CONFIG_NF_CONNTRACK_MARK - ct->mark = exp->master->mark; + ct->mark = READ_ONCE(exp->master->mark); #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK ct->secmark = exp->master->secmark; diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index e40988a2f22f..65b5b05fe38d 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c @@ -148,15 +148,37 @@ static int help(struct sk_buff *skb, unsigned int protoff, data = ib_ptr; data_limit = ib_ptr + skb->len - dataoff; - /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24 - * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */ - while (data < data_limit - (19 + MINMATCHLEN)) { - if (memcmp(data, "\1DCC ", 5)) { + /* Skip any whitespace */ + while (data < data_limit - 10) { + if (*data == ' ' || *data == '\r' || *data == '\n') + data++; + else + break; + } + + /* strlen("PRIVMSG x ")=10 */ + if (data < data_limit - 10) { + if (strncasecmp("PRIVMSG ", data, 8)) + goto out; + data += 8; + } + + /* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26 + * 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26 + */ + while (data < 
data_limit - (21 + MINMATCHLEN)) { + /* Find first " :", the start of message */ + if (memcmp(data, " :", 2)) { data++; continue; } + data += 2; + + /* then check that place only for the DCC command */ + if (memcmp(data, "\1DCC ", 5)) + goto out; data += 5; - /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */ + /* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */ iph = ip_hdr(skb); pr_debug("DCC found in master %pI4:%u %pI4:%u\n", @@ -172,7 +194,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, pr_debug("DCC %s detected\n", dccprotos[i]); /* we have at least - * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid + * (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid * data left (== 14/13 bytes) */ if (parse_dcc(data, data_limit, &dcc_ip, &dcc_port, &addr_beg_p, &addr_end_p)) { @@ -185,8 +207,9 @@ static int help(struct sk_buff *skb, unsigned int protoff, /* dcc_ip can be the internal OR external (NAT'ed) IP */ tuple = &ct->tuplehash[dir].tuple; - if (tuple->src.u3.ip != dcc_ip && - tuple->dst.u3.ip != dcc_ip) { + if ((tuple->src.u3.ip != dcc_ip && + ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) || + dcc_port == 0) { net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n", &tuple->src.u3.ip, &dcc_ip, dcc_port); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index eeeaa34b3e7b..2efdc50f978b 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -319,7 +319,12 @@ nla_put_failure: #ifdef CONFIG_NF_CONNTRACK_MARK static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) { - if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark))) + u32 mark = READ_ONCE(ct->mark); + + if (!mark) + return 0; + + if (nla_put_be32(skb, CTA_MARK, htonl(mark))) goto nla_put_failure; return 0; @@ -553,22 +558,17 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, { const struct nf_conntrack_zone *zone; struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; struct nlattr *nest_parms; unsigned int event; if (portid) flags |= NLM_F_MULTI; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, nf_ct_l3num(ct), + NFNETLINK_V0, 0); + if (!nlh) goto nlmsg_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = nf_ct_l3num(ct); - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - zone = nf_ct_zone(ct); nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG); @@ -711,7 +711,6 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) const struct nf_conntrack_zone *zone; struct net *net; struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; struct nlattr *nest_parms; struct nf_conn *ct = item->ct; struct sk_buff *skb; @@ -741,15 +740,11 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) goto errout; type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type); - nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, nf_ct_l3num(ct), + NFNETLINK_V0, 0); + if (!nlh) goto nlmsg_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = nf_ct_l3num(ct); - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - zone = nf_ct_zone(ct); nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG); @@ -821,8 +816,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) } #ifdef CONFIG_NF_CONNTRACK_MARK - if 
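/* A note on the READ_ONCE()/WRITE_ONCE() conversions around ct->mark in
 * this series: the mark is updated from the packet path and from
 * ctnetlink without a shared lock, so every load and store is annotated
 * to rule out torn or repeatedly re-fetched accesses. A standalone model
 * of ctnetlink_change_mark() using C11 relaxed atomics, which give the
 * same single-access guarantee (the mark is loaded exactly once here):
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t mark;	/* stands in for ct->mark */

static void change_mark(uint32_t value, uint32_t keep_mask)
{
	uint32_t old = atomic_load_explicit(&mark, memory_order_relaxed);
	uint32_t next = (old & keep_mask) ^ value;

	if (next != old)	/* avoid dirtying the cacheline needlessly */
		atomic_store_explicit(&mark, next,
				      memory_order_relaxed);
}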
((events & (1 << IPCT_MARK) || ct->mark) - && ctnetlink_dump_mark(skb, ct) < 0) + if (events & (1 << IPCT_MARK) && + ctnetlink_dump_mark(skb, ct) < 0) goto nla_put_failure; #endif nlmsg_end(skb, nlh); @@ -1109,7 +1104,7 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data) } #ifdef CONFIG_NF_CONNTRACK_MARK - if ((ct->mark & filter->mark.mask) != filter->mark.val) + if ((READ_ONCE(ct->mark) & filter->mark.mask) != filter->mark.val) goto ignore_entry; #endif @@ -1989,9 +1984,9 @@ static void ctnetlink_change_mark(struct nf_conn *ct, mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK])); mark = ntohl(nla_get_be32(cda[CTA_MARK])); - newmark = (ct->mark & mask) ^ mark; - if (newmark != ct->mark) - ct->mark = newmark; + newmark = (READ_ONCE(ct->mark) & mask) ^ mark; + if (newmark != READ_ONCE(ct->mark)) + WRITE_ONCE(ct->mark, newmark); } #endif @@ -2483,20 +2478,15 @@ ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq, __u16 cpu, const struct ip_conntrack_stat *st) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; unsigned int flags = portid ? NLM_F_MULTI : 0, event; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS_CPU); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, + NFNETLINK_V0, htons(cpu)); + if (!nlh) goto nlmsg_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = AF_UNSPEC; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(cpu); - if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) || nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) || nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) || @@ -2568,20 +2558,15 @@ ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, struct net *net) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; unsigned int flags = portid ? NLM_F_MULTI : 0, event; unsigned int nr_conntracks = atomic_read(&net->ct.count); event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, + NFNETLINK_V0, 0); + if (!nlh) goto nlmsg_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = AF_UNSPEC; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks))) goto nla_put_failure; @@ -2746,7 +2731,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) goto nla_put_failure; #ifdef CONFIG_NF_CONNTRACK_MARK - if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0) + if (ctnetlink_dump_mark(skb, ct) < 0) goto nla_put_failure; #endif if (ctnetlink_dump_labels(skb, ct) < 0) @@ -3085,19 +3070,14 @@ ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int event, const struct nf_conntrack_expect *exp) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; unsigned int flags = portid ? 
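/* Background for the repeated "-nlmsg_put ... +nfnl_msg_put" hunks in
 * this file and the files below: every nfnetlink message starts with a
 * struct nfgenmsg, and each subsystem used to open-code filling it. The
 * helper consolidates that; a sketch of what it looks like (see
 * include/net/netfilter/nfnetlink.h for the version this series uses):
 */
static inline struct nlmsghdr *
nfnl_msg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int flags,
	     u8 family, u8 version, __be16 res_id)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*nfmsg), flags);
	if (!nlh)
		return NULL;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = family;
	nfmsg->version = version;
	nfmsg->res_id = res_id;

	return nlh;
}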
NLM_F_MULTI : 0; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, + exp->tuple.src.l3num, NFNETLINK_V0, 0); + if (!nlh) goto nlmsg_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = exp->tuple.src.l3num; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - if (ctnetlink_exp_dump_expect(skb, exp) < 0) goto nla_put_failure; @@ -3117,7 +3097,6 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item) struct nf_conntrack_expect *exp = item->exp; struct net *net = nf_ct_exp_net(exp); struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; struct sk_buff *skb; unsigned int type, group; int flags = 0; @@ -3140,15 +3119,11 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item) goto errout; type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type); - nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, + exp->tuple.src.l3num, NFNETLINK_V0, 0); + if (!nlh) goto nlmsg_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = exp->tuple.src.l3num; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - if (ctnetlink_exp_dump_expect(skb, exp) < 0) goto nla_put_failure; @@ -3716,20 +3691,15 @@ ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu, const struct ip_conntrack_stat *st) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; unsigned int flags = portid ? NLM_F_MULTI : 0, event; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_EXP_GET_STATS_CPU); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, + NFNETLINK_V0, htons(cpu)); + if (!nlh) goto nlmsg_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = AF_UNSPEC; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(cpu); - if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) || nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) || nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete))) diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index b83dc9bf0a5d..78fd9122b70c 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr, return ret; if (ret == 0) break; - dataoff += *matchoff; + dataoff = *matchoff; } *in_header = 0; } @@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr, break; if (ret == 0) return ret; - dataoff += *matchoff; + dataoff = *matchoff; } if (in_header) diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 313d1c8ff066..a7f88cdf3f87 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -360,7 +360,7 @@ static int ct_seq_show(struct seq_file *s, void *v) goto release; #if defined(CONFIG_NF_CONNTRACK_MARK) - seq_printf(s, "mark=%u ", ct->mark); + seq_printf(s, "mark=%u ", READ_ONCE(ct->mark)); #endif ct_show_secctx(s, ct); diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index d1862782be45..28306cb66719 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -910,6 +910,7 @@ static int nf_flow_table_block_setup(struct 
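/* The two flow-table hunks below add a locking rule rather than new
 * behaviour (hedged summary): flowtable->flow_block_lock must be held
 * for write wherever the flow-block callback list can change -- both the
 * list splice in nf_flow_table_block_setup() and the driver's
 * ndo_setup_tc() call, which may register callbacks -- so a concurrent
 * bind/unbind can no longer race against offload work walking the list.
 * The pattern:
 *
 *	down_write(&flowtable->flow_block_lock);
 *	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
 *	up_write(&flowtable->flow_block_lock);
 */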
nf_flowtable *flowtable, struct flow_block_cb *block_cb, *next; int err = 0; + down_write(&flowtable->flow_block_lock); switch (cmd) { case FLOW_BLOCK_BIND: list_splice(&bo->cb_list, &flowtable->flow_block.cb_list); @@ -924,6 +925,7 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable, WARN_ON_ONCE(1); err = -EOPNOTSUPP; } + up_write(&flowtable->flow_block_lock); return err; } @@ -980,7 +982,9 @@ static int nf_flow_table_offload_cmd(struct flow_block_offload *bo, nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable, extack); + down_write(&flowtable->flow_block_lock); err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo); + up_write(&flowtable->flow_block_lock); if (err < 0) return err; diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index 2fc4ae960769..3d6d49420db8 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -854,7 +854,7 @@ synproxy_send_tcp_ipv6(struct net *net, fl6.fl6_sport = nth->source; fl6.fl6_dport = nth->dest; security_skb_classify_flow((struct sk_buff *)skb, - flowi6_to_flowi(&fl6)); + flowi6_to_flowi_common(&fl6)); err = nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false); if (err) { goto free_nskb; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index fdd1da9ecea9..2143edafba77 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -66,6 +66,41 @@ static const struct rhashtable_params nft_objname_ht_params = { .automatic_shrinking = true, }; +struct nft_audit_data { + struct nft_table *table; + int entries; + int op; + struct list_head list; +}; + +static const u8 nft2audit_op[NFT_MSG_MAX] = { // enum nf_tables_msg_types + [NFT_MSG_NEWTABLE] = AUDIT_NFT_OP_TABLE_REGISTER, + [NFT_MSG_GETTABLE] = AUDIT_NFT_OP_INVALID, + [NFT_MSG_DELTABLE] = AUDIT_NFT_OP_TABLE_UNREGISTER, + [NFT_MSG_NEWCHAIN] = AUDIT_NFT_OP_CHAIN_REGISTER, + [NFT_MSG_GETCHAIN] = AUDIT_NFT_OP_INVALID, + [NFT_MSG_DELCHAIN] = AUDIT_NFT_OP_CHAIN_UNREGISTER, + [NFT_MSG_NEWRULE] = AUDIT_NFT_OP_RULE_REGISTER, + [NFT_MSG_GETRULE] = AUDIT_NFT_OP_INVALID, + [NFT_MSG_DELRULE] = AUDIT_NFT_OP_RULE_UNREGISTER, + [NFT_MSG_NEWSET] = AUDIT_NFT_OP_SET_REGISTER, + [NFT_MSG_GETSET] = AUDIT_NFT_OP_INVALID, + [NFT_MSG_DELSET] = AUDIT_NFT_OP_SET_UNREGISTER, + [NFT_MSG_NEWSETELEM] = AUDIT_NFT_OP_SETELEM_REGISTER, + [NFT_MSG_GETSETELEM] = AUDIT_NFT_OP_INVALID, + [NFT_MSG_DELSETELEM] = AUDIT_NFT_OP_SETELEM_UNREGISTER, + [NFT_MSG_NEWGEN] = AUDIT_NFT_OP_GEN_REGISTER, + [NFT_MSG_GETGEN] = AUDIT_NFT_OP_INVALID, + [NFT_MSG_TRACE] = AUDIT_NFT_OP_INVALID, + [NFT_MSG_NEWOBJ] = AUDIT_NFT_OP_OBJ_REGISTER, + [NFT_MSG_GETOBJ] = AUDIT_NFT_OP_INVALID, + [NFT_MSG_DELOBJ] = AUDIT_NFT_OP_OBJ_UNREGISTER, + [NFT_MSG_GETOBJ_RESET] = AUDIT_NFT_OP_OBJ_RESET, + [NFT_MSG_NEWFLOWTABLE] = AUDIT_NFT_OP_FLOWTABLE_REGISTER, + [NFT_MSG_GETFLOWTABLE] = AUDIT_NFT_OP_INVALID, + [NFT_MSG_DELFLOWTABLE] = AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, +}; + static void nft_validate_state_update(struct net *net, u8 new_validate_state) { switch (net->nft.validate_state) { @@ -114,6 +149,7 @@ static struct nft_trans *nft_trans_alloc_gfp(const struct nft_ctx *ctx, if (trans == NULL) return NULL; + INIT_LIST_HEAD(&trans->list); trans->msg_type = msg_type; trans->ctx = *ctx; @@ -481,6 +517,7 @@ static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type, if (msg_type == NFT_MSG_NEWFLOWTABLE) nft_activate_next(ctx->net, flowtable); + INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans)); nft_trans_flowtable(trans) = 
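/* Aside on the nft2audit_op[] table added above (hedged): audit records
 * are no longer emitted once per netlink message; instead the commit
 * path collects per-table counts and maps each transaction type to an
 * audit operation through this table. A standalone illustration of the
 * designated-initializer idiom -- note the kernel table lists every
 * message type explicitly instead of relying on the zero default:
 */
enum msg_type { MSG_NEW = 1, MSG_GET, MSG_DEL, MSG_MAX };
enum audit_op { OP_NONE = 0, OP_REGISTER, OP_UNREGISTER };

static const unsigned char msg2op[MSG_MAX] = {
	[MSG_NEW] = OP_REGISTER,
	[MSG_GET] = OP_NONE,	/* reads are not audited */
	[MSG_DEL] = OP_UNREGISTER,
};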
flowtable; list_add_tail(&trans->list, &ctx->net->nft.commit_list); @@ -646,6 +683,11 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla, return ERR_PTR(-ENOENT); } +static __be16 nft_base_seq(const struct net *net) +{ + return htons(net->nft.base_seq & 0xffff); +} + static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = { [NFTA_TABLE_NAME] = { .type = NLA_STRING, .len = NFT_TABLE_MAXNAMELEN - 1 }, @@ -660,18 +702,13 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net, int family, const struct nft_table *table) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, family, + NFNETLINK_V0, nft_base_seq(net)); + if (!nlh) goto nla_put_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = family; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(net->nft.base_seq & 0xffff); - if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) || nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) || nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) || @@ -708,17 +745,6 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event) { struct sk_buff *skb; int err; - char *buf = kasprintf(GFP_KERNEL, "%s:%llu;?:0", - ctx->table->name, ctx->table->handle); - - audit_log_nfcfg(buf, - ctx->family, - ctx->table->use, - event == NFT_MSG_NEWTABLE ? - AUDIT_NFT_OP_TABLE_REGISTER : - AUDIT_NFT_OP_TABLE_UNREGISTER, - GFP_KERNEL); - kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -1412,18 +1438,13 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, const struct nft_chain *chain) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, family, + NFNETLINK_V0, nft_base_seq(net)); + if (!nlh) goto nla_put_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = family; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(net->nft.base_seq & 0xffff); - if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name)) goto nla_put_failure; if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle), @@ -1475,18 +1496,6 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event) { struct sk_buff *skb; int err; - char *buf = kasprintf(GFP_KERNEL, "%s:%llu;%s:%llu", - ctx->table->name, ctx->table->handle, - ctx->chain->name, ctx->chain->handle); - - audit_log_nfcfg(buf, - ctx->family, - ctx->chain->use, - event == NFT_MSG_NEWCHAIN ? 
- AUDIT_NFT_OP_CHAIN_REGISTER : - AUDIT_NFT_OP_CHAIN_UNREGISTER, - GFP_KERNEL); - kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -1733,7 +1742,6 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net, goto err_hook_dev; } hook->ops.dev = dev; - hook->inactive = false; return hook; @@ -1963,8 +1971,10 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, chain->flags |= NFT_CHAIN_BASE | flags; basechain->policy = NF_ACCEPT; if (chain->flags & NFT_CHAIN_HW_OFFLOAD && - nft_chain_offload_priority(basechain) < 0) + !nft_chain_offload_support(basechain)) { + list_splice_init(&basechain->hook_list, &hook->list); return -EOPNOTSUPP; + } flow_block_init(&basechain->flow_block); @@ -1993,7 +2003,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, const struct nlattr * const *nla = ctx->nla; struct nft_table *table = ctx->table; struct nft_base_chain *basechain; - struct nft_stats __percpu *stats; struct net *net = ctx->net; char name[NFT_NAME_MAXLEN]; struct nft_trans *trans; @@ -2005,6 +2014,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, return -EOVERFLOW; if (nla[NFTA_CHAIN_HOOK]) { + struct nft_stats __percpu *stats = NULL; struct nft_chain_hook hook; if (flags & NFT_CHAIN_BINDING) @@ -2029,15 +2039,17 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, return PTR_ERR(stats); } rcu_assign_pointer(basechain->stats, stats); - static_branch_inc(&nft_counters_enabled); } err = nft_basechain_init(basechain, family, &hook, flags); if (err < 0) { nft_chain_release_hook(&hook); kfree(basechain); + free_percpu(stats); return err; } + if (stats) + static_branch_inc(&nft_counters_enabled); } else { if (flags & NFT_CHAIN_BASE) return -EINVAL; @@ -2265,6 +2277,7 @@ err: } static struct nft_chain *nft_chain_lookup_byid(const struct net *net, + const struct nft_table *table, const struct nlattr *nla) { u32 id = ntohl(nla_get_be32(nla)); @@ -2274,6 +2287,7 @@ static struct nft_chain *nft_chain_lookup_byid(const struct net *net, struct nft_chain *chain = trans->ctx.chain; if (trans->msg_type == NFT_MSG_NEWCHAIN && + chain->table == table && id == nft_trans_chain_id(trans)) return chain; } @@ -2679,27 +2693,31 @@ static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx, err = nf_tables_expr_parse(ctx, nla, &info); if (err < 0) - goto err1; + goto err_expr_parse; + + err = -EOPNOTSUPP; + if (!(info.ops->type->flags & NFT_EXPR_STATEFUL)) + goto err_expr_stateful; err = -ENOMEM; expr = kzalloc(info.ops->size, GFP_KERNEL); if (expr == NULL) - goto err2; + goto err_expr_stateful; err = nf_tables_newexpr(ctx, &info, expr); if (err < 0) - goto err3; + goto err_expr_new; return expr; -err3: +err_expr_new: kfree(expr); -err2: +err_expr_stateful: owner = info.ops->type->owner; if (info.ops->type->release_ops) info.ops->type->release_ops(info.ops); module_put(owner); -err1: +err_expr_parse: return ERR_PTR(err); } @@ -2779,20 +2797,15 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, const struct nft_rule *prule) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; const struct nft_expr *expr, *next; struct nlattr *list; u16 type = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); - nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, type, flags, family, NFNETLINK_V0, + nft_base_seq(net)); + if (!nlh) goto nla_put_failure; - nfmsg = nlmsg_data(nlh); - 
nfmsg->nfgen_family = family; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(net->nft.base_seq & 0xffff); - if (nla_put_string(skb, NFTA_RULE_TABLE, table->name)) goto nla_put_failure; if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name)) @@ -2837,18 +2850,6 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx, { struct sk_buff *skb; int err; - char *buf = kasprintf(GFP_KERNEL, "%s:%llu;%s:%llu", - ctx->table->name, ctx->table->handle, - ctx->chain->name, ctx->chain->handle); - - audit_log_nfcfg(buf, - ctx->family, - rule->handle, - event == NFT_MSG_NEWRULE ? - AUDIT_NFT_OP_RULE_REGISTER : - AUDIT_NFT_OP_RULE_UNREGISTER, - GFP_KERNEL); - kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -3150,6 +3151,7 @@ static int nft_table_validate(struct net *net, const struct nft_table *table) } static struct nft_rule *nft_rule_lookup_byid(const struct net *net, + const struct nft_chain *chain, const struct nlattr *nla); #define NFT_RULE_MAXEXPRS 128 @@ -3195,7 +3197,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, return -EOPNOTSUPP; } else if (nla[NFTA_RULE_CHAIN_ID]) { - chain = nft_chain_lookup_byid(net, nla[NFTA_RULE_CHAIN_ID]); + chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID]); if (IS_ERR(chain)) { NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN_ID]); return PTR_ERR(chain); @@ -3237,7 +3239,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, return PTR_ERR(old_rule); } } else if (nla[NFTA_RULE_POSITION_ID]) { - old_rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_POSITION_ID]); + old_rule = nft_rule_lookup_byid(net, chain, nla[NFTA_RULE_POSITION_ID]); if (IS_ERR(old_rule)) { NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION_ID]); return PTR_ERR(old_rule); @@ -3376,6 +3378,7 @@ err1: } static struct nft_rule *nft_rule_lookup_byid(const struct net *net, + const struct nft_chain *chain, const struct nlattr *nla) { u32 id = ntohl(nla_get_be32(nla)); @@ -3385,6 +3388,7 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net, struct nft_rule *rule = nft_trans_rule(trans); if (trans->msg_type == NFT_MSG_NEWRULE && + trans->ctx.chain == chain && id == nft_trans_rule_id(trans)) return rule; } @@ -3433,7 +3437,7 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk, err = nft_delrule(&ctx, rule); } else if (nla[NFTA_RULE_ID]) { - rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_ID]); + rule = nft_rule_lookup_byid(net, chain, nla[NFTA_RULE_ID]); if (IS_ERR(rule)) { NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_ID]); return PTR_ERR(rule); @@ -3634,6 +3638,7 @@ static struct nft_set *nft_set_lookup_byhandle(const struct nft_table *table, } static struct nft_set *nft_set_lookup_byid(const struct net *net, + const struct nft_table *table, const struct nlattr *nla, u8 genmask) { struct nft_trans *trans; @@ -3644,6 +3649,7 @@ static struct nft_set *nft_set_lookup_byid(const struct net *net, struct nft_set *set = nft_trans_set(trans); if (id == nft_trans_set_id(trans) && + set->table == table && nft_active_genmask(set, genmask)) return set; } @@ -3664,7 +3670,7 @@ struct nft_set *nft_set_lookup_global(const struct net *net, if (!nla_set_id) return set; - set = nft_set_lookup_byid(net, nla_set_id, genmask); + set = nft_set_lookup_byid(net, table, nla_set_id, genmask); } return set; } @@ -3690,7 +3696,7 @@ cont: list_for_each_entry(i, &ctx->table->sets, list) { int tmp; - if (!nft_is_active_next(ctx->net, set)) + if (!nft_is_active_next(ctx->net, i)) continue; if (!sscanf(i->name, name, 
&tmp)) continue; @@ -3774,23 +3780,17 @@ static int nf_tables_fill_set_concat(struct sk_buff *skb, static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, const struct nft_set *set, u16 event, u16 flags) { - struct nfgenmsg *nfmsg; struct nlmsghdr *nlh; u32 portid = ctx->portid; struct nlattr *nest; u32 seq = ctx->seq; event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), - flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family, + NFNETLINK_V0, nft_base_seq(ctx->net)); + if (!nlh) goto nla_put_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = ctx->family; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff); - if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name)) goto nla_put_failure; if (nla_put_string(skb, NFTA_SET_NAME, set->name)) @@ -3870,18 +3870,6 @@ static void nf_tables_set_notify(const struct nft_ctx *ctx, struct sk_buff *skb; u32 portid = ctx->portid; int err; - char *buf = kasprintf(gfp_flags, "%s:%llu;%s:%llu", - ctx->table->name, ctx->table->handle, - set->name, set->handle); - - audit_log_nfcfg(buf, - ctx->family, - set->field_count, - event == NFT_MSG_NEWSET ? - AUDIT_NFT_OP_SET_REGISTER : - AUDIT_NFT_OP_SET_UNREGISTER, - gfp_flags); - kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -4047,6 +4035,9 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr, u32 len; int err; + if (desc->field_count >= ARRAY_SIZE(desc->field_len)) + return -E2BIG; + err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr, nft_concat_policy, NULL); if (err < 0) @@ -4056,9 +4047,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr, return -EINVAL; len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN])); - - if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT) - return -E2BIG; + if (!len || len > U8_MAX) + return -EINVAL; desc->field_len[desc->field_count++] = len; @@ -4069,7 +4059,8 @@ static int nft_set_desc_concat(struct nft_set_desc *desc, const struct nlattr *nla) { struct nlattr *attr; - int rem, err; + u32 num_regs = 0; + int rem, err, i; nla_for_each_nested(attr, nla, rem) { if (nla_type(attr) != NFTA_LIST_ELEM) @@ -4080,6 +4071,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc, return err; } + for (i = 0; i < desc->field_count; i++) + num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32)); + + if (num_regs > NFT_REG32_COUNT) + return -E2BIG; + return 0; } @@ -4220,6 +4217,11 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]); if (err < 0) return err; + + if (desc.field_count > 1 && !(flags & NFT_SET_CONCAT)) + return -EINVAL; + } else if (flags & NFT_SET_CONCAT) { + return -EINVAL; } if (nla[NFTA_SET_EXPR]) @@ -4401,6 +4403,12 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk, return nft_delset(&ctx, set); } +static int nft_validate_register_store(const struct nft_ctx *ctx, + enum nft_registers reg, + const struct nft_data *data, + enum nft_data_types type, + unsigned int len); + static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx, struct nft_set *set, const struct nft_set_iter *iter, @@ -4690,7 +4698,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) struct nft_set *set; struct nft_set_dump_args args; bool set_found = false; - struct nfgenmsg *nfmsg; struct nlmsghdr *nlh; struct nlattr *nest; u32 
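/* A model of the tightened concatenation checks above (hedged): the
 * field count can no longer overrun the descriptor array, each field
 * length is bounded on its own (1..255 bytes, since it is stored in a
 * u8), and the *sum* of 32-bit registers consumed by all fields is then
 * checked against the register file -- the old per-field test could pass
 * while the total still overflowed. Standalone sketch:
 */
#include <stdint.h>

#define REG32_COUNT	16	/* stand-in for NFT_REG32_COUNT */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int check_concat_fields(const uint8_t *field_len, unsigned int count)
{
	uint32_t num_regs = 0;
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (!field_len[i])
			return -1;	/* -EINVAL in the kernel */
		num_regs += DIV_ROUND_UP(field_len[i], 4u);
	}
	return num_regs > REG32_COUNT ? -2 : 0;	/* -E2BIG */
}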
portid, seq; @@ -4723,16 +4730,11 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) portid = NETLINK_CB(cb->skb).portid; seq = cb->nlh->nlmsg_seq; - nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), - NLM_F_MULTI); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, NLM_F_MULTI, + table->family, NFNETLINK_V0, nft_base_seq(net)); + if (!nlh) goto nla_put_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = table->family; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(net->nft.base_seq & 0xffff); - if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, table->name)) goto nla_put_failure; if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name)) @@ -4789,22 +4791,16 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb, const struct nft_set *set, const struct nft_set_elem *elem) { - struct nfgenmsg *nfmsg; struct nlmsghdr *nlh; struct nlattr *nest; int err; event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), - flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family, + NFNETLINK_V0, nft_base_seq(ctx->net)); + if (!nlh) goto nla_put_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = ctx->family; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff); - if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name)) goto nla_put_failure; if (nla_put_string(skb, NFTA_SET_NAME, set->name)) @@ -4847,19 +4843,13 @@ static int nft_setelem_parse_flags(const struct nft_set *set, static int nft_setelem_parse_key(struct nft_ctx *ctx, struct nft_set *set, struct nft_data *key, struct nlattr *attr) { - struct nft_data_desc desc; - int err; + struct nft_data_desc desc = { + .type = NFT_DATA_VALUE, + .size = NFT_DATA_VALUE_MAXLEN, + .len = set->klen, + }; - err = nft_data_init(ctx, key, NFT_DATA_VALUE_MAXLEN, &desc, attr); - if (err < 0) - return err; - - if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) { - nft_data_release(key, desc.type); - return -EINVAL; - } - - return 0; + return nft_data_init(ctx, key, &desc, attr); } static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set, @@ -4867,18 +4857,19 @@ static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set, struct nft_data *data, struct nlattr *attr) { - int err; + u32 dtype; - err = nft_data_init(ctx, data, NFT_DATA_VALUE_MAXLEN, desc, attr); - if (err < 0) - return err; + if (set->dtype == NFT_DATA_VERDICT) + dtype = NFT_DATA_VERDICT; + else + dtype = NFT_DATA_VALUE; - if (desc->type != NFT_DATA_VERDICT && desc->len != set->dlen) { - nft_data_release(data, desc->type); - return -EINVAL; - } + desc->type = dtype; + desc->size = NFT_DATA_VALUE_MAXLEN; + desc->len = set->dlen; + desc->flags = NFT_DATA_DESC_SETELEM; - return 0; + return nft_data_init(ctx, data, desc, attr); } static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, @@ -4996,18 +4987,6 @@ static void nf_tables_setelem_notify(const struct nft_ctx *ctx, u32 portid = ctx->portid; struct sk_buff *skb; int err; - char *buf = kasprintf(GFP_KERNEL, "%s:%llu;%s:%llu", - ctx->table->name, ctx->table->handle, - set->name, set->handle); - - audit_log_nfcfg(buf, - ctx->family, - set->handle, - event == NFT_MSG_NEWSETELEM ? 
- AUDIT_NFT_OP_SETELEM_REGISTER : - AUDIT_NFT_OP_SETELEM_UNREGISTER, - GFP_KERNEL); - kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) return; @@ -5055,9 +5034,6 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx, return expr; err = -EOPNOTSUPP; - if (!(expr->ops->type->flags & NFT_EXPR_STATEFUL)) - goto err_set_elem_expr; - if (expr->ops->type->flags & NFT_EXPR_GC) { if (set->flags & NFT_SET_TIMEOUT) goto err_set_elem_expr; @@ -5214,6 +5190,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, return -EINVAL; } + if (set->flags & NFT_SET_OBJECT) { + if (!nla[NFTA_SET_ELEM_OBJREF] && + !(flags & NFT_SET_ELEM_INTERVAL_END)) + return -EINVAL; + } else { + if (nla[NFTA_SET_ELEM_OBJREF]) + return -EINVAL; + } + if ((flags & NFT_SET_ELEM_INTERVAL_END) && (nla[NFTA_SET_ELEM_DATA] || nla[NFTA_SET_ELEM_OBJREF] || @@ -5291,10 +5276,6 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, expr->ops->size); if (nla[NFTA_SET_ELEM_OBJREF] != NULL) { - if (!(set->flags & NFT_SET_OBJECT)) { - err = -EINVAL; - goto err_parse_key_end; - } obj = nft_obj_lookup(ctx->net, ctx->table, nla[NFTA_SET_ELEM_OBJREF], set->objtype, genmask); @@ -6064,19 +6045,14 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net, int family, const struct nft_table *table, struct nft_object *obj, bool reset) { - struct nfgenmsg *nfmsg; struct nlmsghdr *nlh; event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, family, + NFNETLINK_V0, nft_base_seq(net)); + if (!nlh) goto nla_put_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = family; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(net->nft.base_seq & 0xffff); - if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) || nla_put_string(skb, NFTA_OBJ_NAME, obj->key.name) || nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) || @@ -6139,12 +6115,11 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb) filter->type != NFT_OBJECT_UNSPEC && obj->ops->type->type != filter->type) goto cont; - if (reset) { char *buf = kasprintf(GFP_ATOMIC, - "%s:%llu;?:0", + "%s:%u", table->name, - table->handle); + net->nft.base_seq); audit_log_nfcfg(buf, family, @@ -6265,8 +6240,8 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, reset = true; if (reset) { - char *buf = kasprintf(GFP_ATOMIC, "%s:%llu;?:0", - table->name, table->handle); + char *buf = kasprintf(GFP_ATOMIC, "%s:%u", + table->name, net->nft.base_seq); audit_log_nfcfg(buf, family, @@ -6353,15 +6328,15 @@ void nft_obj_notify(struct net *net, const struct nft_table *table, { struct sk_buff *skb; int err; - char *buf = kasprintf(gfp, "%s:%llu;?:0", - table->name, table->handle); + char *buf = kasprintf(gfp, "%s:%u", + table->name, net->nft.base_seq); audit_log_nfcfg(buf, family, obj->handle, event == NFT_MSG_NEWOBJ ? 
- AUDIT_NFT_OP_OBJ_REGISTER : - AUDIT_NFT_OP_OBJ_UNREGISTER, + AUDIT_NFT_OP_OBJ_REGISTER : + AUDIT_NFT_OP_OBJ_UNREGISTER, gfp); kfree(buf); @@ -6684,11 +6659,15 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh, if (nla[NFTA_FLOWTABLE_FLAGS]) { flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS])); - if (flags & ~NFT_FLOWTABLE_MASK) - return -EOPNOTSUPP; + if (flags & ~NFT_FLOWTABLE_MASK) { + err = -EOPNOTSUPP; + goto err_flowtable_update_hook; + } if ((flowtable->data.flags & NFT_FLOWTABLE_HW_OFFLOAD) ^ - (flags & NFT_FLOWTABLE_HW_OFFLOAD)) - return -EOPNOTSUPP; + (flags & NFT_FLOWTABLE_HW_OFFLOAD)) { + err = -EOPNOTSUPP; + goto err_flowtable_update_hook; + } } else { flags = flowtable->data.flags; } @@ -6870,6 +6849,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, { const struct nlattr * const *nla = ctx->nla; struct nft_flowtable_hook flowtable_hook; + LIST_HEAD(flowtable_del_list); struct nft_hook *this, *hook; struct nft_trans *trans; int err; @@ -6885,7 +6865,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, err = -ENOENT; goto err_flowtable_del_hook; } - hook->inactive = true; + list_move(&hook->list, &flowtable_del_list); } trans = nft_trans_alloc(ctx, NFT_MSG_DELFLOWTABLE, @@ -6898,6 +6878,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, nft_trans_flowtable(trans) = flowtable; nft_trans_flowtable_update(trans) = true; INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans)); + list_splice(&flowtable_del_list, &nft_trans_flowtable_hooks(trans)); nft_flowtable_hook_release(&flowtable_hook); list_add_tail(&trans->list, &ctx->net->nft.commit_list); @@ -6905,13 +6886,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, return 0; err_flowtable_del_hook: - list_for_each_entry(this, &flowtable_hook.list, list) { - hook = nft_hook_list_find(&flowtable->hook_list, this); - if (!hook) - break; - - hook->inactive = false; - } + list_splice(&flowtable_del_list, &flowtable->hook_list); nft_flowtable_hook_release(&flowtable_hook); return err; @@ -6976,20 +6951,15 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net, struct list_head *hook_list) { struct nlattr *nest, *nest_devs; - struct nfgenmsg *nfmsg; struct nft_hook *hook; struct nlmsghdr *nlh; event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, family, + NFNETLINK_V0, nft_base_seq(net)); + if (!nlh) goto nla_put_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = family; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(net->nft.base_seq & 0xffff); - if (nla_put_string(skb, NFTA_FLOWTABLE_TABLE, flowtable->table->name) || nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) || nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) || @@ -7179,18 +7149,6 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx, { struct sk_buff *skb; int err; - char *buf = kasprintf(GFP_KERNEL, "%s:%llu;%s:%llu", - flowtable->table->name, flowtable->table->handle, - flowtable->name, flowtable->handle); - - audit_log_nfcfg(buf, - ctx->family, - flowtable->hooknum, - event == NFT_MSG_NEWFLOWTABLE ? 
- AUDIT_NFT_OP_FLOWTABLE_REGISTER : - AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, - GFP_KERNEL); - kfree(buf); if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) @@ -7234,19 +7192,14 @@ static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, u32 portid, u32 seq) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; char buf[TASK_COMM_LEN]; int event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWGEN); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), 0); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, 0, AF_UNSPEC, + NFNETLINK_V0, nft_base_seq(net)); + if (!nlh) goto nla_put_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = AF_UNSPEC; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(net->nft.base_seq & 0xffff); - if (nla_put_be32(skb, NFTA_GEN_ID, htonl(net->nft.base_seq)) || nla_put_be32(skb, NFTA_GEN_PROC_PID, htonl(task_pid_nr(current))) || nla_put_string(skb, NFTA_GEN_PROC_NAME, get_task_comm(buf, current))) @@ -7311,9 +7264,6 @@ static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb, struct sk_buff *skb2; int err; - audit_log_nfcfg("?:0;?:0", 0, net->nft.base_seq, - AUDIT_NFT_OP_GEN_REGISTER, GFP_KERNEL); - if (!nlmsg_report(nlh) && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) return; @@ -7761,17 +7711,6 @@ void nft_chain_del(struct nft_chain *chain) list_del_rcu(&chain->list); } -static void nft_flowtable_hooks_del(struct nft_flowtable *flowtable, - struct list_head *hook_list) -{ - struct nft_hook *hook, *next; - - list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) { - if (hook->inactive) - list_move(&hook->list, hook_list); - } -} - static void nf_tables_module_autoload_cleanup(struct net *net) { struct nft_module_request *req, *next; @@ -7852,12 +7791,74 @@ new_batch: WARN_ON_ONCE(!list_empty(&net->nft.notify_list)); } +static int nf_tables_commit_audit_alloc(struct list_head *adl, + struct nft_table *table) +{ + struct nft_audit_data *adp; + + list_for_each_entry(adp, adl, list) { + if (adp->table == table) + return 0; + } + adp = kzalloc(sizeof(*adp), GFP_KERNEL); + if (!adp) + return -ENOMEM; + adp->table = table; + list_add(&adp->list, adl); + return 0; +} + +static void nf_tables_commit_audit_free(struct list_head *adl) +{ + struct nft_audit_data *adp, *adn; + + list_for_each_entry_safe(adp, adn, adl, list) { + list_del(&adp->list); + kfree(adp); + } +} + +static void nf_tables_commit_audit_collect(struct list_head *adl, + struct nft_table *table, u32 op) +{ + struct nft_audit_data *adp; + + list_for_each_entry(adp, adl, list) { + if (adp->table == table) + goto found; + } + WARN_ONCE(1, "table=%s not expected in commit list", table->name); + return; +found: + adp->entries++; + if (!adp->op || adp->op > op) + adp->op = op; +} + +#define AUNFTABLENAMELEN (NFT_TABLE_MAXNAMELEN + 22) + +static void nf_tables_commit_audit_log(struct list_head *adl, u32 generation) +{ + struct nft_audit_data *adp, *adn; + char aubuf[AUNFTABLENAMELEN]; + + list_for_each_entry_safe(adp, adn, adl, list) { + snprintf(aubuf, AUNFTABLENAMELEN, "%s:%u", adp->table->name, + generation); + audit_log_nfcfg(aubuf, adp->table->family, adp->entries, + nft2audit_op[adp->op], GFP_KERNEL); + list_del(&adp->list); + kfree(adp); + } +} + static int nf_tables_commit(struct net *net, struct sk_buff *skb) { struct nft_trans *trans, *next; struct nft_trans_elem *te; struct nft_chain *chain; struct nft_table *table; + LIST_HEAD(adl); int err; if (list_empty(&net->nft.commit_list)) { @@ -7877,6 +7878,12 @@ 
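/* How the audit helpers above fit together at commit time (hedged
 * summary): nf_tables_commit() allocates one nft_audit_data bucket per
 * table while preparing the batch, bumps the matching bucket for every
 * transaction once the generation counter has flipped, and finally emits
 * a single audit_log_nfcfg() record per table tagged with the new
 * generation. A standalone model of the collect step (the kernel WARNs
 * when the bucket is missing; elided here):
 */
#include <string.h>

struct audit_bucket {
	const char *table;
	int entries;
	int op;		/* lowest NFT_MSG_* seen; 0 = none yet */
};

static void collect(struct audit_bucket *b, unsigned int n,
		    const char *table, int op)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (strcmp(b[i].table, table))
			continue;
		b[i].entries++;
		if (!b[i].op || b[i].op > op)
			b[i].op = op;
		return;
	}
}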
static int nf_tables_commit(struct net *net, struct sk_buff *skb) list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { int ret; + ret = nf_tables_commit_audit_alloc(&adl, trans->ctx.table); + if (ret) { + nf_tables_commit_chain_prepare_cancel(net); + nf_tables_commit_audit_free(&adl); + return ret; + } if (trans->msg_type == NFT_MSG_NEWRULE || trans->msg_type == NFT_MSG_DELRULE) { chain = trans->ctx.chain; @@ -7884,6 +7891,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) ret = nf_tables_commit_chain_prepare(net, chain); if (ret < 0) { nf_tables_commit_chain_prepare_cancel(net); + nf_tables_commit_audit_free(&adl); return ret; } } @@ -7905,6 +7913,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) net->nft.gencursor = nft_gencursor_next(net); list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { + nf_tables_commit_audit_collect(&adl, trans->ctx.table, + trans->msg_type); switch (trans->msg_type) { case NFT_MSG_NEWTABLE: if (nft_trans_table_update(trans)) { @@ -7947,6 +7957,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_rule_notify(&trans->ctx, nft_trans_rule(trans), NFT_MSG_NEWRULE); + if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) + nft_flow_rule_destroy(nft_trans_flow_rule(trans)); + nft_trans_destroy(trans); break; case NFT_MSG_DELRULE: @@ -7957,6 +7970,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans), NFT_TRANS_COMMIT); + + if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) + nft_flow_rule_destroy(nft_trans_flow_rule(trans)); break; case NFT_MSG_NEWSET: nft_clear(net, nft_trans_set(trans)); @@ -8035,8 +8051,6 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) break; case NFT_MSG_DELFLOWTABLE: if (nft_trans_flowtable_update(trans)) { - nft_flowtable_hooks_del(nft_trans_flowtable(trans), - &nft_trans_flowtable_hooks(trans)); nf_tables_flowtable_notify(&trans->ctx, nft_trans_flowtable(trans), &nft_trans_flowtable_hooks(trans), @@ -8058,6 +8072,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nft_commit_notify(net, NETLINK_CB(skb).portid); nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); + nf_tables_commit_audit_log(&adl, net->nft.base_seq); nf_tables_commit_release(net); return 0; @@ -8114,7 +8129,6 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) { struct nft_trans *trans, *next; struct nft_trans_elem *te; - struct nft_hook *hook; if (action == NFNL_ABORT_VALIDATE && nf_tables_validate(net) < 0) @@ -8232,8 +8246,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) break; case NFT_MSG_DELFLOWTABLE: if (nft_trans_flowtable_update(trans)) { - list_for_each_entry(hook, &nft_trans_flowtable(trans)->hook_list, list) - hook->inactive = false; + list_splice(&nft_trans_flowtable_hooks(trans), + &nft_trans_flowtable(trans)->hook_list); } else { trans->ctx.table->use++; nft_clear(trans->ctx.net, nft_trans_flowtable(trans)); @@ -8512,7 +8526,7 @@ EXPORT_SYMBOL_GPL(nft_dump_register); * Validate that the input register is one of the general purpose * registers and that the length of the load is within the bounds. 
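 *
 * Example (a sketch, not part of this patch; the NFTA_EXAMPLE_*
 * attributes and priv fields are made up): after this series, expression
 * init routines stop calling the validate helpers directly and use the
 * nft_parse_register_load()/nft_parse_register_store() wrappers defined
 * below, which also narrow the parsed register into a u8 field:
 *
 *	err = nft_parse_register_load(tb[NFTA_EXAMPLE_SREG], &priv->sreg,
 *				      priv->len);
 *	if (err < 0)
 *		return err;
 *
 *	err = nft_parse_register_store(ctx, tb[NFTA_EXAMPLE_DREG],
 *				       &priv->dreg, NULL, NFT_DATA_VALUE,
 *				       priv->len);
 *	if (err < 0)
 *		return err;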
*/ -int nft_validate_register_load(enum nft_registers reg, unsigned int len) +static int nft_validate_register_load(enum nft_registers reg, unsigned int len) { if (reg < NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE) return -EINVAL; @@ -8523,7 +8537,21 @@ int nft_validate_register_load(enum nft_registers reg, unsigned int len) return 0; } -EXPORT_SYMBOL_GPL(nft_validate_register_load); + +int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len) +{ + u32 reg; + int err; + + reg = nft_parse_register(attr); + err = nft_validate_register_load(reg, len); + if (err < 0) + return err; + + *sreg = reg; + return 0; +} +EXPORT_SYMBOL_GPL(nft_parse_register_load); /** * nft_validate_register_store - validate an expressions' register store @@ -8539,10 +8567,11 @@ EXPORT_SYMBOL_GPL(nft_validate_register_load); * A value of NULL for the data means that its runtime gathered * data. */ -int nft_validate_register_store(const struct nft_ctx *ctx, - enum nft_registers reg, - const struct nft_data *data, - enum nft_data_types type, unsigned int len) +static int nft_validate_register_store(const struct nft_ctx *ctx, + enum nft_registers reg, + const struct nft_data *data, + enum nft_data_types type, + unsigned int len) { int err; @@ -8574,7 +8603,24 @@ int nft_validate_register_store(const struct nft_ctx *ctx, return 0; } } -EXPORT_SYMBOL_GPL(nft_validate_register_store); + +int nft_parse_register_store(const struct nft_ctx *ctx, + const struct nlattr *attr, u8 *dreg, + const struct nft_data *data, + enum nft_data_types type, unsigned int len) +{ + int err; + u32 reg; + + reg = nft_parse_register(attr); + err = nft_validate_register_store(ctx, reg, data, type, len); + if (err < 0) + return err; + + *dreg = reg; + return 0; +} +EXPORT_SYMBOL_GPL(nft_parse_register_store); static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = { [NFTA_VERDICT_CODE] = { .type = NLA_U32 }, @@ -8622,7 +8668,7 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, tb[NFTA_VERDICT_CHAIN], genmask); } else if (tb[NFTA_VERDICT_CHAIN_ID]) { - chain = nft_chain_lookup_byid(ctx->net, + chain = nft_chain_lookup_byid(ctx->net, ctx->table, tb[NFTA_VERDICT_CHAIN_ID]); if (IS_ERR(chain)) return PTR_ERR(chain); @@ -8634,6 +8680,11 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, return PTR_ERR(chain); if (nft_is_base_chain(chain)) return -EOPNOTSUPP; + if (nft_chain_is_bound(chain)) + return -EINVAL; + if (desc->flags & NFT_DATA_DESC_SETELEM && + chain->flags & NFT_CHAIN_BINDING) + return -EINVAL; chain->use++; data->verdict.chain = chain; @@ -8641,7 +8692,7 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, } desc->len = sizeof(data->verdict); - desc->type = NFT_DATA_VERDICT; + return 0; } @@ -8694,20 +8745,25 @@ nla_put_failure: } static int nft_value_init(const struct nft_ctx *ctx, - struct nft_data *data, unsigned int size, - struct nft_data_desc *desc, const struct nlattr *nla) + struct nft_data *data, struct nft_data_desc *desc, + const struct nlattr *nla) { unsigned int len; len = nla_len(nla); if (len == 0) return -EINVAL; - if (len > size) + if (len > desc->size) return -EOVERFLOW; + if (desc->len) { + if (len != desc->len) + return -EINVAL; + } else { + desc->len = len; + } nla_memcpy(data->data, nla, len); - desc->type = NFT_DATA_VALUE; - desc->len = len; + return 0; } @@ -8727,7 +8783,6 @@ static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = { * * @ctx: context of the expression using the data * 
@data: destination struct nft_data - * @size: maximum data length * @desc: data description * @nla: netlink attribute containing data * @@ -8737,24 +8792,35 @@ static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = { * The caller can indicate that it only wants to accept data of type * NFT_DATA_VALUE by passing NULL for the ctx argument. */ -int nft_data_init(const struct nft_ctx *ctx, - struct nft_data *data, unsigned int size, +int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data, struct nft_data_desc *desc, const struct nlattr *nla) { struct nlattr *tb[NFTA_DATA_MAX + 1]; int err; + if (WARN_ON_ONCE(!desc->size)) + return -EINVAL; + err = nla_parse_nested_deprecated(tb, NFTA_DATA_MAX, nla, nft_data_policy, NULL); if (err < 0) return err; - if (tb[NFTA_DATA_VALUE]) - return nft_value_init(ctx, data, size, desc, - tb[NFTA_DATA_VALUE]); - if (tb[NFTA_DATA_VERDICT] && ctx != NULL) - return nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]); - return -EINVAL; + if (tb[NFTA_DATA_VALUE]) { + if (desc->type != NFT_DATA_VALUE) + return -EINVAL; + + err = nft_value_init(ctx, data, desc, tb[NFTA_DATA_VALUE]); + } else if (tb[NFTA_DATA_VERDICT] && ctx != NULL) { + if (desc->type != NFT_DATA_VERDICT) + return -EINVAL; + + err = nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]); + } else { + err = -EINVAL; + } + + return err; } EXPORT_SYMBOL_GPL(nft_data_init); diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index a61b5bf5aa0f..9dc18429ed87 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -67,6 +67,50 @@ static void nft_cmp_fast_eval(const struct nft_expr *expr, regs->verdict.code = NFT_BREAK; } +static void nft_cmp16_fast_eval(const struct nft_expr *expr, + struct nft_regs *regs) +{ + const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr); + const u64 *reg_data = (const u64 *)®s->data[priv->sreg]; + const u64 *mask = (const u64 *)&priv->mask; + const u64 *data = (const u64 *)&priv->data; + + if (((reg_data[0] & mask[0]) == data[0] && + ((reg_data[1] & mask[1]) == data[1])) ^ priv->inv) + return; + regs->verdict.code = NFT_BREAK; +} + +static noinline void __nft_trace_verdict(struct nft_traceinfo *info, + const struct nft_chain *chain, + const struct nft_regs *regs) +{ + enum nft_trace_types type; + + switch (regs->verdict.code) { + case NFT_CONTINUE: + case NFT_RETURN: + type = NFT_TRACETYPE_RETURN; + break; + default: + type = NFT_TRACETYPE_RULE; + break; + } + + __nft_trace_packet(info, chain, type); +} + +static inline void nft_trace_verdict(struct nft_traceinfo *info, + const struct nft_chain *chain, + const struct nft_rule *rule, + const struct nft_regs *regs) +{ + if (static_branch_unlikely(&nft_trace_enabled)) { + info->rule = rule; + __nft_trace_verdict(info, chain, regs); + } +} + static bool nft_payload_fast_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@ -185,6 +229,8 @@ next_rule: nft_rule_for_each_expr(expr, last, rule) { if (expr->ops == &nft_cmp_fast_ops) nft_cmp_fast_eval(expr, ®s); + else if (expr->ops == &nft_cmp16_fast_ops) + nft_cmp16_fast_eval(expr, ®s); else if (expr->ops == &nft_bitwise_fast_ops) nft_bitwise_fast_eval(expr, ®s); else if (expr->ops != &nft_payload_fast_ops || @@ -207,13 +253,13 @@ next_rule: break; } + nft_trace_verdict(&info, chain, rule, ®s); + switch (regs.verdict.code & NF_VERDICT_MASK) { case NF_ACCEPT: case NF_DROP: case NF_QUEUE: case NF_STOLEN: - nft_trace_packet(&info, chain, rule, - 
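/* The nft_data_init() rework a few hunks up changes its calling
 * convention (hedged summary): callers now declare what they accept in
 * the descriptor -- expected type, buffer capacity, and, when nonzero,
 * an exact length -- and the helper enforces all of it, so no caller can
 * forget the release-on-mismatch cleanup again. Typical use (sketch;
 * priv and NFTA_EXAMPLE_DATA are illustrative, not from this patch):
 *
 *	struct nft_data_desc desc = {
 *		.type = NFT_DATA_VALUE,      (reject verdicts)
 *		.size = sizeof(priv->data),  (buffer capacity)
 *		.len  = priv->len,           (exact length; 0 means any)
 *	};
 *
 *	err = nft_data_init(NULL, &priv->data, &desc,
 *			    tb[NFTA_EXAMPLE_DATA]);
 */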
NFT_TRACETYPE_RULE); return regs.verdict.code; } @@ -226,15 +272,10 @@ next_rule: stackptr++; fallthrough; case NFT_GOTO: - nft_trace_packet(&info, chain, rule, - NFT_TRACETYPE_RULE); - chain = regs.verdict.chain; goto do_chain; case NFT_CONTINUE: case NFT_RETURN: - nft_trace_packet(&info, chain, rule, - NFT_TRACETYPE_RETURN); break; default: WARN_ON(1); diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index 839fd09f1bb4..4e99b1731b3f 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -208,7 +208,7 @@ static int nft_setup_cb_call(enum tc_setup_type type, void *type_data, return 0; } -int nft_chain_offload_priority(struct nft_base_chain *basechain) +static int nft_chain_offload_priority(const struct nft_base_chain *basechain) { if (basechain->ops.priority <= 0 || basechain->ops.priority > USHRT_MAX) @@ -217,6 +217,27 @@ int nft_chain_offload_priority(struct nft_base_chain *basechain) return 0; } +bool nft_chain_offload_support(const struct nft_base_chain *basechain) +{ + struct net_device *dev; + struct nft_hook *hook; + + if (nft_chain_offload_priority(basechain) < 0) + return false; + + list_for_each_entry(hook, &basechain->hook_list, list) { + if (hook->ops.pf != NFPROTO_NETDEV || + hook->ops.hooknum != NF_NETDEV_INGRESS) + return false; + + dev = hook->ops.dev; + if (!dev->netdev_ops->ndo_setup_tc && !flow_indr_dev_exists()) + return false; + } + + return true; +} + static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow, const struct nft_base_chain *basechain, const struct nft_rule *rule, diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c index 87b36da5cd98..0cf3278007ba 100644 --- a/net/netfilter/nf_tables_trace.c +++ b/net/netfilter/nf_tables_trace.c @@ -183,7 +183,6 @@ static bool nft_trace_have_verdict_chain(struct nft_traceinfo *info) void nft_trace_notify(struct nft_traceinfo *info) { const struct nft_pktinfo *pkt = info->pkt; - struct nfgenmsg *nfmsg; struct nlmsghdr *nlh; struct sk_buff *skb; unsigned int size; @@ -219,15 +218,11 @@ void nft_trace_notify(struct nft_traceinfo *info) return; event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_TRACE); - nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct nfgenmsg), 0); + nlh = nfnl_msg_put(skb, 0, 0, event, 0, info->basechain->type->family, + NFNETLINK_V0, 0); if (!nlh) goto nla_put_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = info->basechain->type->family; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - if (nla_put_be32(skb, NFTA_TRACE_NFPROTO, htonl(nft_pf(pkt)))) goto nla_put_failure; diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 5bfec829c12f..ec3e378da73d 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -132,21 +132,16 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, int event, struct nf_acct *acct) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; unsigned int flags = portid ? 
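/* A standalone model of nft_cmp16_fast_eval() added in nf_tables_core.c
 * above (hedged): the 16-byte masked comparison is done as two 64-bit
 * loads, and XOR with the `inv` flag folds the EQ and NEQ operators into
 * one branch-free test:
 */
#include <stdbool.h>
#include <stdint.h>

static bool cmp16_matches(const uint64_t reg[2], const uint64_t mask[2],
			  const uint64_t data[2], bool inv)
{
	bool eq = (reg[0] & mask[0]) == data[0] &&
		  (reg[1] & mask[1]) == data[1];

	return eq ^ inv;	/* false means the rule takes NFT_BREAK */
}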
NLM_F_MULTI : 0; u64 pkts, bytes; u32 old_flags; event = nfnl_msg_type(NFNL_SUBSYS_ACCT, event); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, + NFNETLINK_V0, 0); + if (!nlh) goto nlmsg_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = AF_UNSPEC; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - if (nla_put_string(skb, NFACCT_NAME, acct->name)) goto nla_put_failure; diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 91afbf8ac8cf..52d5f2411834 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c @@ -530,20 +530,15 @@ nfnl_cthelper_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, int event, struct nf_conntrack_helper *helper) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; unsigned int flags = portid ? NLM_F_MULTI : 0; int status; event = nfnl_msg_type(NFNL_SUBSYS_CTHELPER, event); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, + NFNETLINK_V0, 0); + if (!nlh) goto nlmsg_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = AF_UNSPEC; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - if (nla_put_string(skb, NFCTH_NAME, helper->name)) goto nla_put_failure; diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 89a381f7f945..de831a257512 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -160,22 +160,17 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, int event, struct ctnl_timeout *timeout) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; unsigned int flags = portid ? NLM_F_MULTI : 0; const struct nf_conntrack_l4proto *l4proto = timeout->timeout.l4proto; struct nlattr *nest_parms; int ret; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, + NFNETLINK_V0, 0); + if (!nlh) goto nlmsg_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = AF_UNSPEC; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) || nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->timeout.l3num)) || @@ -382,21 +377,16 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid, const unsigned int *timeouts) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; unsigned int flags = portid ? 
NLM_F_MULTI : 0; struct nlattr *nest_parms; int ret; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, + NFNETLINK_V0, 0); + if (!nlh) goto nlmsg_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = AF_UNSPEC; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l3num)) || nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto)) goto nla_put_failure; diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 33c13edbca4b..f087baa95b07 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -452,20 +452,15 @@ __build_packet_message(struct nfnl_log_net *log, { struct nfulnl_msg_packet_hdr pmsg; struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; sk_buff_data_t old_tail = inst->skb->tail; struct sock *sk; const unsigned char *hwhdrp; - nlh = nlmsg_put(inst->skb, 0, 0, - nfnl_msg_type(NFNL_SUBSYS_ULOG, NFULNL_MSG_PACKET), - sizeof(struct nfgenmsg), 0); + nlh = nfnl_msg_put(inst->skb, 0, 0, + nfnl_msg_type(NFNL_SUBSYS_ULOG, NFULNL_MSG_PACKET), + 0, pf, NFNETLINK_V0, htons(inst->group_num)); if (!nlh) return -1; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = pf; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(inst->group_num); memset(&pmsg, 0, sizeof(pmsg)); pmsg.hw_protocol = skb->protocol; diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index 79fbf37291f3..51e3953b414c 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c @@ -269,6 +269,7 @@ bool nf_osf_find(const struct sk_buff *skb, struct nf_osf_hdr_ctx ctx; const struct tcphdr *tcp; struct tcphdr _tcph; + bool found = false; memset(&ctx, 0, sizeof(ctx)); @@ -283,10 +284,11 @@ bool nf_osf_find(const struct sk_buff *skb, data->genre = f->genre; data->version = f->version; + found = true; break; } - return true; + return found; } EXPORT_SYMBOL_GPL(nf_osf_find); diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 1640da5c5077..9d87606c76ff 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -383,7 +383,6 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, struct nlattr *nla; struct nfqnl_msg_packet_hdr *pmsg; struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; struct sk_buff *entskb = entry->skb; struct net_device *indev; struct net_device *outdev; @@ -469,18 +468,15 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, goto nlmsg_failure; } - nlh = nlmsg_put(skb, 0, 0, - nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET), - sizeof(struct nfgenmsg), 0); + nlh = nfnl_msg_put(skb, 0, 0, + nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET), + 0, entry->state.pf, NFNETLINK_V0, + htons(queue->queue_num)); if (!nlh) { skb_tx_error(entskb); kfree_skb(skb); goto nlmsg_failure; } - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = entry->state.pf; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = htons(queue->queue_num); nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg)); pmsg = nla_data(nla); @@ -838,11 +834,16 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) } static int -nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff) +nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff) { struct sk_buff *nskb; if (diff < 0) { + unsigned int 
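/* The nf_osf_find() change above fixes a classic search-loop bug (hedged
 * summary): the loop broke out on a fingerprint match, but the function
 * returned true unconditionally, so on a miss callers went on to read
 * whatever was left in the output structure. The general shape of the
 * fix, as a standalone sketch:
 */
#include <stdbool.h>
#include <stddef.h>

static bool find_first_match(const int *v, size_t n, int key, int *out)
{
	bool found = false;
	size_t i;

	for (i = 0; i < n; i++) {
		if (v[i] == key) {
			*out = v[i];
			found = true;
			break;
		}
	}
	return found;	/* was effectively "return true;" regardless */
}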
min_len = skb_transport_offset(e->skb); + + if (data_len < min_len) + return -EINVAL; + if (pskb_trim(e->skb, data_len)) return -ENOMEM; } else if (diff > 0) { diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c index bbd773d74377..d6ab7aa14adc 100644 --- a/net/netfilter/nft_bitwise.c +++ b/net/netfilter/nft_bitwise.c @@ -16,8 +16,8 @@ #include struct nft_bitwise { - enum nft_registers sreg:8; - enum nft_registers dreg:8; + u8 sreg; + u8 dreg; enum nft_bitwise_ops op:8; u8 len; struct nft_data mask; @@ -93,7 +93,16 @@ static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = { static int nft_bitwise_init_bool(struct nft_bitwise *priv, const struct nlattr *const tb[]) { - struct nft_data_desc mask, xor; + struct nft_data_desc mask = { + .type = NFT_DATA_VALUE, + .size = sizeof(priv->mask), + .len = priv->len, + }; + struct nft_data_desc xor = { + .type = NFT_DATA_VALUE, + .size = sizeof(priv->xor), + .len = priv->len, + }; int err; if (tb[NFTA_BITWISE_DATA]) @@ -103,36 +112,30 @@ static int nft_bitwise_init_bool(struct nft_bitwise *priv, !tb[NFTA_BITWISE_XOR]) return -EINVAL; - err = nft_data_init(NULL, &priv->mask, sizeof(priv->mask), &mask, - tb[NFTA_BITWISE_MASK]); + err = nft_data_init(NULL, &priv->mask, &mask, tb[NFTA_BITWISE_MASK]); if (err < 0) return err; - if (mask.type != NFT_DATA_VALUE || mask.len != priv->len) { - err = -EINVAL; - goto err1; - } - err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &xor, - tb[NFTA_BITWISE_XOR]); + err = nft_data_init(NULL, &priv->xor, &xor, tb[NFTA_BITWISE_XOR]); if (err < 0) - goto err1; - if (xor.type != NFT_DATA_VALUE || xor.len != priv->len) { - err = -EINVAL; - goto err2; - } + goto err_xor_err; return 0; -err2: - nft_data_release(&priv->xor, xor.type); -err1: + +err_xor_err: nft_data_release(&priv->mask, mask.type); + return err; } static int nft_bitwise_init_shift(struct nft_bitwise *priv, const struct nlattr *const tb[]) { - struct nft_data_desc d; + struct nft_data_desc desc = { + .type = NFT_DATA_VALUE, + .size = sizeof(priv->data), + .len = sizeof(u32), + }; int err; if (tb[NFTA_BITWISE_MASK] || @@ -142,13 +145,12 @@ static int nft_bitwise_init_shift(struct nft_bitwise *priv, if (!tb[NFTA_BITWISE_DATA]) return -EINVAL; - err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &d, - tb[NFTA_BITWISE_DATA]); + err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_BITWISE_DATA]); if (err < 0) return err; - if (d.type != NFT_DATA_VALUE || d.len != sizeof(u32) || - priv->data.data[0] >= BITS_PER_TYPE(u32)) { - nft_data_release(&priv->data, d.type); + + if (priv->data.data[0] >= BITS_PER_TYPE(u32)) { + nft_data_release(&priv->data, desc.type); return -EINVAL; } @@ -169,14 +171,14 @@ static int nft_bitwise_init(const struct nft_ctx *ctx, priv->len = len; - priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]); - err = nft_validate_register_load(priv->sreg, priv->len); + err = nft_parse_register_load(tb[NFTA_BITWISE_SREG], &priv->sreg, + priv->len); if (err < 0) return err; - priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]); - err = nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, priv->len); + err = nft_parse_register_store(ctx, tb[NFTA_BITWISE_DREG], + &priv->dreg, NULL, NFT_DATA_VALUE, + priv->len); if (err < 0) return err; @@ -290,22 +292,21 @@ static const struct nft_expr_ops nft_bitwise_ops = { static int nft_bitwise_extract_u32_data(const struct nlattr * const tb, u32 *out) { - struct nft_data_desc desc; struct nft_data data; - int err = 0; + struct nft_data_desc desc 
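/*
 * Throughout these nft_data_init() conversions the caller now states its
 * expectations up front in struct nft_data_desc (.type, maximum .size and,
 * where the length is fixed, .len) instead of checking desc.type/desc.len
 * after the fact. A sketch of the validation this presumably centralizes
 * inside nft_data_init(), inferred from the removed call-site checks
 * (helper name hypothetical):
 *
 *	static int nft_data_check_desc(const struct nft_data_desc *desc,
 *				       enum nft_data_types type,
 *				       unsigned int len)
 *	{
 *		if (type != desc->type)
 *			return -EINVAL;
 *		if (len > desc->size)
 *			return -EINVAL;
 *		if (desc->len && desc->len != len)
 *			return -EINVAL;
 *		return 0;
 *	}
 */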
= { + .type = NFT_DATA_VALUE, + .size = sizeof(data), + .len = sizeof(u32), + }; + int err; - err = nft_data_init(NULL, &data, sizeof(data), &desc, tb); + err = nft_data_init(NULL, &data, &desc, tb); if (err < 0) return err; - if (desc.type != NFT_DATA_VALUE || desc.len != sizeof(u32)) { - err = -EINVAL; - goto err; - } *out = data.data[0]; -err: - nft_data_release(&data, desc.type); - return err; + + return 0; } static int nft_bitwise_fast_init(const struct nft_ctx *ctx, @@ -315,14 +316,13 @@ static int nft_bitwise_fast_init(const struct nft_ctx *ctx, struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr); int err; - priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]); - err = nft_validate_register_load(priv->sreg, sizeof(u32)); + err = nft_parse_register_load(tb[NFTA_BITWISE_SREG], &priv->sreg, + sizeof(u32)); if (err < 0) return err; - priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]); - err = nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, sizeof(u32)); + err = nft_parse_register_store(ctx, tb[NFTA_BITWISE_DREG], &priv->dreg, + NULL, NFT_DATA_VALUE, sizeof(u32)); if (err < 0) return err; diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c index 12bed3f7bbc6..9d5947ab8d4e 100644 --- a/net/netfilter/nft_byteorder.c +++ b/net/netfilter/nft_byteorder.c @@ -16,8 +16,8 @@ #include struct nft_byteorder { - enum nft_registers sreg:8; - enum nft_registers dreg:8; + u8 sreg; + u8 dreg; enum nft_byteorder_ops op:8; u8 len; u8 size; @@ -131,20 +131,20 @@ static int nft_byteorder_init(const struct nft_ctx *ctx, return -EINVAL; } - priv->sreg = nft_parse_register(tb[NFTA_BYTEORDER_SREG]); err = nft_parse_u32_check(tb[NFTA_BYTEORDER_LEN], U8_MAX, &len); if (err < 0) return err; priv->len = len; - err = nft_validate_register_load(priv->sreg, priv->len); + err = nft_parse_register_load(tb[NFTA_BYTEORDER_SREG], &priv->sreg, + priv->len); if (err < 0) return err; - priv->dreg = nft_parse_register(tb[NFTA_BYTEORDER_DREG]); - return nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, priv->len); + return nft_parse_register_store(ctx, tb[NFTA_BYTEORDER_DREG], + &priv->dreg, NULL, NFT_DATA_VALUE, + priv->len); } static int nft_byteorder_dump(struct sk_buff *skb, const struct nft_expr *expr) diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c index 1d42d06f5b64..461763a571f2 100644 --- a/net/netfilter/nft_cmp.c +++ b/net/netfilter/nft_cmp.c @@ -18,7 +18,7 @@ struct nft_cmp_expr { struct nft_data data; - enum nft_registers sreg:8; + u8 sreg; u8 len; enum nft_cmp_ops op:8; }; @@ -73,22 +73,17 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_cmp_expr *priv = nft_expr_priv(expr); - struct nft_data_desc desc; + struct nft_data_desc desc = { + .type = NFT_DATA_VALUE, + .size = sizeof(priv->data), + }; int err; - err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc, - tb[NFTA_CMP_DATA]); + err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]); if (err < 0) return err; - if (desc.type != NFT_DATA_VALUE) { - err = -EINVAL; - nft_data_release(&priv->data, desc.type); - return err; - } - - priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]); - err = nft_validate_register_load(priv->sreg, desc.len); + err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len); if (err < 0) return err; @@ -202,17 +197,18 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { struct nft_cmp_fast_expr *priv 
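/*
 * Note that the cmp descriptors set only .type and .size, leaving .len
 * zero: the attribute may carry any value length up to sizeof(priv->data),
 * and the length actually parsed (desc.len) is then what the source
 * register is validated against.
 */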
= nft_expr_priv(expr); - struct nft_data_desc desc; struct nft_data data; + struct nft_data_desc desc = { + .type = NFT_DATA_VALUE, + .size = sizeof(data), + }; int err; - err = nft_data_init(NULL, &data, sizeof(data), &desc, - tb[NFTA_CMP_DATA]); + err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]); if (err < 0) return err; - priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]); - err = nft_validate_register_load(priv->sreg, desc.len); + err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len); if (err < 0) return err; @@ -274,12 +270,108 @@ const struct nft_expr_ops nft_cmp_fast_ops = { .offload = nft_cmp_fast_offload, }; +static u32 nft_cmp_mask(u32 bitlen) +{ + return (__force u32)cpu_to_le32(~0U >> (sizeof(u32) * BITS_PER_BYTE - bitlen)); +} + +static void nft_cmp16_fast_mask(struct nft_data *data, unsigned int bitlen) +{ + int len = bitlen / BITS_PER_BYTE; + int i, words = len / sizeof(u32); + + for (i = 0; i < words; i++) { + data->data[i] = 0xffffffff; + bitlen -= sizeof(u32) * BITS_PER_BYTE; + } + + if (len % sizeof(u32)) + data->data[i++] = nft_cmp_mask(bitlen); + + for (; i < 4; i++) + data->data[i] = 0; +} + +static int nft_cmp16_fast_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr); + struct nft_data_desc desc = { + .type = NFT_DATA_VALUE, + .size = sizeof(priv->data), + }; + int err; + + err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]); + if (err < 0) + return err; + + err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len); + if (err < 0) + return err; + + nft_cmp16_fast_mask(&priv->mask, desc.len * BITS_PER_BYTE); + priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ; + priv->len = desc.len; + + return 0; +} + +static int nft_cmp16_fast_offload(struct nft_offload_ctx *ctx, + struct nft_flow_rule *flow, + const struct nft_expr *expr) +{ + const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr); + struct nft_cmp_expr cmp = { + .data = priv->data, + .sreg = priv->sreg, + .len = priv->len, + .op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ, + }; + + return __nft_cmp_offload(ctx, flow, &cmp); +} + +static int nft_cmp16_fast_dump(struct sk_buff *skb, const struct nft_expr *expr) +{ + const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr); + enum nft_cmp_ops op = priv->inv ? 
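/*
 * Worked example for nft_cmp16_fast_mask() above: for a 6-byte key
 * (bitlen = 48, e.g. an Ethernet address), words = 1, so data[0] becomes
 * 0xffffffff; the remaining 16 bits give data[1] = cpu_to_le32(~0U >> 16)
 * = cpu_to_le32(0x0000ffff); data[2] and data[3] are cleared. Eval can then
 * mask 16 bytes of register data with whole-word operations regardless of
 * the key length.
 */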
NFT_CMP_NEQ : NFT_CMP_EQ; + + if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg)) + goto nla_put_failure; + if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op))) + goto nla_put_failure; + + if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data, + NFT_DATA_VALUE, priv->len) < 0) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -1; +} + + +const struct nft_expr_ops nft_cmp16_fast_ops = { + .type = &nft_cmp_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp16_fast_expr)), + .eval = NULL, /* inlined */ + .init = nft_cmp16_fast_init, + .dump = nft_cmp16_fast_dump, + .offload = nft_cmp16_fast_offload, +}; + static const struct nft_expr_ops * nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { - struct nft_data_desc desc; struct nft_data data; + struct nft_data_desc desc = { + .type = NFT_DATA_VALUE, + .size = sizeof(data), + }; enum nft_cmp_ops op; + u8 sreg; int err; if (tb[NFTA_CMP_SREG] == NULL || @@ -300,23 +392,21 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) return ERR_PTR(-EINVAL); } - err = nft_data_init(NULL, &data, sizeof(data), &desc, - tb[NFTA_CMP_DATA]); + err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]); if (err < 0) return ERR_PTR(err); - if (desc.type != NFT_DATA_VALUE) { - err = -EINVAL; - goto err1; + sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG])); + + if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) { + if (desc.len <= sizeof(u32)) + return &nft_cmp_fast_ops; + else if (desc.len <= sizeof(data) && + ((sreg >= NFT_REG_1 && sreg <= NFT_REG_4) || + (sreg >= NFT_REG32_00 && sreg <= NFT_REG32_12 && sreg % 2 == 0))) + return &nft_cmp16_fast_ops; } - - if (desc.len <= sizeof(u32) && (op == NFT_CMP_EQ || op == NFT_CMP_NEQ)) - return &nft_cmp_fast_ops; - return &nft_cmp_ops; -err1: - nft_data_release(&data, desc.type); - return ERR_PTR(-EINVAL); } struct nft_expr_type nft_cmp_type __read_mostly = { diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 8e56f353ff35..b8dbd20a6a4c 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -591,19 +591,14 @@ nfnl_compat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, int rev, int target) { struct nlmsghdr *nlh; - struct nfgenmsg *nfmsg; unsigned int flags = portid ? 
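/*
 * Selection logic in nft_cmp_select_ops() above: equality tests on up to
 * 4 bytes keep using nft_cmp_fast_ops; up to 16 bytes may use the new cmp16
 * variant, but only from a 16-byte register (NFT_REG_1..NFT_REG_4) or an
 * even-numbered 32-bit register no higher than NFT_REG32_12, so the full
 * 16-byte load stays within the register file (and, presumably, naturally
 * aligned). Everything else falls back to the generic nft_cmp_ops.
 */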
NLM_F_MULTI : 0; event = nfnl_msg_type(NFNL_SUBSYS_NFT_COMPAT, event); - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); - if (nlh == NULL) + nlh = nfnl_msg_put(skb, portid, seq, event, flags, family, + NFNETLINK_V0, 0); + if (!nlh) goto nlmsg_failure; - nfmsg = nlmsg_data(nlh); - nfmsg->nfgen_family = family; - nfmsg->version = NFNETLINK_V0; - nfmsg->res_id = 0; - if (nla_put_string(skb, NFTA_COMPAT_NAME, name) || nla_put_be32(skb, NFTA_COMPAT_REV, htonl(rev)) || nla_put_be32(skb, NFTA_COMPAT_TYPE, htonl(target))) diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 7fcb73ac2e6e..14093d86e682 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -27,8 +27,8 @@ struct nft_ct { enum nft_ct_keys key:8; enum ip_conntrack_dir dir:8; union { - enum nft_registers dreg:8; - enum nft_registers sreg:8; + u8 dreg; + u8 sreg; }; }; @@ -97,7 +97,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr, return; #ifdef CONFIG_NF_CONNTRACK_MARK case NFT_CT_MARK: - *dest = ct->mark; + *dest = READ_ONCE(ct->mark); return; #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK @@ -294,8 +294,8 @@ static void nft_ct_set_eval(const struct nft_expr *expr, switch (priv->key) { #ifdef CONFIG_NF_CONNTRACK_MARK case NFT_CT_MARK: - if (ct->mark != value) { - ct->mark = value; + if (READ_ONCE(ct->mark) != value) { + WRITE_ONCE(ct->mark, value); nf_conntrack_event_cache(IPCT_MARK, ct); } break; @@ -499,9 +499,8 @@ static int nft_ct_get_init(const struct nft_ctx *ctx, } } - priv->dreg = nft_parse_register(tb[NFTA_CT_DREG]); - err = nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, len); + err = nft_parse_register_store(ctx, tb[NFTA_CT_DREG], &priv->dreg, NULL, + NFT_DATA_VALUE, len); if (err < 0) return err; @@ -608,8 +607,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, } } - priv->sreg = nft_parse_register(tb[NFTA_CT_SREG]); - err = nft_validate_register_load(priv->sreg, len); + err = nft_parse_register_load(tb[NFTA_CT_SREG], &priv->sreg, len); if (err < 0) goto err1; diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c index 70c457476b87..5b5c607fbf83 100644 --- a/net/netfilter/nft_dup_netdev.c +++ b/net/netfilter/nft_dup_netdev.c @@ -14,7 +14,7 @@ #include struct nft_dup_netdev { - enum nft_registers sreg_dev:8; + u8 sreg_dev; }; static void nft_dup_netdev_eval(const struct nft_expr *expr, @@ -40,8 +40,8 @@ static int nft_dup_netdev_init(const struct nft_ctx *ctx, if (tb[NFTA_DUP_SREG_DEV] == NULL) return -EINVAL; - priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]); - return nft_validate_register_load(priv->sreg_dev, sizeof(int)); + return nft_parse_register_load(tb[NFTA_DUP_SREG_DEV], &priv->sreg_dev, + sizeof(int)); } static int nft_dup_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr) diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 58904bee1a0d..8c45e01fecdd 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -16,8 +16,8 @@ struct nft_dynset { struct nft_set *set; struct nft_set_ext_tmpl tmpl; enum nft_dynset_ops op:8; - enum nft_registers sreg_key:8; - enum nft_registers sreg_data:8; + u8 sreg_key; + u8 sreg_data; bool invert; u64 timeout; struct nft_expr *expr; @@ -154,8 +154,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx, return err; } - priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]); - err = nft_validate_register_load(priv->sreg_key, set->klen); + err = nft_parse_register_load(tb[NFTA_DYNSET_SREG_KEY], &priv->sreg_key, + 
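/*
 * nft_parse_register_load(), used by all of these init conversions, folds
 * the old nft_parse_register() + nft_validate_register_load() pair into one
 * call that stores the validated register number in a plain u8 (hence the
 * struct fields shrinking from enum nft_registers bitfields to u8). A
 * plausible sketch, inferred from the call sites:
 *
 *	int nft_parse_register_load(const struct nlattr *attr, u8 *sreg,
 *				    u32 len)
 *	{
 *		u32 reg = nft_parse_register(attr);
 *		int err = nft_validate_register_load(reg, len);
 *
 *		if (err < 0)
 *			return err;
 *
 *		*sreg = reg;
 *		return 0;
 *	}
 */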
set->klen); if (err < 0) return err; @@ -165,8 +165,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx, if (set->dtype == NFT_DATA_VERDICT) return -EOPNOTSUPP; - priv->sreg_data = nft_parse_register(tb[NFTA_DYNSET_SREG_DATA]); - err = nft_validate_register_load(priv->sreg_data, set->dlen); + err = nft_parse_register_load(tb[NFTA_DYNSET_SREG_DATA], + &priv->sreg_data, set->dlen); if (err < 0) return err; } else if (set->flags & NFT_SET_MAP) diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index faa0844c01fb..670dd146fb2b 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -19,8 +19,8 @@ struct nft_exthdr { u8 offset; u8 len; u8 op; - enum nft_registers dreg:8; - enum nft_registers sreg:8; + u8 dreg; + u8 sreg; u8 flags; }; @@ -353,12 +353,12 @@ static int nft_exthdr_init(const struct nft_ctx *ctx, priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); priv->offset = offset; priv->len = len; - priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]); priv->flags = flags; priv->op = op; - return nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, priv->len); + return nft_parse_register_store(ctx, tb[NFTA_EXTHDR_DREG], + &priv->dreg, NULL, NFT_DATA_VALUE, + priv->len); } static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx, @@ -403,11 +403,11 @@ static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx, priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); priv->offset = offset; priv->len = len; - priv->sreg = nft_parse_register(tb[NFTA_EXTHDR_SREG]); priv->flags = flags; priv->op = op; - return nft_validate_register_load(priv->sreg, priv->len); + return nft_parse_register_load(tb[NFTA_EXTHDR_SREG], &priv->sreg, + priv->len); } static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c index 4dfdaeaf09a5..b10ce732b337 100644 --- a/net/netfilter/nft_fib.c +++ b/net/netfilter/nft_fib.c @@ -86,7 +86,6 @@ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr, return -EINVAL; priv->result = ntohl(nla_get_be32(tb[NFTA_FIB_RESULT])); - priv->dreg = nft_parse_register(tb[NFTA_FIB_DREG]); switch (priv->result) { case NFT_FIB_RESULT_OIF: @@ -106,8 +105,8 @@ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr, return -EINVAL; } - err = nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, len); + err = nft_parse_register_store(ctx, tb[NFTA_FIB_DREG], &priv->dreg, + NULL, NFT_DATA_VALUE, len); if (err < 0) return err; diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c index 3b0dcd170551..7730409f6f09 100644 --- a/net/netfilter/nft_fwd_netdev.c +++ b/net/netfilter/nft_fwd_netdev.c @@ -18,7 +18,7 @@ #include struct nft_fwd_netdev { - enum nft_registers sreg_dev:8; + u8 sreg_dev; }; static void nft_fwd_netdev_eval(const struct nft_expr *expr, @@ -50,8 +50,8 @@ static int nft_fwd_netdev_init(const struct nft_ctx *ctx, if (tb[NFTA_FWD_SREG_DEV] == NULL) return -EINVAL; - priv->sreg_dev = nft_parse_register(tb[NFTA_FWD_SREG_DEV]); - return nft_validate_register_load(priv->sreg_dev, sizeof(int)); + return nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev, + sizeof(int)); } static int nft_fwd_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr) @@ -83,8 +83,8 @@ static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr) } struct nft_fwd_neigh { - enum nft_registers sreg_dev:8; - enum nft_registers sreg_addr:8; + u8 sreg_dev; + u8 sreg_addr; u8 nfproto; }; @@ -162,8 +162,6 @@ 
static int nft_fwd_neigh_init(const struct nft_ctx *ctx, !tb[NFTA_FWD_NFPROTO]) return -EINVAL; - priv->sreg_dev = nft_parse_register(tb[NFTA_FWD_SREG_DEV]); - priv->sreg_addr = nft_parse_register(tb[NFTA_FWD_SREG_ADDR]); priv->nfproto = ntohl(nla_get_be32(tb[NFTA_FWD_NFPROTO])); switch (priv->nfproto) { @@ -177,11 +175,13 @@ static int nft_fwd_neigh_init(const struct nft_ctx *ctx, return -EOPNOTSUPP; } - err = nft_validate_register_load(priv->sreg_dev, sizeof(int)); + err = nft_parse_register_load(tb[NFTA_FWD_SREG_DEV], &priv->sreg_dev, + sizeof(int)); if (err < 0) return err; - return nft_validate_register_load(priv->sreg_addr, addr_len); + return nft_parse_register_load(tb[NFTA_FWD_SREG_ADDR], &priv->sreg_addr, + addr_len); } static int nft_fwd_neigh_dump(struct sk_buff *skb, const struct nft_expr *expr) diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 96371d878e7e..f829f5289e16 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -14,8 +14,8 @@ #include struct nft_jhash { - enum nft_registers sreg:8; - enum nft_registers dreg:8; + u8 sreg; + u8 dreg; u8 len; bool autogen_seed:1; u32 modulus; @@ -38,7 +38,7 @@ static void nft_jhash_eval(const struct nft_expr *expr, } struct nft_symhash { - enum nft_registers dreg:8; + u8 dreg; u32 modulus; u32 offset; }; @@ -83,9 +83,6 @@ static int nft_jhash_init(const struct nft_ctx *ctx, if (tb[NFTA_HASH_OFFSET]) priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET])); - priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]); - priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]); - err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len); if (err < 0) return err; @@ -94,6 +91,10 @@ static int nft_jhash_init(const struct nft_ctx *ctx, priv->len = len; + err = nft_parse_register_load(tb[NFTA_HASH_SREG], &priv->sreg, len); + if (err < 0) + return err; + priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS])); if (priv->modulus < 1) return -ERANGE; @@ -108,9 +109,8 @@ static int nft_jhash_init(const struct nft_ctx *ctx, get_random_bytes(&priv->seed, sizeof(priv->seed)); } - return nft_validate_register_load(priv->sreg, len) && - nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, sizeof(u32)); + return nft_parse_register_store(ctx, tb[NFTA_HASH_DREG], &priv->dreg, + NULL, NFT_DATA_VALUE, sizeof(u32)); } static int nft_symhash_init(const struct nft_ctx *ctx, @@ -126,8 +126,6 @@ static int nft_symhash_init(const struct nft_ctx *ctx, if (tb[NFTA_HASH_OFFSET]) priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET])); - priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]); - priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS])); if (priv->modulus < 1) return -ERANGE; @@ -135,8 +133,9 @@ static int nft_symhash_init(const struct nft_ctx *ctx, if (priv->offset + priv->modulus - 1 < priv->offset) return -EOVERFLOW; - return nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, sizeof(u32)); + return nft_parse_register_store(ctx, tb[NFTA_HASH_DREG], + &priv->dreg, NULL, NFT_DATA_VALUE, + sizeof(u32)); } static int nft_jhash_dump(struct sk_buff *skb, diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 5c9d88560a47..fcdbc5ed3f36 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -29,28 +29,44 @@ static const struct nla_policy nft_immediate_policy[NFTA_IMMEDIATE_MAX + 1] = { [NFTA_IMMEDIATE_DATA] = { .type = NLA_NESTED }, }; +static enum nft_data_types nft_reg_to_type(const struct nlattr *nla) +{ + enum nft_data_types type; + u8 
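/*
 * For the immediate expression the expected data type cannot be fixed in
 * advance: a store to the verdict register carries NFT_DATA_VERDICT, any
 * other destination NFT_DATA_VALUE. nft_reg_to_type() therefore derives
 * desc.type from the destination register attribute before nft_data_init()
 * parses the data.
 */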
reg; + + reg = ntohl(nla_get_be32(nla)); + if (reg == NFT_REG_VERDICT) + type = NFT_DATA_VERDICT; + else + type = NFT_DATA_VALUE; + + return type; +} + static int nft_immediate_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_immediate_expr *priv = nft_expr_priv(expr); - struct nft_data_desc desc; + struct nft_data_desc desc = { + .size = sizeof(priv->data), + }; int err; if (tb[NFTA_IMMEDIATE_DREG] == NULL || tb[NFTA_IMMEDIATE_DATA] == NULL) return -EINVAL; - err = nft_data_init(ctx, &priv->data, sizeof(priv->data), &desc, - tb[NFTA_IMMEDIATE_DATA]); + desc.type = nft_reg_to_type(tb[NFTA_IMMEDIATE_DREG]); + err = nft_data_init(ctx, &priv->data, &desc, tb[NFTA_IMMEDIATE_DATA]); if (err < 0) return err; priv->dlen = desc.len; - priv->dreg = nft_parse_register(tb[NFTA_IMMEDIATE_DREG]); - err = nft_validate_register_store(ctx, priv->dreg, &priv->data, - desc.type, desc.len); + err = nft_parse_register_store(ctx, tb[NFTA_IMMEDIATE_DREG], + &priv->dreg, &priv->data, desc.type, + desc.len); if (err < 0) goto err1; diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index f1363b8aabba..b0f558b4fea5 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c @@ -17,8 +17,8 @@ struct nft_lookup { struct nft_set *set; - enum nft_registers sreg:8; - enum nft_registers dreg:8; + u8 sreg; + u8 dreg; bool invert; struct nft_set_binding binding; }; @@ -76,8 +76,8 @@ static int nft_lookup_init(const struct nft_ctx *ctx, if (IS_ERR(set)) return PTR_ERR(set); - priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]); - err = nft_validate_register_load(priv->sreg, set->klen); + err = nft_parse_register_load(tb[NFTA_LOOKUP_SREG], &priv->sreg, + set->klen); if (err < 0) return err; @@ -100,9 +100,9 @@ static int nft_lookup_init(const struct nft_ctx *ctx, if (!(set->flags & NFT_SET_MAP)) return -EINVAL; - priv->dreg = nft_parse_register(tb[NFTA_LOOKUP_DREG]); - err = nft_validate_register_store(ctx, priv->dreg, NULL, - set->dtype, set->dlen); + err = nft_parse_register_store(ctx, tb[NFTA_LOOKUP_DREG], + &priv->dreg, NULL, set->dtype, + set->dlen); if (err < 0) return err; } else if (set->flags & NFT_SET_MAP) diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c index 71390b727040..9953e8053753 100644 --- a/net/netfilter/nft_masq.c +++ b/net/netfilter/nft_masq.c @@ -15,8 +15,8 @@ struct nft_masq { u32 flags; - enum nft_registers sreg_proto_min:8; - enum nft_registers sreg_proto_max:8; + u8 sreg_proto_min; + u8 sreg_proto_max; }; static const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = { @@ -54,19 +54,15 @@ static int nft_masq_init(const struct nft_ctx *ctx, } if (tb[NFTA_MASQ_REG_PROTO_MIN]) { - priv->sreg_proto_min = - nft_parse_register(tb[NFTA_MASQ_REG_PROTO_MIN]); - - err = nft_validate_register_load(priv->sreg_proto_min, plen); + err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MIN], + &priv->sreg_proto_min, plen); if (err < 0) return err; if (tb[NFTA_MASQ_REG_PROTO_MAX]) { - priv->sreg_proto_max = - nft_parse_register(tb[NFTA_MASQ_REG_PROTO_MAX]); - - err = nft_validate_register_load(priv->sreg_proto_max, - plen); + err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MAX], + &priv->sreg_proto_max, + plen); if (err < 0) return err; } else { diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index bf4b3ad5314c..44d9b38e5f90 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -32,8 
+33,6 @@ #define NFT_META_SECS_PER_DAY 86400 #define NFT_META_DAYS_PER_WEEK 7 -static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state); - static u8 nft_meta_weekday(void) { time64_t secs = ktime_get_real_seconds(); @@ -267,13 +266,6 @@ static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest, return true; } -static noinline u32 nft_prandom_u32(void) -{ - struct rnd_state *state = this_cpu_ptr(&nft_prandom_state); - - return prandom_u32_state(state); -} - #ifdef CONFIG_IP_ROUTE_CLASSID static noinline bool nft_meta_get_eval_rtclassid(const struct sk_buff *skb, u32 *dest) @@ -385,7 +377,7 @@ void nft_meta_get_eval(const struct nft_expr *expr, break; #endif case NFT_META_PRANDOM: - *dest = nft_prandom_u32(); + *dest = get_random_u32(); break; #ifdef CONFIG_XFRM case NFT_META_SECPATH: @@ -514,7 +506,6 @@ int nft_meta_get_init(const struct nft_ctx *ctx, len = IFNAMSIZ; break; case NFT_META_PRANDOM: - prandom_init_once(&nft_prandom_state); len = sizeof(u32); break; #ifdef CONFIG_XFRM @@ -535,9 +526,8 @@ int nft_meta_get_init(const struct nft_ctx *ctx, return -EOPNOTSUPP; } - priv->dreg = nft_parse_register(tb[NFTA_META_DREG]); - return nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, len); + return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg, + NULL, NFT_DATA_VALUE, len); } EXPORT_SYMBOL_GPL(nft_meta_get_init); @@ -661,8 +651,7 @@ int nft_meta_set_init(const struct nft_ctx *ctx, return -EOPNOTSUPP; } - priv->sreg = nft_parse_register(tb[NFTA_META_SREG]); - err = nft_validate_register_load(priv->sreg, len); + err = nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len); if (err < 0) return err; diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index ea53fd999f46..db8f9116eeb4 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -21,10 +21,10 @@ #include struct nft_nat { - enum nft_registers sreg_addr_min:8; - enum nft_registers sreg_addr_max:8; - enum nft_registers sreg_proto_min:8; - enum nft_registers sreg_proto_max:8; + u8 sreg_addr_min; + u8 sreg_addr_max; + u8 sreg_proto_min; + u8 sreg_proto_max; enum nf_nat_manip_type type:8; u8 family; u16 flags; @@ -208,18 +208,15 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, priv->family = family; if (tb[NFTA_NAT_REG_ADDR_MIN]) { - priv->sreg_addr_min = - nft_parse_register(tb[NFTA_NAT_REG_ADDR_MIN]); - err = nft_validate_register_load(priv->sreg_addr_min, alen); + err = nft_parse_register_load(tb[NFTA_NAT_REG_ADDR_MIN], + &priv->sreg_addr_min, alen); if (err < 0) return err; if (tb[NFTA_NAT_REG_ADDR_MAX]) { - priv->sreg_addr_max = - nft_parse_register(tb[NFTA_NAT_REG_ADDR_MAX]); - - err = nft_validate_register_load(priv->sreg_addr_max, - alen); + err = nft_parse_register_load(tb[NFTA_NAT_REG_ADDR_MAX], + &priv->sreg_addr_max, + alen); if (err < 0) return err; } else { @@ -231,19 +228,15 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, plen = sizeof_field(struct nf_nat_range, min_addr.all); if (tb[NFTA_NAT_REG_PROTO_MIN]) { - priv->sreg_proto_min = - nft_parse_register(tb[NFTA_NAT_REG_PROTO_MIN]); - - err = nft_validate_register_load(priv->sreg_proto_min, plen); + err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MIN], + &priv->sreg_proto_min, plen); if (err < 0) return err; if (tb[NFTA_NAT_REG_PROTO_MAX]) { - priv->sreg_proto_max = - nft_parse_register(tb[NFTA_NAT_REG_PROTO_MAX]); - - err = nft_validate_register_load(priv->sreg_proto_max, - plen); + err = 
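/*
 * The nft_meta hunk above and the nft_numgen hunk below also drop the
 * per-cpu prandom state: NFT_META_PRANDOM and the random number generator
 * expression now call get_random_u32() directly, removing the
 * prandom_init_once() seeding and the static per-cpu rnd_state.
 */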
nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MAX], + &priv->sreg_proto_max, + plen); if (err < 0) return err; } else { @@ -341,7 +334,8 @@ static void nft_nat_inet_eval(const struct nft_expr *expr, { const struct nft_nat *priv = nft_expr_priv(expr); - if (priv->family == nft_pf(pkt)) + if (priv->family == nft_pf(pkt) || + priv->family == NFPROTO_INET) nft_nat_eval(expr, regs, pkt); } diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c index f1fc824f9737..4e43214e88de 100644 --- a/net/netfilter/nft_numgen.c +++ b/net/netfilter/nft_numgen.c @@ -9,14 +9,13 @@ #include #include #include +#include #include #include #include -static DEFINE_PER_CPU(struct rnd_state, nft_numgen_prandom_state); - struct nft_ng_inc { - enum nft_registers dreg:8; + u8 dreg; u32 modulus; atomic_t counter; u32 offset; @@ -66,11 +65,10 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx, if (priv->offset + priv->modulus - 1 < priv->offset) return -EOVERFLOW; - priv->dreg = nft_parse_register(tb[NFTA_NG_DREG]); atomic_set(&priv->counter, priv->modulus - 1); - return nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, sizeof(u32)); + return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg, + NULL, NFT_DATA_VALUE, sizeof(u32)); } static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg, @@ -100,17 +98,14 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr) } struct nft_ng_random { - enum nft_registers dreg:8; + u8 dreg; u32 modulus; u32 offset; }; -static u32 nft_ng_random_gen(struct nft_ng_random *priv) +static u32 nft_ng_random_gen(const struct nft_ng_random *priv) { - struct rnd_state *state = this_cpu_ptr(&nft_numgen_prandom_state); - - return reciprocal_scale(prandom_u32_state(state), priv->modulus) + - priv->offset; + return reciprocal_scale(get_random_u32(), priv->modulus) + priv->offset; } static void nft_ng_random_eval(const struct nft_expr *expr, @@ -138,12 +133,8 @@ static int nft_ng_random_init(const struct nft_ctx *ctx, if (priv->offset + priv->modulus - 1 < priv->offset) return -EOVERFLOW; - prandom_init_once(&nft_numgen_prandom_state); - - priv->dreg = nft_parse_register(tb[NFTA_NG_DREG]); - - return nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, sizeof(u32)); + return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg, + NULL, NFT_DATA_VALUE, sizeof(u32)); } static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr) diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index 5f9207a9f485..bc104d36d3bb 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c @@ -95,7 +95,7 @@ static const struct nft_expr_ops nft_objref_ops = { struct nft_objref_map { struct nft_set *set; - enum nft_registers sreg:8; + u8 sreg; struct nft_set_binding binding; }; @@ -137,8 +137,8 @@ static int nft_objref_map_init(const struct nft_ctx *ctx, if (!(set->flags & NFT_SET_OBJECT)) return -EINVAL; - priv->sreg = nft_parse_register(tb[NFTA_OBJREF_SET_SREG]); - err = nft_validate_register_load(priv->sreg, set->klen); + err = nft_parse_register_load(tb[NFTA_OBJREF_SET_SREG], &priv->sreg, + set->klen); if (err < 0) return err; diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c index 2c957629ea66..720dc9fba6d4 100644 --- a/net/netfilter/nft_osf.c +++ b/net/netfilter/nft_osf.c @@ -6,7 +6,7 @@ #include struct nft_osf { - enum nft_registers dreg:8; + u8 dreg; u8 ttl; u32 flags; }; @@ -83,9 +83,9 @@ static int nft_osf_init(const struct nft_ctx *ctx, priv->flags = 
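/*
 * The nft_nat_inet_eval() change above is a behavioural fix: for a NAT
 * expression with family NFPROTO_INET, nft_pf(pkt) is NFPROTO_IPV4 or
 * NFPROTO_IPV6 at run time and never equals priv->family, so inet-family
 * NAT rules were silently skipped; matching NFPROTO_INET explicitly makes
 * them evaluate again.
 */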
flags; } - priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]); - err = nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN); + err = nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg, + NULL, NFT_DATA_VALUE, + NFT_OSF_MAXGENRELEN); if (err < 0) return err; @@ -115,9 +115,21 @@ static int nft_osf_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nft_data **data) { - return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) | - (1 << NF_INET_PRE_ROUTING) | - (1 << NF_INET_FORWARD)); + unsigned int hooks; + + switch (ctx->family) { + case NFPROTO_IPV4: + case NFPROTO_IPV6: + case NFPROTO_INET: + hooks = (1 << NF_INET_LOCAL_IN) | + (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_FORWARD); + break; + default: + return -EOPNOTSUPP; + } + + return nft_chain_validate_hooks(ctx->chain, hooks); } static struct nft_expr_type nft_osf_type; diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 6a8495bd08bb..551e0d6cf63d 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -144,10 +144,10 @@ static int nft_payload_init(const struct nft_ctx *ctx, priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET])); priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN])); - priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]); - return nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, priv->len); + return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG], + &priv->dreg, NULL, NFT_DATA_VALUE, + priv->len); } static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr) @@ -660,18 +660,23 @@ static int nft_payload_set_init(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { struct nft_payload_set *priv = nft_expr_priv(expr); + u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE; + int err; priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET])); priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN])); - priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]); if (tb[NFTA_PAYLOAD_CSUM_TYPE]) - priv->csum_type = - ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE])); - if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) - priv->csum_offset = - ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET])); + csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE])); + if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) { + err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX, + &csum_offset); + if (err < 0) + return err; + + priv->csum_offset = csum_offset; + } if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) { u32 flags; @@ -682,7 +687,7 @@ static int nft_payload_set_init(const struct nft_ctx *ctx, priv->csum_flags = flags; } - switch (priv->csum_type) { + switch (csum_type) { case NFT_PAYLOAD_CSUM_NONE: case NFT_PAYLOAD_CSUM_INET: break; @@ -696,8 +701,10 @@ static int nft_payload_set_init(const struct nft_ctx *ctx, default: return -EOPNOTSUPP; } + priv->csum_type = csum_type; - return nft_validate_register_load(priv->sreg, priv->len); + return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg, + priv->len); } static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr) @@ -733,6 +740,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx, { enum nft_payload_bases base; unsigned int offset, len; + int err; if (tb[NFTA_PAYLOAD_BASE] == NULL || tb[NFTA_PAYLOAD_OFFSET] == NULL || @@ -758,8 +766,13 @@ nft_payload_select_ops(const struct nft_ctx *ctx, if 
(tb[NFTA_PAYLOAD_DREG] == NULL) return ERR_PTR(-EINVAL); - offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET])); - len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN])); + err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset); + if (err < 0) + return ERR_PTR(err); + + err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len); + if (err < 0) + return ERR_PTR(err); if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER) diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c index 23265d757acb..9ba1de51ac07 100644 --- a/net/netfilter/nft_queue.c +++ b/net/netfilter/nft_queue.c @@ -19,10 +19,10 @@ static u32 jhash_initval __read_mostly; struct nft_queue { - enum nft_registers sreg_qnum:8; - u16 queuenum; - u16 queues_total; - u16 flags; + u8 sreg_qnum; + u16 queuenum; + u16 queues_total; + u16 flags; }; static void nft_queue_eval(const struct nft_expr *expr, @@ -111,8 +111,8 @@ static int nft_queue_sreg_init(const struct nft_ctx *ctx, struct nft_queue *priv = nft_expr_priv(expr); int err; - priv->sreg_qnum = nft_parse_register(tb[NFTA_QUEUE_SREG_QNUM]); - err = nft_validate_register_load(priv->sreg_qnum, sizeof(u32)); + err = nft_parse_register_load(tb[NFTA_QUEUE_SREG_QNUM], + &priv->sreg_qnum, sizeof(u32)); if (err < 0) return err; diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c index 89efcc5a533d..e6bbe32c323d 100644 --- a/net/netfilter/nft_range.c +++ b/net/netfilter/nft_range.c @@ -15,7 +15,7 @@ struct nft_range_expr { struct nft_data data_from; struct nft_data data_to; - enum nft_registers sreg:8; + u8 sreg; u8 len; enum nft_range_ops op:8; }; @@ -51,7 +51,14 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr const struct nlattr * const tb[]) { struct nft_range_expr *priv = nft_expr_priv(expr); - struct nft_data_desc desc_from, desc_to; + struct nft_data_desc desc_from = { + .type = NFT_DATA_VALUE, + .size = sizeof(priv->data_from), + }; + struct nft_data_desc desc_to = { + .type = NFT_DATA_VALUE, + .size = sizeof(priv->data_to), + }; int err; u32 op; @@ -61,33 +68,23 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr !tb[NFTA_RANGE_TO_DATA]) return -EINVAL; - err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from), - &desc_from, tb[NFTA_RANGE_FROM_DATA]); + err = nft_data_init(NULL, &priv->data_from, &desc_from, + tb[NFTA_RANGE_FROM_DATA]); if (err < 0) return err; - if (desc_from.type != NFT_DATA_VALUE) { - err = -EINVAL; - goto err1; - } - - err = nft_data_init(NULL, &priv->data_to, sizeof(priv->data_to), - &desc_to, tb[NFTA_RANGE_TO_DATA]); + err = nft_data_init(NULL, &priv->data_to, &desc_to, + tb[NFTA_RANGE_TO_DATA]); if (err < 0) goto err1; - if (desc_to.type != NFT_DATA_VALUE) { - err = -EINVAL; - goto err2; - } - if (desc_from.len != desc_to.len) { err = -EINVAL; goto err2; } - priv->sreg = nft_parse_register(tb[NFTA_RANGE_SREG]); - err = nft_validate_register_load(priv->sreg, desc_from.len); + err = nft_parse_register_load(tb[NFTA_RANGE_SREG], &priv->sreg, + desc_from.len); if (err < 0) goto err2; diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c index 2056051c0af0..ba09890dddb5 100644 --- a/net/netfilter/nft_redir.c +++ b/net/netfilter/nft_redir.c @@ -14,8 +14,8 @@ #include struct nft_redir { - enum nft_registers sreg_proto_min:8; - enum nft_registers sreg_proto_max:8; + u8 sreg_proto_min; + u8 sreg_proto_max; u16 flags; }; @@ -50,19 +50,15 @@ static int nft_redir_init(const struct nft_ctx *ctx, plen = 
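/*
 * The nft_payload changes above guard the u8 fields of the expression:
 * offset, len and csum_offset are now parsed with
 * nft_parse_u32_check(..., U8_MAX, ...), so an out-of-range attribute fails
 * up front instead of passing the fast-ops selection with its full 32-bit
 * value and then being silently truncated on assignment.
 */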
sizeof_field(struct nf_nat_range, min_addr.all); if (tb[NFTA_REDIR_REG_PROTO_MIN]) { - priv->sreg_proto_min = - nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MIN]); - - err = nft_validate_register_load(priv->sreg_proto_min, plen); + err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN], + &priv->sreg_proto_min, plen); if (err < 0) return err; if (tb[NFTA_REDIR_REG_PROTO_MAX]) { - priv->sreg_proto_max = - nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MAX]); - - err = nft_validate_register_load(priv->sreg_proto_max, - plen); + err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MAX], + &priv->sreg_proto_max, + plen); if (err < 0) return err; } else { diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c index 7cfcb0e2f7ee..bcd01a63e38f 100644 --- a/net/netfilter/nft_rt.c +++ b/net/netfilter/nft_rt.c @@ -15,7 +15,7 @@ struct nft_rt { enum nft_rt_keys key:8; - enum nft_registers dreg:8; + u8 dreg; }; static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skbdst) @@ -141,9 +141,8 @@ static int nft_rt_get_init(const struct nft_ctx *ctx, return -EOPNOTSUPP; } - priv->dreg = nft_parse_register(tb[NFTA_RT_DREG]); - return nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, len); + return nft_parse_register_store(ctx, tb[NFTA_RT_DREG], &priv->dreg, + NULL, NFT_DATA_VALUE, len); } static int nft_rt_get_dump(struct sk_buff *skb, diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 858c8d4d659a..a5cfb321ae23 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -142,6 +142,7 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key, /* Another cpu may race to insert the element with the same key */ if (prev) { nft_set_elem_destroy(set, he, true); + atomic_dec(&set->nelems); he = prev; } @@ -151,6 +152,7 @@ out: err2: nft_set_elem_destroy(set, he, true); + atomic_dec(&set->nelems); err1: return false; } diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index f67c4436c5d3..30cf0673d6c1 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -1162,6 +1162,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, struct nft_pipapo_match *m = priv->clone; u8 genmask = nft_genmask_next(net); struct nft_pipapo_field *f; + const u8 *start_p, *end_p; int i, bsize_max, err = 0; if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END)) @@ -1202,9 +1203,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, } /* Validate */ + start_p = start; + end_p = end; nft_pipapo_for_each_field(f, i, m) { - const u8 *start_p = start, *end_p = end; - if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX) return -ENOSPC; @@ -2120,6 +2121,32 @@ out_scratch: return err; } +/** + * nft_set_pipapo_match_destroy() - Destroy elements from key mapping array + * @set: nftables API set representation + * @m: matching data pointing to key mapping array + */ +static void nft_set_pipapo_match_destroy(const struct nft_set *set, + struct nft_pipapo_match *m) +{ + struct nft_pipapo_field *f; + int i, r; + + for (i = 0, f = m->f; i < m->field_count - 1; i++, f++) + ; + + for (r = 0; r < f->rules; r++) { + struct nft_pipapo_elem *e; + + if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e) + continue; + + e = f->mt[r].e; + + nft_set_elem_destroy(set, e, true); + } +} + /** * nft_pipapo_destroy() - Free private data for set and all committed elements * @set: nftables API set representation @@ -2128,26 +2155,13 @@ static void 
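/*
 * In nft_set_pipapo_match_destroy() above, element pointers live in the
 * mapping table of the *last* field, so the first loop merely advances f to
 * that field. A ranged element can span several consecutive rules that all
 * point to the same element, hence rules whose mt[r + 1].e equals mt[r].e
 * are skipped so each element is freed exactly once. Factoring the walk out
 * lets nft_pipapo_destroy() below also release elements held by a dirty
 * clone, which previously leaked.
 */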
nft_pipapo_destroy(const struct nft_set *set) { struct nft_pipapo *priv = nft_set_priv(set); struct nft_pipapo_match *m; - struct nft_pipapo_field *f; - int i, r, cpu; + int cpu; m = rcu_dereference_protected(priv->match, true); if (m) { rcu_barrier(); - for (i = 0, f = m->f; i < m->field_count - 1; i++, f++) - ; - - for (r = 0; r < f->rules; r++) { - struct nft_pipapo_elem *e; - - if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e) - continue; - - e = f->mt[r].e; - - nft_set_elem_destroy(set, e, true); - } + nft_set_pipapo_match_destroy(set, m); #ifdef NFT_PIPAPO_ALIGN free_percpu(m->scratch_aligned); @@ -2161,6 +2175,11 @@ static void nft_pipapo_destroy(const struct nft_set *set) } if (priv->clone) { + m = priv->clone; + + if (priv->dirty) + nft_set_pipapo_match_destroy(set, m); + #ifdef NFT_PIPAPO_ALIGN free_percpu(priv->clone->scratch_aligned); #endif diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 217ab3644c25..94a5446c5eae 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -348,7 +348,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, *ext = &rbe->ext; return -EEXIST; } else { - p = &parent->rb_left; + overlap = false; + if (nft_rbtree_interval_end(rbe)) + p = &parent->rb_left; + else + p = &parent->rb_right; } } diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c index a28aca5124ce..f6d517185d9c 100644 --- a/net/netfilter/nft_socket.c +++ b/net/netfilter/nft_socket.c @@ -10,7 +10,7 @@ struct nft_socket { enum nft_socket_keys key:8; union { - enum nft_registers dreg:8; + u8 dreg; }; }; @@ -33,6 +33,32 @@ static void nft_socket_wildcard(const struct nft_pktinfo *pkt, } } +static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt) +{ + const struct net_device *indev = nft_in(pkt); + const struct sk_buff *skb = pkt->skb; + struct sock *sk = NULL; + + if (!indev) + return NULL; + + switch (nft_pf(pkt)) { + case NFPROTO_IPV4: + sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, indev); + break; +#if IS_ENABLED(CONFIG_NF_TABLES_IPV6) + case NFPROTO_IPV6: + sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, indev); + break; +#endif + default: + WARN_ON_ONCE(1); + break; + } + + return sk; +} + static void nft_socket_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@ -46,20 +72,7 @@ static void nft_socket_eval(const struct nft_expr *expr, sk = NULL; if (!sk) - switch(nft_pf(pkt)) { - case NFPROTO_IPV4: - sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, nft_in(pkt)); - break; -#if IS_ENABLED(CONFIG_NF_TABLES_IPV6) - case NFPROTO_IPV6: - sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, nft_in(pkt)); - break; -#endif - default: - WARN_ON_ONCE(1); - regs->verdict.code = NFT_BREAK; - return; - } + sk = nft_socket_do_lookup(pkt); if (!sk) { regs->verdict.code = NFT_BREAK; @@ -133,9 +146,8 @@ static int nft_socket_init(const struct nft_ctx *ctx, return -EOPNOTSUPP; } - priv->dreg = nft_parse_register(tb[NFTA_SOCKET_DREG]); - return nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, len); + return nft_parse_register_store(ctx, tb[NFTA_SOCKET_DREG], &priv->dreg, + NULL, NFT_DATA_VALUE, len); } static int nft_socket_dump(struct sk_buff *skb, @@ -150,6 +162,16 @@ static int nft_socket_dump(struct sk_buff *skb, return 0; } +static int nft_socket_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + return nft_chain_validate_hooks(ctx->chain, + (1 << NF_INET_PRE_ROUTING) | + (1 << 
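/*
 * This validate callback limits nft_socket to the prerouting, input and
 * output hooks: those appear to be the only points where the socket lookup
 * is well-defined, with an ingress device available for
 * nft_socket_do_lookup() on prerouting/input and skb->sk already set on
 * output.
 */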
NF_INET_LOCAL_IN) | + (1 << NF_INET_LOCAL_OUT)); +} + static struct nft_expr_type nft_socket_type; static const struct nft_expr_ops nft_socket_ops = { .type = &nft_socket_type, @@ -157,6 +179,7 @@ static const struct nft_expr_ops nft_socket_ops = { .eval = nft_socket_eval, .init = nft_socket_init, .dump = nft_socket_dump, + .validate = nft_socket_validate, }; static struct nft_expr_type nft_socket_type __read_mostly = { diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c index 242222dc52c3..37c728bdad41 100644 --- a/net/netfilter/nft_tproxy.c +++ b/net/netfilter/nft_tproxy.c @@ -13,9 +13,9 @@ #endif struct nft_tproxy { - enum nft_registers sreg_addr:8; - enum nft_registers sreg_port:8; - u8 family; + u8 sreg_addr; + u8 sreg_port; + u8 family; }; static void nft_tproxy_eval_v4(const struct nft_expr *expr, @@ -254,15 +254,15 @@ static int nft_tproxy_init(const struct nft_ctx *ctx, } if (tb[NFTA_TPROXY_REG_ADDR]) { - priv->sreg_addr = nft_parse_register(tb[NFTA_TPROXY_REG_ADDR]); - err = nft_validate_register_load(priv->sreg_addr, alen); + err = nft_parse_register_load(tb[NFTA_TPROXY_REG_ADDR], + &priv->sreg_addr, alen); if (err < 0) return err; } if (tb[NFTA_TPROXY_REG_PORT]) { - priv->sreg_port = nft_parse_register(tb[NFTA_TPROXY_REG_PORT]); - err = nft_validate_register_load(priv->sreg_port, sizeof(u16)); + err = nft_parse_register_load(tb[NFTA_TPROXY_REG_PORT], + &priv->sreg_port, sizeof(u16)); if (err < 0) return err; } diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index d3eb953d0333..2ee50996da8c 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -15,7 +15,7 @@ struct nft_tunnel { enum nft_tunnel_keys key:8; - enum nft_registers dreg:8; + u8 dreg; enum nft_tunnel_mode mode:8; }; @@ -93,8 +93,6 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx, return -EOPNOTSUPP; } - priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]); - if (tb[NFTA_TUNNEL_MODE]) { priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE])); if (priv->mode > NFT_TUNNEL_MODE_MAX) @@ -103,8 +101,8 @@ static int nft_tunnel_get_init(const struct nft_ctx *ctx, priv->mode = NFT_TUNNEL_MODE_NONE; } - return nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, len); + return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg, + NULL, NFT_DATA_VALUE, len); } static int nft_tunnel_get_dump(struct sk_buff *skb, @@ -135,6 +133,7 @@ static const struct nft_expr_ops nft_tunnel_get_ops = { static struct nft_expr_type nft_tunnel_type __read_mostly = { .name = "tunnel", + .family = NFPROTO_NETDEV, .ops = &nft_tunnel_get_ops, .policy = nft_tunnel_policy, .maxattr = NFTA_TUNNEL_MAX, diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c index 06d5cabf1d7c..cbbbc4ecad3a 100644 --- a/net/netfilter/nft_xfrm.c +++ b/net/netfilter/nft_xfrm.c @@ -24,7 +24,7 @@ static const struct nla_policy nft_xfrm_policy[NFTA_XFRM_MAX + 1] = { struct nft_xfrm { enum nft_xfrm_keys key:8; - enum nft_registers dreg:8; + u8 dreg; u8 dir; u8 spnum; }; @@ -86,9 +86,8 @@ static int nft_xfrm_get_init(const struct nft_ctx *ctx, priv->spnum = spnum; - priv->dreg = nft_parse_register(tb[NFTA_XFRM_DREG]); - return nft_validate_register_store(ctx, priv->dreg, NULL, - NFT_DATA_VALUE, len); + return nft_parse_register_store(ctx, tb[NFTA_XFRM_DREG], &priv->dreg, + NULL, NFT_DATA_VALUE, len); } /* Return true if key asks for daddr/saddr and current diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index e5ebc0810675..ad3c033db64e 100644 --- 
a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c @@ -30,6 +30,7 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info) u_int32_t new_targetmark; struct nf_conn *ct; u_int32_t newmark; + u_int32_t oldmark; ct = nf_ct_get(skb, &ctinfo); if (ct == NULL) @@ -37,14 +38,15 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info) switch (info->mode) { case XT_CONNMARK_SET: - newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; + oldmark = READ_ONCE(ct->mark); + newmark = (oldmark & ~info->ctmask) ^ info->ctmark; if (info->shift_dir == D_SHIFT_RIGHT) newmark >>= info->shift_bits; else newmark <<= info->shift_bits; - if (ct->mark != newmark) { - ct->mark = newmark; + if (READ_ONCE(ct->mark) != newmark) { + WRITE_ONCE(ct->mark, newmark); nf_conntrack_event_cache(IPCT_MARK, ct); } break; @@ -55,15 +57,15 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info) else new_targetmark <<= info->shift_bits; - newmark = (ct->mark & ~info->ctmask) ^ + newmark = (READ_ONCE(ct->mark) & ~info->ctmask) ^ new_targetmark; - if (ct->mark != newmark) { - ct->mark = newmark; + if (READ_ONCE(ct->mark) != newmark) { + WRITE_ONCE(ct->mark, newmark); nf_conntrack_event_cache(IPCT_MARK, ct); } break; case XT_CONNMARK_RESTORE: - new_targetmark = (ct->mark & info->ctmask); + new_targetmark = (READ_ONCE(ct->mark) & info->ctmask); if (info->shift_dir == D_SHIFT_RIGHT) new_targetmark >>= info->shift_bits; else @@ -126,7 +128,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par) if (ct == NULL) return false; - return ((ct->mark & info->mask) == info->mark) ^ info->invert; + return ((READ_ONCE(ct->mark) & info->mask) == info->mark) ^ info->invert; } static int connmark_mt_check(const struct xt_mtchk_param *par) diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 5e1239cef000..91b35b7c80d8 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c @@ -885,6 +885,8 @@ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, unsigned char bitmask; unsigned char byte; + if (offset >= bitmap_len) + return -1; byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index f37916156ca5..d96a610929d9 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1988,7 +1988,6 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, copied = len; } - skb_reset_transport_header(data_skb); err = skb_copy_datagram_msg(data_skb, 0, msg, copied); if (msg->msg_name) { @@ -2276,6 +2275,13 @@ static int netlink_dump(struct sock *sk) * single netdev. The outcome is MSG_TRUNC error. 
*/ skb_reserve(skb, skb_tailroom(skb) - alloc_size); + + /* Make sure malicious BPF programs cannot read uninitialized memory + * from skb->head -> skb->data + */ + skb_reset_network_header(skb); + skb_reset_mac_header(skb); + netlink_skb_set_owner_r(skb, sk); if (nlk->dump_done_errno > 0) { diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index c992424e4d63..9fd7ba01b9f8 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -1182,13 +1182,17 @@ static int ctrl_dumppolicy_start(struct netlink_callback *cb) op.policy, op.maxattr); if (err) - return err; + goto err_free_state; } } if (!ctx->state) return -ENODATA; return 0; + +err_free_state: + netlink_policy_dump_free(ctx->state); + return err; } static void *ctrl_dumppolicy_prep(struct sk_buff *skb, diff --git a/net/netlink/policy.c b/net/netlink/policy.c index 8d7c900e27f4..87e3de0fde89 100644 --- a/net/netlink/policy.c +++ b/net/netlink/policy.c @@ -144,7 +144,7 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate, err = add_policy(&state, policy, maxtype); if (err) - return err; + goto err_try_undo; for (policy_idx = 0; policy_idx < state->n_alloc && state->policies[policy_idx].policy; @@ -164,7 +164,7 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate, policy[type].nested_policy, policy[type].len); if (err) - return err; + goto err_try_undo; break; default: break; @@ -174,6 +174,16 @@ int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate, *pstate = state; return 0; + +err_try_undo: + /* Try to preserve reasonable unwind semantics - if we're starting from + * scratch clean up fully, otherwise record what we got and the caller will. + */ + if (!*pstate) + netlink_policy_dump_free(state); + else + *pstate = state; + return err; } static bool diff --git a/net/nfc/core.c b/net/nfc/core.c index 6800470dd6df..2ef56366bd5f 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -38,7 +38,7 @@ int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -94,7 +94,7 @@ int nfc_dev_up(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -142,7 +142,7 @@ int nfc_dev_down(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -206,7 +206,7 @@ int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -245,7 +245,7 @@ int nfc_stop_poll(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -290,7 +290,7 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -334,7 +334,7 @@ int nfc_dep_link_down(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -400,7 +400,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -446,7 +446,7 @@
int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -493,7 +493,7 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; kfree_skb(skb); goto error; @@ -550,7 +550,7 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -599,7 +599,7 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -1126,6 +1126,7 @@ int nfc_register_device(struct nfc_dev *dev) dev->rfkill = NULL; } } + dev->shutting_down = false; device_unlock(&dev->dev); rc = nfc_genl_device_added(dev); @@ -1157,13 +1158,12 @@ void nfc_unregister_device(struct nfc_dev *dev) if (dev->rfkill) { rfkill_unregister(dev->rfkill); rfkill_destroy(dev->rfkill); + dev->rfkill = NULL; } + dev->shutting_down = true; device_unlock(&dev->dev); if (dev->ops->check_presence) { - device_lock(&dev->dev); - dev->shutting_down = true; - device_unlock(&dev->dev); del_timer_sync(&dev->check_pres_timer); cancel_work_sync(&dev->check_pres_work); } diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index e38719e2ee58..ed9019d807c7 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -530,7 +530,7 @@ static int nci_open_device(struct nci_dev *ndev) skb_queue_purge(&ndev->tx_q); ndev->ops->close(ndev); - ndev->flags = 0; + ndev->flags &= BIT(NCI_UNREG); } done: @@ -548,6 +548,10 @@ static int nci_close_device(struct nci_dev *ndev) mutex_lock(&ndev->req_lock); if (!test_and_clear_bit(NCI_UP, &ndev->flags)) { + /* Need to flush the cmd wq in case + * there is a queued/running cmd_work + */ + flush_workqueue(ndev->cmd_wq); del_timer_sync(&ndev->cmd_timer); del_timer_sync(&ndev->data_timer); mutex_unlock(&ndev->req_lock); diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c index ce3382be937f..b4548d887489 100644 --- a/net/nfc/nci/data.c +++ b/net/nfc/nci/data.c @@ -118,7 +118,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev, skb_frag = nci_skb_alloc(ndev, (NCI_DATA_HDR_SIZE + frag_len), - GFP_KERNEL); + GFP_ATOMIC); if (skb_frag == NULL) { rc = -ENOMEM; goto free_exit; @@ -279,8 +279,10 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_plen(skb->data)); conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data)); - if (!conn_info) + if (!conn_info) { + kfree_skb(skb); return; + } /* strip the nci data header */ skb_pull(skb, NCI_DATA_HDR_SIZE); diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c index 04e55ccb3383..4fe336ff2bfa 100644 --- a/net/nfc/nci/hci.c +++ b/net/nfc/nci/hci.c @@ -153,7 +153,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe, i = 0; skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len + - NCI_DATA_HDR_SIZE, GFP_KERNEL); + NCI_DATA_HDR_SIZE, GFP_ATOMIC); if (!skb) return -ENOMEM; @@ -186,7 +186,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe, if (i < data_len) { skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len + - NCI_DATA_HDR_SIZE, GFP_KERNEL); + NCI_DATA_HDR_SIZE, GFP_ATOMIC); if (!skb) return -ENOMEM; diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c index 33e1170817f0..f8b20cddd5c9 100644 --- a/net/nfc/nci/ntf.c +++ 
b/net/nfc/nci/ntf.c @@ -218,6 +218,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev, target->sens_res = nfca_poll->sens_res; target->sel_res = nfca_poll->sel_res; target->nfcid1_len = nfca_poll->nfcid1_len; + if (target->nfcid1_len > ARRAY_SIZE(target->nfcid1)) + return -EPROTO; if (target->nfcid1_len > 0) { memcpy(target->nfcid1, nfca_poll->nfcid1, target->nfcid1_len); @@ -226,6 +228,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev, nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params; target->sensb_res_len = nfcb_poll->sensb_res_len; + if (target->sensb_res_len > ARRAY_SIZE(target->sensb_res)) + return -EPROTO; if (target->sensb_res_len > 0) { memcpy(target->sensb_res, nfcb_poll->sensb_res, target->sensb_res_len); @@ -234,6 +238,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev, nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params; target->sensf_res_len = nfcf_poll->sensf_res_len; + if (target->sensf_res_len > ARRAY_SIZE(target->sensf_res)) + return -EPROTO; if (target->sensf_res_len > 0) { memcpy(target->sensf_res, nfcf_poll->sensf_res, target->sensf_res_len); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 78acc4e9ac93..b8939ebaa6d3 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1244,7 +1244,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, struct sk_buff *msg; void *hdr; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; @@ -1260,7 +1260,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, genlmsg_end(msg, hdr); - genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); return 0; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 525c1540f10e..80fee9d118ee 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -372,6 +372,7 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, update_ip_l4_checksum(skb, nh, *addr, new_addr); csum_replace4(&nh->check, *addr, new_addr); skb_clear_hash(skb); + ovs_ct_clear(skb, NULL); *addr = new_addr; } @@ -419,6 +420,7 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, update_ipv6_checksum(skb, l4_proto, addr, new_addr); skb_clear_hash(skb); + ovs_ct_clear(skb, NULL); memcpy(addr, new_addr, sizeof(__be32[4])); } @@ -659,6 +661,7 @@ static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key, static void set_tp_port(struct sk_buff *skb, __be16 *port, __be16 new_port, __sum16 *check) { + ovs_ct_clear(skb, NULL); inet_proto_csum_replace2(check, skb, *port, new_port, false); *port = new_port; } @@ -698,6 +701,7 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key, uh->dest = dst; flow_key->tp.src = src; flow_key->tp.dst = dst; + ovs_ct_clear(skb, NULL); } skb_clear_hash(skb); @@ -760,6 +764,8 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key, sh->checksum = old_csum ^ old_correct_csum ^ new_csum; skb_clear_hash(skb); + ovs_ct_clear(skb, NULL); + flow_key->tp.src = sh->source; flow_key->tp.dst = sh->dest; @@ -1044,7 +1050,7 @@ static int clone(struct datapath *dp, struct sk_buff *skb, int rem = nla_len(attr); bool dont_clone_flow_key; - /* The first action is always 'OVS_CLONE_ATTR_ARG'. */ + /* The first action is always 'OVS_CLONE_ATTR_EXEC'. 
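The nci/ntf.c hunks above reject device-supplied length fields that exceed the destination arrays before any memcpy() runs, returning -EPROTO for the malformed frame. A minimal sketch of the same validate-then-copy shape, assuming a hypothetical fixed-size target buffer (the real code uses ARRAY_SIZE(), which for a byte array is equivalent to sizeof()):

    /* Sketch: reject an attacker/device-controlled length before copying. */
    #include <errno.h>
    #include <string.h>

    #define DEMO_ID_MAXSIZE 10              /* illustrative buffer size */

    struct demo_target {
            unsigned char id[DEMO_ID_MAXSIZE];
            unsigned int  id_len;
    };

    static int demo_store_id(struct demo_target *t,
                             const unsigned char *src, unsigned int len)
    {
            t->id_len = len;
            if (t->id_len > sizeof(t->id))
                    return -EPROTO;         /* malformed frame: refuse it */
            if (t->id_len > 0)
                    memcpy(t->id, src, t->id_len);
            return 0;
    }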
*/ clone_arg = nla_data(attr); dont_clone_flow_key = nla_get_u32(clone_arg); actions = nla_next(clone_arg, &rem); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 7ff98d39ec94..0f0f380e81a4 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -150,7 +150,7 @@ static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo) static u32 ovs_ct_get_mark(const struct nf_conn *ct) { #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) - return ct ? ct->mark : 0; + return ct ? READ_ONCE(ct->mark) : 0; #else return 0; #endif @@ -336,9 +336,9 @@ static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key, #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) u32 new_mark; - new_mark = ct_mark | (ct->mark & ~(mask)); - if (ct->mark != new_mark) { - ct->mark = new_mark; + new_mark = ct_mark | (READ_ONCE(ct->mark) & ~(mask)); + if (READ_ONCE(ct->mark) != new_mark) { + WRITE_ONCE(ct->mark, new_mark); if (nf_ct_is_confirmed(ct)) nf_conntrack_event_cache(IPCT_MARK, ct); key->ct.mark = new_mark; @@ -1324,7 +1324,8 @@ int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key) if (skb_nfct(skb)) { nf_conntrack_put(skb_nfct(skb)); nf_ct_set(skb, NULL, IP_CT_UNTRACKED); - ovs_ct_fill_key(skb, key); + if (key) + ovs_ct_fill_key(skb, key); } return 0; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 9d6ef6cb9b26..7ed97dc0b561 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -241,10 +241,17 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) upcall.portid = ovs_vport_find_upcall_portid(p, skb); upcall.mru = OVS_CB(skb)->mru; error = ovs_dp_upcall(dp, skb, key, &upcall, 0); - if (unlikely(error)) - kfree_skb(skb); - else + switch (error) { + case 0: + case -EAGAIN: + case -ERESTARTSYS: + case -EINTR: consume_skb(skb); + break; + default: + kfree_skb(skb); + break; + } stats_counter = &stats->n_missed; goto out; } @@ -537,8 +544,9 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, out: if (err) skb_tx_error(skb); - kfree_skb(user_skb); - kfree_skb(nskb); + consume_skb(user_skb); + consume_skb(nskb); + return err; } @@ -1584,7 +1592,8 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, if (IS_ERR(dp)) return; - WARN(dp->user_features, "Dropping previously announced user features\n"); + pr_warn("%s: Dropping previously announced user features\n", + ovs_dp_name(dp)); dp->user_features = 0; } diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index b03d142ec82e..c9ba61413c98 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -265,7 +265,7 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) if (flags & IP6_FH_F_FRAG) { if (frag_off) { key->ip.frag = OVS_FRAG_TYPE_LATER; - key->ip.proto = nexthdr; + key->ip.proto = NEXTHDR_FRAGMENT; return 0; } key->ip.frag = OVS_FRAG_TYPE_FIRST; diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 8c4bdfa627ca..293a798e89f4 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2288,6 +2288,62 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size) return sfa; } +static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len); + +static void ovs_nla_free_check_pkt_len_action(const struct nlattr *action) +{ + const struct nlattr *a; + int rem; + + nla_for_each_nested(a, action, rem) { + switch (nla_type(a)) { + case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL: + case 
OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER: + ovs_nla_free_nested_actions(nla_data(a), nla_len(a)); + break; + } + } +} + +static void ovs_nla_free_clone_action(const struct nlattr *action) +{ + const struct nlattr *a = nla_data(action); + int rem = nla_len(action); + + switch (nla_type(a)) { + case OVS_CLONE_ATTR_EXEC: + /* The real list of actions follows this attribute. */ + a = nla_next(a, &rem); + ovs_nla_free_nested_actions(a, rem); + break; + } +} + +static void ovs_nla_free_dec_ttl_action(const struct nlattr *action) +{ + const struct nlattr *a = nla_data(action); + + switch (nla_type(a)) { + case OVS_DEC_TTL_ATTR_ACTION: + ovs_nla_free_nested_actions(nla_data(a), nla_len(a)); + break; + } +} + +static void ovs_nla_free_sample_action(const struct nlattr *action) +{ + const struct nlattr *a = nla_data(action); + int rem = nla_len(action); + + switch (nla_type(a)) { + case OVS_SAMPLE_ATTR_ARG: + /* The real list of actions follows this attribute. */ + a = nla_next(a, &rem); + ovs_nla_free_nested_actions(a, rem); + break; + } +} + static void ovs_nla_free_set_action(const struct nlattr *a) { const struct nlattr *ovs_key = nla_data(a); @@ -2301,25 +2357,54 @@ static void ovs_nla_free_set_action(const struct nlattr *a) } } -void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts) +static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len) { const struct nlattr *a; int rem; - if (!sf_acts) + /* Whenever new actions are added, the need to update this + * function should be considered. + */ + BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 23); + + if (!actions) return; - nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) { + nla_for_each_attr(a, actions, len, rem) { switch (nla_type(a)) { - case OVS_ACTION_ATTR_SET: - ovs_nla_free_set_action(a); + case OVS_ACTION_ATTR_CHECK_PKT_LEN: + ovs_nla_free_check_pkt_len_action(a); break; + + case OVS_ACTION_ATTR_CLONE: + ovs_nla_free_clone_action(a); + break; + case OVS_ACTION_ATTR_CT: ovs_ct_free_action(a); break; + + case OVS_ACTION_ATTR_DEC_TTL: + ovs_nla_free_dec_ttl_action(a); + break; + + case OVS_ACTION_ATTR_SAMPLE: + ovs_nla_free_sample_action(a); + break; + + case OVS_ACTION_ATTR_SET: + ovs_nla_free_set_action(a); + break; } } +} +void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts) +{ + if (!sf_acts) + return; + + ovs_nla_free_nested_actions(sf_acts->actions, sf_acts->actions_len); kfree(sf_acts); } @@ -2351,7 +2436,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2); if (new_acts_size > MAX_ACTIONS_BUFSIZE) { - if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) { + if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) { OVS_NLERR(log, "Flow action size exceeds max %u", MAX_ACTIONS_BUFSIZE); return ERR_PTR(-EMSGSIZE); @@ -3419,7 +3504,9 @@ static int clone_action_to_attr(const struct nlattr *attr, if (!start) return -EMSGSIZE; - err = ovs_nla_put_actions(nla_data(attr), rem, skb); + /* Skipping the OVS_CLONE_ATTR_EXEC that is always the first attribute. 
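The reserve_sfa_size() hunk above replaces `(MAX_ACTIONS_BUFSIZE - next_offset) < req_size` with `(next_offset + req_size) > MAX_ACTIONS_BUFSIZE`. The point is that the subtraction is unsigned: if next_offset can ever exceed the cap (possible here because allocations are sized by ksize(), which rounds up), the difference wraps to a huge value and the bounds check silently passes. A standalone demonstration with made-up values:

    /* Demonstrates why the subtraction form of the limit check is unsafe
     * once the offset can exceed the cap. Compiles and runs as-is.
     */
    #include <stdio.h>

    #define MAX_ACTIONS_BUFSIZE 32768u

    int main(void)
    {
            unsigned int next_offset = 32772;       /* already past the cap */
            unsigned int req_size    = 16;

            /* Old form: 32768u - 32772 wraps to ~4 billion, so the check
             * fails to reject and a later copy would run off the buffer.
             */
            if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
                    puts("old check: rejected");
            else
                    puts("old check: passed (bogus)");

            /* New form: 32772 + 16 > 32768 rejects as intended. */
            if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE)
                    puts("new check: rejected");
            return 0;
    }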
*/ + attr = nla_next(nla_data(attr), &rem); + err = ovs_nla_put_actions(attr, rem, skb); if (err) nla_nest_cancel(skb, start); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index d0c95d7dd292..eaa030e2ad55 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2243,8 +2243,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (skb->ip_summed == CHECKSUM_PARTIAL) status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && - (skb->ip_summed == CHECKSUM_COMPLETE || - skb_csum_unnecessary(skb))) + skb_csum_unnecessary(skb)) status |= TP_STATUS_CSUM_VALID; if (snaplen > res) @@ -2817,8 +2816,9 @@ tpacket_error: status = TP_STATUS_SEND_REQUEST; err = po->xmit(skb); - if (unlikely(err > 0)) { - err = net_xmit_errno(err); + if (unlikely(err != 0)) { + if (err > 0) + err = net_xmit_errno(err); if (err && __packet_get_status(po, ph) == TP_STATUS_AVAILABLE) { /* skb was destructed already */ @@ -2985,8 +2985,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) if (err) goto out_free; - if (sock->type == SOCK_RAW && - !dev_validate_header(dev, skb->data, len)) { + if ((sock->type == SOCK_RAW && + !dev_validate_header(dev, skb->data, len)) || !skb->len) { err = -EINVAL; goto out_free; } @@ -3019,8 +3019,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb->no_fcs = 1; err = po->xmit(skb); - if (err > 0 && (err = net_xmit_errno(err)) != 0) - goto out_unlock; + if (unlikely(err != 0)) { + if (err > 0) + err = net_xmit_errno(err); + if (err) + goto out_unlock; + } dev_put(dev); @@ -3475,8 +3479,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (skb->ip_summed == CHECKSUM_PARTIAL) aux.tp_status |= TP_STATUS_CSUMNOTREADY; else if (skb->pkt_type != PACKET_OUTGOING && - (skb->ip_summed == CHECKSUM_COMPLETE || - skb_csum_unnecessary(skb))) + skb_csum_unnecessary(skb)) aux.tp_status |= TP_STATUS_CSUM_VALID; aux.tp_len = origlen; diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 56cffbfa000b..13448ca5aeff 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -20,6 +20,8 @@ /* auto-bind range */ #define QRTR_MIN_EPH_SOCKET 0x4000 #define QRTR_MAX_EPH_SOCKET 0x7fff +#define QRTR_EPH_PORT_RANGE \ + XA_LIMIT(QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET) /** * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1 @@ -106,8 +108,7 @@ static LIST_HEAD(qrtr_all_nodes); static DEFINE_MUTEX(qrtr_node_lock); /* local port allocation management */ -static DEFINE_IDR(qrtr_ports); -static DEFINE_MUTEX(qrtr_port_lock); +static DEFINE_XARRAY_ALLOC(qrtr_ports); /** * struct qrtr_node - endpoint node @@ -635,7 +636,7 @@ static struct qrtr_sock *qrtr_port_lookup(int port) port = 0; rcu_read_lock(); - ipc = idr_find(&qrtr_ports, port); + ipc = xa_load(&qrtr_ports, port); if (ipc) sock_hold(&ipc->sk); rcu_read_unlock(); @@ -677,9 +678,7 @@ static void qrtr_port_remove(struct qrtr_sock *ipc) __sock_put(&ipc->sk); - mutex_lock(&qrtr_port_lock); - idr_remove(&qrtr_ports, port); - mutex_unlock(&qrtr_port_lock); + xa_erase(&qrtr_ports, port); /* Ensure that if qrtr_port_lookup() did enter the RCU read section we * wait for it to up increment the refcount */ @@ -698,29 +697,20 @@ static void qrtr_port_remove(struct qrtr_sock *ipc) */ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port) { - u32 min_port; int rc; - mutex_lock(&qrtr_port_lock); if (!*port) { - min_port = QRTR_MIN_EPH_SOCKET; - rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, 
GFP_ATOMIC); - if (!rc) - *port = min_port; + rc = xa_alloc(&qrtr_ports, port, ipc, QRTR_EPH_PORT_RANGE, + GFP_KERNEL); } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) { rc = -EACCES; } else if (*port == QRTR_PORT_CTRL) { - min_port = 0; - rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC); + rc = xa_insert(&qrtr_ports, 0, ipc, GFP_KERNEL); } else { - min_port = *port; - rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC); - if (!rc) - *port = min_port; + rc = xa_insert(&qrtr_ports, *port, ipc, GFP_KERNEL); } - mutex_unlock(&qrtr_port_lock); - if (rc == -ENOSPC) + if (rc == -EBUSY) return -EADDRINUSE; else if (rc < 0) return rc; @@ -734,20 +724,16 @@ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port) static void qrtr_reset_ports(void) { struct qrtr_sock *ipc; - int id; - - mutex_lock(&qrtr_port_lock); - idr_for_each_entry(&qrtr_ports, ipc, id) { - /* Don't reset control port */ - if (id == 0) - continue; + unsigned long index; + rcu_read_lock(); + xa_for_each_start(&qrtr_ports, index, ipc, 1) { sock_hold(&ipc->sk); ipc->sk.sk_err = ENETRESET; ipc->sk.sk_error_report(&ipc->sk); sock_put(&ipc->sk); } - mutex_unlock(&qrtr_port_lock); + rcu_read_unlock(); } /* Bind socket to address. diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 6fdedd9dbbc2..cfbf0e129cba 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -363,6 +363,7 @@ static int acquire_refill(struct rds_connection *conn) static void release_refill(struct rds_connection *conn) { clear_bit(RDS_RECV_REFILL, &conn->c_flags); + smp_mb__after_atomic(); /* We don't use wait_on_bit()/wake_up_bit() because our waking is in a * hot path and finding waiters is very rare. We don't want to walk diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 5327d130c4b5..b560d06e6d96 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -166,10 +166,10 @@ void rds_tcp_reset_callbacks(struct socket *sock, */ atomic_set(&cp->cp_state, RDS_CONN_RESETTING); wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags)); - lock_sock(osock->sk); /* reset receive side state for rds_tcp_data_recv() for osock */ cancel_delayed_work_sync(&cp->cp_send_w); cancel_delayed_work_sync(&cp->cp_recv_w); + lock_sock(osock->sk); if (tc->t_tinc) { rds_inc_put(&tc->t_tinc->ti_inc); tc->t_tinc = NULL; diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index cf7d974e0f61..29a208ed8fb8 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -191,6 +191,7 @@ static void rose_kill_by_device(struct net_device *dev) rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); if (rose->neighbour) rose->neighbour->use--; + dev_put(rose->device); rose->device = NULL; } } @@ -591,6 +592,8 @@ static struct sock *rose_make_new(struct sock *osk) rose->idle = orose->idle; rose->defer = orose->defer; rose->device = orose->device; + if (rose->device) + dev_hold(rose->device); rose->qbitincl = orose->qbitincl; return sk; @@ -644,6 +647,7 @@ static int rose_release(struct socket *sock) break; } + dev_put(rose->device); sock->sk = NULL; release_sock(sk); sock_put(sk); @@ -720,7 +724,6 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr; unsigned char cause, diagnostic; - struct net_device *dev; ax25_uid_assoc *user; int n, err = 0; @@ -777,9 +780,12 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le } if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this 
may or may not work */ + struct net_device *dev; + sock_reset_flag(sk, SOCK_ZAPPED); - if ((dev = rose_dev_first()) == NULL) { + dev = rose_dev_first(); + if (!dev) { err = -ENETUNREACH; goto out_release; } @@ -787,6 +793,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le user = ax25_findbyuid(current_euid()); if (!user) { err = -EINVAL; + dev_put(dev); goto out_release; } diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c index f6102e6f5161..730d2205f197 100644 --- a/net/rose/rose_link.c +++ b/net/rose/rose_link.c @@ -236,6 +236,9 @@ void rose_transmit_clear_request(struct rose_neigh *neigh, unsigned int lci, uns unsigned char *dptr; int len; + if (!neigh->dev) + return; + len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 3; if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c index 11c45c8c6c16..036d92c0ad79 100644 --- a/net/rose/rose_loopback.c +++ b/net/rose/rose_loopback.c @@ -96,7 +96,8 @@ static void rose_loopback_timer(struct timer_list *unused) } if (frametype == ROSE_CALL_REQUEST) { - if (!rose_loopback_neigh->dev) { + if (!rose_loopback_neigh->dev && + !rose_loopback_neigh->loopback) { kfree_skb(skb); continue; } diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 6e35703ff353..981bdefd478b 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -227,8 +227,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh) { struct rose_neigh *s; - rose_stop_ftimer(rose_neigh); - rose_stop_t0timer(rose_neigh); + del_timer_sync(&rose_neigh->ftimer); + del_timer_sync(&rose_neigh->t0timer); skb_queue_purge(&rose_neigh->queue); @@ -613,6 +613,8 @@ struct net_device *rose_dev_first(void) if (first == NULL || strncmp(dev->name, first->name, 3) < 0) first = dev; } + if (first) + dev_hold(first); rcu_read_unlock(); return first; diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c index b3138fc2e552..f06ddbed3fed 100644 --- a/net/rose/rose_timer.c +++ b/net/rose/rose_timer.c @@ -31,89 +31,89 @@ static void rose_idletimer_expiry(struct timer_list *); void rose_start_heartbeat(struct sock *sk) { - del_timer(&sk->sk_timer); + sk_stop_timer(sk, &sk->sk_timer); sk->sk_timer.function = rose_heartbeat_expiry; sk->sk_timer.expires = jiffies + 5 * HZ; - add_timer(&sk->sk_timer); + sk_reset_timer(sk, &sk->sk_timer, sk->sk_timer.expires); } void rose_start_t1timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); - del_timer(&rose->timer); + sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t1; - add_timer(&rose->timer); + sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_t2timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); - del_timer(&rose->timer); + sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t2; - add_timer(&rose->timer); + sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_t3timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); - del_timer(&rose->timer); + sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t3; - add_timer(&rose->timer); + sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_hbtimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); - del_timer(&rose->timer); + sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; 
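The rose_timer.c conversion below swaps raw del_timer()/add_timer() for sk_stop_timer()/sk_reset_timer(). Those helpers pair the timer with a hold on the socket refcount: sk_reset_timer() takes a reference when it newly arms the timer, sk_stop_timer() drops it if it deactivates a pending timer, and in exchange every expiry handler owns one reference and must sock_put() it, which is exactly what the added sock_put() calls at the end of the expiry functions do. A condensed sketch of the contract, reusing the patch's rose identifiers but not a complete module:

    /* Sketch of the sk_reset_timer()/sk_stop_timer() contract. */
    static void demo_timer_expiry(struct timer_list *t);

    static void demo_start_t1(struct sock *sk)
    {
            struct rose_sock *rose = rose_sk(sk);

            sk_stop_timer(sk, &rose->timer);        /* drop ref if pending */
            rose->timer.function = demo_timer_expiry;
            rose->timer.expires  = jiffies + rose->t1;
            sk_reset_timer(sk, &rose->timer, rose->timer.expires);
    }

    static void demo_timer_expiry(struct timer_list *t)
    {
            struct rose_sock *rose = from_timer(rose, t, timer);
            struct sock *sk = &rose->sk;

            bh_lock_sock(sk);
            /* ... protocol state machine work ... */
            bh_unlock_sock(sk);
            sock_put(sk);   /* balance the hold taken when the timer armed */
    }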
rose->timer.expires = jiffies + rose->hb; - add_timer(&rose->timer); + sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_idletimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); - del_timer(&rose->idletimer); + sk_stop_timer(sk, &rose->idletimer); if (rose->idle > 0) { rose->idletimer.function = rose_idletimer_expiry; rose->idletimer.expires = jiffies + rose->idle; - add_timer(&rose->idletimer); + sk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires); } } void rose_stop_heartbeat(struct sock *sk) { - del_timer(&sk->sk_timer); + sk_stop_timer(sk, &sk->sk_timer); } void rose_stop_timer(struct sock *sk) { - del_timer(&rose_sk(sk)->timer); + sk_stop_timer(sk, &rose_sk(sk)->timer); } void rose_stop_idletimer(struct sock *sk) { - del_timer(&rose_sk(sk)->idletimer); + sk_stop_timer(sk, &rose_sk(sk)->idletimer); } static void rose_heartbeat_expiry(struct timer_list *t) @@ -130,6 +130,7 @@ static void rose_heartbeat_expiry(struct timer_list *t) (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { bh_unlock_sock(sk); rose_destroy_socket(sk); + sock_put(sk); return; } break; @@ -152,6 +153,7 @@ static void rose_heartbeat_expiry(struct timer_list *t) rose_start_heartbeat(sk); bh_unlock_sock(sk); + sock_put(sk); } static void rose_timer_expiry(struct timer_list *t) @@ -181,6 +183,7 @@ static void rose_timer_expiry(struct timer_list *t) break; } bh_unlock_sock(sk); + sock_put(sk); } static void rose_idletimer_expiry(struct timer_list *t) @@ -205,4 +208,5 @@ static void rose_idletimer_expiry(struct timer_list *t) sock_set_flag(sk, SOCK_DEAD); } bh_unlock_sock(sk); + sock_put(sk); } diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 41671af6b33f..0354f90dc93a 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -351,7 +351,7 @@ static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall, */ void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call) { - _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); + _enter("%d{%d}", call->debug_id, refcount_read(&call->ref)); mutex_lock(&call->user_mutex); rxrpc_release_call(rxrpc_sk(sock->sk), call); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 3bad9f5f9102..d86894a1c35d 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -14,14 +14,6 @@ #include #include "protocol.h" -#if 0 -#define CHECK_SLAB_OKAY(X) \ - BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \ - (POISON_FREE << 8 | POISON_FREE)) -#else -#define CHECK_SLAB_OKAY(X) do {} while (0) -#endif - #define FCRYPT_BSIZE 8 struct rxrpc_crypt { union { @@ -86,7 +78,7 @@ struct rxrpc_net { struct work_struct client_conn_reaper; struct timer_list client_conn_reap_timer; - struct list_head local_endpoints; + struct hlist_head local_endpoints; struct mutex local_mutex; /* Lock for ->local_endpoints */ DECLARE_HASHTABLE (peer_hash, 10); @@ -264,9 +256,9 @@ struct rxrpc_security { struct rxrpc_local { struct rcu_head rcu; atomic_t active_users; /* Number of users of the local endpoint */ - atomic_t usage; /* Number of references to the structure */ + refcount_t ref; /* Number of references to the structure */ struct rxrpc_net *rxnet; /* The network ns in which this resides */ - struct list_head link; + struct hlist_node link; struct socket *socket; /* my UDP socket */ struct work_struct processor; struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */ @@ -289,7 +281,7 @@ struct rxrpc_local { */ struct rxrpc_peer { struct rcu_head rcu; /* This 
must be first */ - atomic_t usage; + refcount_t ref; unsigned long hash_key; struct hlist_node hash_link; struct rxrpc_local *local; @@ -391,7 +383,8 @@ enum rxrpc_conn_proto_state { */ struct rxrpc_bundle { struct rxrpc_conn_parameters params; - atomic_t usage; + refcount_t ref; + atomic_t active; /* Number of active users */ unsigned int debug_id; bool try_upgrade; /* True if the bundle is attempting upgrade */ bool alloc_conn; /* True if someone's getting a conn */ @@ -412,7 +405,7 @@ struct rxrpc_connection { struct rxrpc_conn_proto proto; struct rxrpc_conn_parameters params; - atomic_t usage; + refcount_t ref; struct rcu_head rcu; struct list_head cache_link; @@ -592,7 +585,7 @@ struct rxrpc_call { int error; /* Local error incurred */ enum rxrpc_call_state state; /* current state of call */ enum rxrpc_call_completion completion; /* Call completion condition */ - atomic_t usage; + refcount_t ref; u16 service_id; /* service ID */ u8 security_ix; /* Security type */ enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */ @@ -659,13 +652,12 @@ struct rxrpc_call { spinlock_t input_lock; /* Lock for packet input to this call */ - /* receive-phase ACK management */ + /* Receive-phase ACK management (ACKs we send). */ u8 ackr_reason; /* reason to ACK */ rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */ - rxrpc_serial_t ackr_first_seq; /* first sequence number received */ - rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */ - rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */ - rxrpc_seq_t ackr_seen; /* Highest packet shown seen */ + rxrpc_seq_t ackr_highest_seq; /* Highest sequence number received */ + atomic_t ackr_nr_unacked; /* Number of unacked packets */ + atomic_t ackr_nr_consumed; /* Number of packets needing hard ACK */ /* RTT management */ rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */ @@ -675,8 +667,10 @@ struct rxrpc_call { #define RXRPC_CALL_RTT_AVAIL_MASK 0xf #define RXRPC_CALL_RTT_PEND_SHIFT 8 - /* transmission-phase ACK management */ + /* Transmission-phase ACK management (ACKs we've received).
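Much of the rxrpc churn in this series is this mechanical conversion: every `atomic_t usage` object counter becomes a `refcount_t ref`. The payoff is that refcount_t saturates and WARNs on overflow or on incrementing from zero, so a leaked or use-after-free counter degrades into a pinned object instead of a wrap-around and double free. A minimal sketch of the get/put shape the conversion lands on; demo_obj and its teardown are placeholders:

    /* Sketch: the refcount_t get/put pattern used throughout rxrpc. */
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct demo_obj {
            refcount_t ref;
    };

    static void demo_destroy(struct demo_obj *obj)
    {
            kfree(obj);     /* placeholder teardown */
    }

    static void demo_get(struct demo_obj *obj)
    {
            refcount_inc(&obj->ref);        /* WARNs if the count was 0 */
    }

    static void demo_put(struct demo_obj *obj)
    {
            if (refcount_dec_and_test(&obj->ref))
                    demo_destroy(obj);      /* last reference gone */
    }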
*/ ktime_t acks_latest_ts; /* Timestamp of latest ACK received */ + rxrpc_seq_t acks_first_seq; /* first sequence number received */ + rxrpc_seq_t acks_prev_seq; /* Highest previousPacket received */ rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */ rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */ rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */ @@ -1000,6 +994,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *); extern const struct seq_operations rxrpc_call_seq_ops; extern const struct seq_operations rxrpc_connection_seq_ops; extern const struct seq_operations rxrpc_peer_seq_ops; +extern const struct seq_operations rxrpc_local_seq_ops; /* * recvmsg.c diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index a0b033954cea..2a14d69b171f 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -91,7 +91,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, (head + 1) & (size - 1)); trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service, - atomic_read(&conn->usage), here); + refcount_read(&conn->ref), here); } /* Now it gets complicated, because calls get registered with the @@ -104,7 +104,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, call->state = RXRPC_CALL_SERVER_PREALLOC; trace_rxrpc_call(call->debug_id, rxrpc_call_new_service, - atomic_read(&call->usage), + refcount_read(&call->ref), here, (const void *)user_call_ID); write_lock(&rx->call_lock); diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index 22e05de5d1ca..2a93e7b5fbd0 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -166,7 +166,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); now = ktime_get_real(); - max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j)); + max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j)); spin_lock_bh(&call->lock); @@ -377,9 +377,9 @@ recheck_state: if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) && (int)call->conn->hi_serial - (int)call->rx_serial > 0) { trace_rxrpc_call_reset(call); - rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ECONNRESET); + rxrpc_abort_call("EXP", call, 0, RX_CALL_DEAD, -ECONNRESET); } else { - rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME); + rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME); } set_bit(RXRPC_CALL_EV_ABORT, &call->events); goto recheck_state; @@ -406,7 +406,8 @@ recheck_state: goto recheck_state; } - if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) { + if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) && + call->state != RXRPC_CALL_CLIENT_RECV_REPLY) { rxrpc_resend(call, now); goto recheck_state; } diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 043508fd8d8a..10dad2834d5b 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -112,7 +112,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx, found_extant_call: rxrpc_get_call(call, rxrpc_call_got); read_unlock(&rx->call_lock); - _leave(" = %p [%d]", call, atomic_read(&call->usage)); + _leave(" = %p [%d]", call, refcount_read(&call->ref)); return call; } @@ -160,7 +160,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp, spin_lock_init(&call->notify_lock); spin_lock_init(&call->input_lock); rwlock_init(&call->state_lock); - atomic_set(&call->usage, 1); + refcount_set(&call->ref, 1); call->debug_id = debug_id; call->tx_total_len = -1; call->next_rx_timo = 
20 * HZ; @@ -285,8 +285,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, _enter("%p,%lx", rx, p->user_call_ID); limiter = rxrpc_get_call_slot(p, gfp); - if (!limiter) + if (!limiter) { + release_sock(&rx->sk); return ERR_PTR(-ERESTARTSYS); + } call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id); if (IS_ERR(call)) { @@ -299,7 +301,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, call->interruptibility = p->interruptibility; call->tx_total_len = p->tx_total_len; trace_rxrpc_call(call->debug_id, rxrpc_call_new_client, - atomic_read(&call->usage), + refcount_read(&call->ref), here, (const void *)p->user_call_ID); if (p->kernel) __set_bit(RXRPC_CALL_KERNEL, &call->flags); @@ -352,7 +354,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, goto error_attached_to_socket; trace_rxrpc_call(call->debug_id, rxrpc_call_connected, - atomic_read(&call->usage), here, NULL); + refcount_read(&call->ref), here, NULL); rxrpc_start_call_timer(call); @@ -372,7 +374,7 @@ error_dup_user_ID: __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, RX_CALL_DEAD, -EEXIST); trace_rxrpc_call(call->debug_id, rxrpc_call_error, - atomic_read(&call->usage), here, ERR_PTR(-EEXIST)); + refcount_read(&call->ref), here, ERR_PTR(-EEXIST)); rxrpc_release_call(rx, call); mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put); @@ -386,7 +388,7 @@ error_dup_user_ID: */ error_attached_to_socket: trace_rxrpc_call(call->debug_id, rxrpc_call_error, - atomic_read(&call->usage), here, ERR_PTR(ret)); + refcount_read(&call->ref), here, ERR_PTR(ret)); set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, RX_CALL_DEAD, ret); @@ -442,8 +444,9 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx, bool rxrpc_queue_call(struct rxrpc_call *call) { const void *here = __builtin_return_address(0); - int n = atomic_fetch_add_unless(&call->usage, 1, 0); - if (n == 0) + int n; + + if (!__refcount_inc_not_zero(&call->ref, &n)) return false; if (rxrpc_queue_work(&call->processor)) trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1, @@ -459,7 +462,7 @@ bool rxrpc_queue_call(struct rxrpc_call *call) bool __rxrpc_queue_call(struct rxrpc_call *call) { const void *here = __builtin_return_address(0); - int n = atomic_read(&call->usage); + int n = refcount_read(&call->ref); ASSERTCMP(n, >=, 1); if (rxrpc_queue_work(&call->processor)) trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n, @@ -476,7 +479,7 @@ void rxrpc_see_call(struct rxrpc_call *call) { const void *here = __builtin_return_address(0); if (call) { - int n = atomic_read(&call->usage); + int n = refcount_read(&call->ref); trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n, here, NULL); @@ -486,11 +489,11 @@ void rxrpc_see_call(struct rxrpc_call *call) bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) { const void *here = __builtin_return_address(0); - int n = atomic_fetch_add_unless(&call->usage, 1, 0); + int n; - if (n == 0) + if (!__refcount_inc_not_zero(&call->ref, &n)) return false; - trace_rxrpc_call(call->debug_id, op, n, here, NULL); + trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL); return true; } @@ -500,9 +503,10 @@ bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) { const void *here = __builtin_return_address(0); - int n = atomic_inc_return(&call->usage); + int n; - trace_rxrpc_call(call->debug_id, op, n, here, NULL); + 
__refcount_inc(&call->ref, &n); + trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL); } /* @@ -527,10 +531,10 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) struct rxrpc_connection *conn = call->conn; bool put = false; - _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); + _enter("{%d,%d}", call->debug_id, refcount_read(&call->ref)); trace_rxrpc_call(call->debug_id, rxrpc_call_release, - atomic_read(&call->usage), + refcount_read(&call->ref), here, (const void *)call->flags); ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); @@ -619,14 +623,14 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op) struct rxrpc_net *rxnet = call->rxnet; const void *here = __builtin_return_address(0); unsigned int debug_id = call->debug_id; + bool dead; int n; ASSERT(call != NULL); - n = atomic_dec_return(&call->usage); + dead = __refcount_dec_and_test(&call->ref, &n); trace_rxrpc_call(debug_id, op, n, here, NULL); - ASSERTCMP(n, >=, 0); - if (n == 0) { + if (dead) { _debug("call %d dead", call->debug_id); ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); @@ -716,7 +720,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet) list_del_init(&call->link); pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n", - call, atomic_read(&call->usage), + call, refcount_read(&call->ref), rxrpc_call_states[call->state], call->flags, call->events); diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index f5fb223aba82..f5fa5f3083bd 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -40,6 +40,8 @@ __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ; DEFINE_IDR(rxrpc_client_conn_ids); static DEFINE_SPINLOCK(rxrpc_conn_id_lock); +static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle); + /* * Get a connection ID and epoch for a client connection from the global pool. * The connection struct pointer is then recorded in the idr radix tree. 
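The trace-heavy call sites above use the __refcount_*() variants, which report the old count through an `int *` so the tracepoint can log the transition without a separate, racy refcount_read(). The "get maybe" helpers build on __refcount_inc_not_zero() to take a reference only if the object found in a lookup table is not already being torn down. A sketch of that pattern; demo_obj is a placeholder and the tracepoint is only indicated:

    /* Sketch: "get a reference unless the object is already dying". */
    #include <linux/refcount.h>

    struct demo_obj {
            refcount_t ref;
    };

    static struct demo_obj *demo_get_maybe(struct demo_obj *obj)
    {
            int n;

            if (!obj)
                    return NULL;
            if (!__refcount_inc_not_zero(&obj->ref, &n))
                    return NULL;    /* caught it mid-teardown: don't touch */
            /* trace_demo_got(obj, n + 1): new count is old count plus 1 */
            return obj;
    }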
The @@ -102,7 +104,7 @@ void rxrpc_destroy_client_conn_ids(void) if (!idr_is_empty(&rxrpc_client_conn_ids)) { idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) { pr_err("AF_RXRPC: Leaked client conn %p {%d}\n", - conn, atomic_read(&conn->usage)); + conn, refcount_read(&conn->ref)); } BUG(); } @@ -122,7 +124,8 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp, if (bundle) { bundle->params = *cp; rxrpc_get_peer(bundle->params.peer); - atomic_set(&bundle->usage, 1); + refcount_set(&bundle->ref, 1); + atomic_set(&bundle->active, 1); spin_lock_init(&bundle->channel_lock); INIT_LIST_HEAD(&bundle->waiting_calls); } @@ -131,7 +134,7 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp, struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle) { - atomic_inc(&bundle->usage); + refcount_inc(&bundle->ref); return bundle; } @@ -144,10 +147,13 @@ static void rxrpc_free_bundle(struct rxrpc_bundle *bundle) void rxrpc_put_bundle(struct rxrpc_bundle *bundle) { unsigned int d = bundle->debug_id; - unsigned int u = atomic_dec_return(&bundle->usage); + bool dead; + int r; - _debug("PUT B=%x %u", d, u); - if (u == 0) + dead = __refcount_dec_and_test(&bundle->ref, &r); + + _debug("PUT B=%x %d", d, r - 1); + if (dead) rxrpc_free_bundle(bundle); } @@ -169,7 +175,7 @@ rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp) return ERR_PTR(-ENOMEM); } - atomic_set(&conn->usage, 1); + refcount_set(&conn->ref, 1); conn->bundle = bundle; conn->params = bundle->params; conn->out_clientflag = RXRPC_CLIENT_INITIATED; @@ -199,7 +205,7 @@ rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp) key_get(conn->params.key); trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client, - atomic_read(&conn->usage), + refcount_read(&conn->ref), __builtin_return_address(0)); atomic_inc(&rxnet->nr_client_conns); @@ -341,6 +347,7 @@ found_bundle_free: rxrpc_free_bundle(candidate); found_bundle: rxrpc_get_bundle(bundle); + atomic_inc(&bundle->active); spin_unlock(&local->client_bundles_lock); _leave(" = %u [found]", bundle->debug_id); return bundle; @@ -438,6 +445,7 @@ static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp) if (old) trace_rxrpc_client(old, -1, rxrpc_client_replace); candidate->bundle_shift = shift; + atomic_inc(&bundle->active); bundle->conns[i] = candidate; for (j = 0; j < RXRPC_MAXCALLS; j++) set_bit(shift + j, &bundle->avail_chans); @@ -728,6 +736,7 @@ granted_channel: smp_rmb(); out_put_bundle: + rxrpc_deactivate_bundle(bundle); rxrpc_put_bundle(bundle); out: _leave(" = %d", ret); @@ -903,9 +912,8 @@ out: static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) { struct rxrpc_bundle *bundle = conn->bundle; - struct rxrpc_local *local = bundle->params.local; unsigned int bindex; - bool need_drop = false, need_put = false; + bool need_drop = false; int i; _enter("C=%x", conn->debug_id); @@ -924,15 +932,22 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) } spin_unlock(&bundle->channel_lock); - /* If there are no more connections, remove the bundle */ - if (!bundle->avail_chans) { - _debug("maybe unbundle"); - spin_lock(&local->client_bundles_lock); + if (need_drop) { + rxrpc_deactivate_bundle(bundle); + rxrpc_put_connection(conn); + } +} - for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) - if (bundle->conns[i]) - break; - if (i == ARRAY_SIZE(bundle->conns) && !bundle->params.exclusive) { +/* + * Drop the active count on a bundle. 
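The bundle rework above splits two concerns that one counter had been covering: `ref` keeps the memory alive, while the new `active` counts users who may still add connections; only when the last active user goes away is the bundle unlinked from the local endpoint's tree, under the lock that guards that tree. atomic_dec_and_lock() performs the 1-to-0 transition and the lock acquisition as one step, which is what makes the unlink race-free. A sketch of the shape of rxrpc_deactivate_bundle(), with the error paths and the exclusive-bundle case elided:

    /* Sketch: drop the active count and, on the last drop, unlink. */
    static void demo_deactivate_bundle(struct rxrpc_bundle *bundle)
    {
            struct rxrpc_local *local = bundle->params.local;

            /* True only for the 1 -> 0 transition, returning with the
             * lock held, so no new user can find the bundle while we
             * take it out of the tree.
             */
            if (atomic_dec_and_lock(&bundle->active,
                                    &local->client_bundles_lock)) {
                    rb_erase(&bundle->local_node, &local->client_bundles);
                    spin_unlock(&local->client_bundles_lock);
                    rxrpc_put_bundle(bundle);       /* drop the tree's ref */
            }
    }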
+ */ +static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle) +{ + struct rxrpc_local *local = bundle->params.local; + bool need_put = false; + + if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) { + if (!bundle->params.exclusive) { _debug("erase bundle"); rb_erase(&bundle->local_node, &local->client_bundles); need_put = true; @@ -942,10 +957,6 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) if (need_put) rxrpc_put_bundle(bundle); } - - if (need_drop) - rxrpc_put_connection(conn); - _leave(""); } /* @@ -972,14 +983,13 @@ void rxrpc_put_client_conn(struct rxrpc_connection *conn) { const void *here = __builtin_return_address(0); unsigned int debug_id = conn->debug_id; - int n; + bool dead; + int r; - n = atomic_dec_return(&conn->usage); - trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here); - if (n <= 0) { - ASSERTCMP(n, >=, 0); + dead = __refcount_dec_and_test(&conn->ref, &r); + trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, r - 1, here); + if (dead) rxrpc_kill_client_conn(conn); - } } /* diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 3bcbe0665f91..d829b97550cc 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -105,7 +105,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local, goto not_found; *_peer = peer; conn = rxrpc_find_service_conn_rcu(peer, skb); - if (!conn || atomic_read(&conn->usage) == 0) + if (!conn || refcount_read(&conn->ref) == 0) goto not_found; _leave(" = %p", conn); return conn; @@ -115,7 +115,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local, */ conn = idr_find(&rxrpc_client_conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT); - if (!conn || atomic_read(&conn->usage) == 0) { + if (!conn || refcount_read(&conn->ref) == 0) { _debug("no conn"); goto not_found; } @@ -184,7 +184,7 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn, chan->last_type = RXRPC_PACKET_TYPE_ABORT; break; default: - chan->last_abort = RX_USER_ABORT; + chan->last_abort = RX_CALL_DEAD; chan->last_type = RXRPC_PACKET_TYPE_ABORT; break; } @@ -264,11 +264,12 @@ void rxrpc_kill_connection(struct rxrpc_connection *conn) bool rxrpc_queue_conn(struct rxrpc_connection *conn) { const void *here = __builtin_return_address(0); - int n = atomic_fetch_add_unless(&conn->usage, 1, 0); - if (n == 0) + int r; + + if (!__refcount_inc_not_zero(&conn->ref, &r)) return false; if (rxrpc_queue_work(&conn->processor)) - trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, n + 1, here); + trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, r + 1, here); else rxrpc_put_connection(conn); return true; @@ -281,7 +282,7 @@ void rxrpc_see_connection(struct rxrpc_connection *conn) { const void *here = __builtin_return_address(0); if (conn) { - int n = atomic_read(&conn->usage); + int n = refcount_read(&conn->ref); trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here); } @@ -293,9 +294,10 @@ void rxrpc_see_connection(struct rxrpc_connection *conn) struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn) { const void *here = __builtin_return_address(0); - int n = atomic_inc_return(&conn->usage); + int r; - trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here); + __refcount_inc(&conn->ref, &r); + trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, r, here); return conn; } @@ -306,11 +308,11 @@ struct rxrpc_connection * rxrpc_get_connection_maybe(struct rxrpc_connection *conn) { const void *here = __builtin_return_address(0); + int r; if (conn) { - int n = 
atomic_fetch_add_unless(&conn->usage, 1, 0); - if (n > 0) - trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n + 1, here); + if (__refcount_inc_not_zero(&conn->ref, &r)) + trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, r + 1, here); else conn = NULL; } @@ -334,12 +336,11 @@ void rxrpc_put_service_conn(struct rxrpc_connection *conn) { const void *here = __builtin_return_address(0); unsigned int debug_id = conn->debug_id; - int n; + int r; - n = atomic_dec_return(&conn->usage); - trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, n, here); - ASSERTCMP(n, >=, 0); - if (n == 1) + __refcount_dec(&conn->ref, &r); + trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, r - 1, here); + if (r - 1 == 1) rxrpc_set_service_reap_timer(conn->params.local->rxnet, jiffies + rxrpc_connection_expiry); } @@ -352,9 +353,9 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu) struct rxrpc_connection *conn = container_of(rcu, struct rxrpc_connection, rcu); - _enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage)); + _enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref)); - ASSERTCMP(atomic_read(&conn->usage), ==, 0); + ASSERTCMP(refcount_read(&conn->ref), ==, 0); _net("DESTROY CONN %d", conn->debug_id); @@ -394,8 +395,8 @@ void rxrpc_service_connection_reaper(struct work_struct *work) write_lock(&rxnet->conn_lock); list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) { - ASSERTCMP(atomic_read(&conn->usage), >, 0); - if (likely(atomic_read(&conn->usage) > 1)) + ASSERTCMP(refcount_read(&conn->ref), >, 0); + if (likely(refcount_read(&conn->ref) > 1)) continue; if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) continue; @@ -407,7 +408,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work) expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ; _debug("reap CONN %d { u=%d,t=%ld }", - conn->debug_id, atomic_read(&conn->usage), + conn->debug_id, refcount_read(&conn->ref), (long)expire_at - (long)now); if (time_before(now, expire_at)) { @@ -420,7 +421,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work) /* The usage count sits at 1 whilst the object is unused on the * list; we reduce that to 0 to make the object unavailable. 
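The service-connection reaper relies on an idle cached object sitting at a count of exactly 1, the list's own reference. refcount_dec_if_one() claims the object only if nobody raced in and took another reference, replacing the old atomic_cmpxchg(usage, 1, 0) idiom the hunk below removes. A sketch of the reap loop under those assumptions; demo_conn and the lists are placeholders:

    /* Sketch: reap cache entries whose only reference is the list's. */
    #include <linux/list.h>
    #include <linux/refcount.h>

    struct demo_conn {
            refcount_t ref;
            struct list_head link;
    };

    static void demo_reap(struct list_head *cache, struct list_head *graveyard)
    {
            struct demo_conn *conn, *tmp;

            list_for_each_entry_safe(conn, tmp, cache, link) {
                    if (refcount_read(&conn->ref) > 1)
                            continue;       /* clearly busy, skip cheaply */
                    /* 1 -> 0 succeeds only if no one just took a ref */
                    if (!refcount_dec_if_one(&conn->ref))
                            continue;
                    list_move(&conn->link, graveyard);      /* kill later */
            }
    }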
*/ - if (atomic_cmpxchg(&conn->usage, 1, 0) != 1) + if (!refcount_dec_if_one(&conn->ref)) continue; trace_rxrpc_conn(conn->debug_id, rxrpc_conn_reap_service, 0, NULL); @@ -444,7 +445,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work) link); list_del_init(&conn->link); - ASSERTCMP(atomic_read(&conn->usage), ==, 0); + ASSERTCMP(refcount_read(&conn->ref), ==, 0); rxrpc_kill_connection(conn); } @@ -472,7 +473,7 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet) write_lock(&rxnet->conn_lock); list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) { pr_err("AF_RXRPC: Leaked conn %p {%d}\n", - conn, atomic_read(&conn->usage)); + conn, refcount_read(&conn->ref)); leak = true; } write_unlock(&rxnet->conn_lock); diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c index 6c847720494f..68508166bbc0 100644 --- a/net/rxrpc/conn_service.c +++ b/net/rxrpc/conn_service.c @@ -9,7 +9,7 @@ #include "ar-internal.h" static struct rxrpc_bundle rxrpc_service_dummy_bundle = { - .usage = ATOMIC_INIT(1), + .ref = REFCOUNT_INIT(1), .debug_id = UINT_MAX, .channel_lock = __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock), }; @@ -99,7 +99,7 @@ conn_published: return; found_extant_conn: - if (atomic_read(&cursor->usage) == 0) + if (refcount_read(&cursor->ref) == 0) goto replace_old_connection; write_sequnlock_bh(&peer->service_conn_lock); /* We should not be able to get here. rxrpc_incoming_connection() is @@ -132,7 +132,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn * the rxrpc_connections list. */ conn->state = RXRPC_CONN_SERVICE_PREALLOC; - atomic_set(&conn->usage, 2); + refcount_set(&conn->ref, 2); conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle); atomic_inc(&rxnet->nr_conns); @@ -142,7 +142,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn write_unlock(&rxnet->conn_lock); trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service, - atomic_read(&conn->usage), + refcount_read(&conn->ref), __builtin_return_address(0)); } diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index dc201363f2c4..e9178115a744 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -412,8 +412,8 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); enum rxrpc_call_state state; - unsigned int j, nr_subpackets; - rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0; + unsigned int j, nr_subpackets, nr_unacked = 0; + rxrpc_serial_t serial = sp->hdr.serial, ack_serial = serial; rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack; bool immediate_ack = false, jumbo_bad = false; u8 ack = 0; @@ -453,7 +453,6 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) !rxrpc_receiving_reply(call)) goto unlock; - call->ackr_prev_seq = seq0; hard_ack = READ_ONCE(call->rx_hard_ack); nr_subpackets = sp->nr_subpackets; @@ -534,6 +533,9 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) ack_serial = serial; } + if (after(seq0, call->ackr_highest_seq)) + call->ackr_highest_seq = seq0; + /* Queue the packet. 
We use a couple of memory barriers here as need * to make sure that rx_top is perceived to be set after the buffer * pointer and that the buffer pointer is set after the annotation and @@ -567,6 +569,8 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) sp = NULL; } + nr_unacked++; + if (last) { set_bit(RXRPC_CALL_RX_LAST, &call->flags); if (!ack) { @@ -586,9 +590,14 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) } call->rx_expect_next = seq + 1; } + if (!ack) + ack_serial = serial; } ack: + if (atomic_add_return(nr_unacked, &call->ackr_nr_unacked) > 2 && !ack) + ack = RXRPC_ACK_IDLE; + if (ack) rxrpc_propose_ACK(call, ack, ack_serial, immediate_ack, true, @@ -812,7 +821,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks, static bool rxrpc_is_ack_valid(struct rxrpc_call *call, rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt) { - rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq); + rxrpc_seq_t base = READ_ONCE(call->acks_first_seq); if (after(first_pkt, base)) return true; /* The window advanced */ @@ -820,7 +829,7 @@ static bool rxrpc_is_ack_valid(struct rxrpc_call *call, if (before(first_pkt, base)) return false; /* firstPacket regressed */ - if (after_eq(prev_pkt, call->ackr_prev_seq)) + if (after_eq(prev_pkt, call->acks_prev_seq)) return true; /* previousPacket hasn't regressed. */ /* Some rx implementations put a serial number in previousPacket. */ @@ -906,8 +915,8 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) /* Discard any out-of-order or duplicate ACKs (outside lock). */ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, - first_soft_ack, call->ackr_first_seq, - prev_pkt, call->ackr_prev_seq); + first_soft_ack, call->acks_first_seq, + prev_pkt, call->acks_prev_seq); return; } @@ -922,14 +931,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) /* Discard any out-of-order or duplicate ACKs (inside lock). */ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, - first_soft_ack, call->ackr_first_seq, - prev_pkt, call->ackr_prev_seq); + first_soft_ack, call->acks_first_seq, + prev_pkt, call->acks_prev_seq); goto out; } call->acks_latest_ts = skb->tstamp; - call->ackr_first_seq = first_soft_ack; - call->ackr_prev_seq = prev_pkt; + call->acks_first_seq = first_soft_ack; + call->acks_prev_seq = prev_pkt; /* Parse rwind and mtu sizes if provided. 
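The ACK-discard logic above (rxrpc_is_ack_valid(), now keyed on the renamed acks_first_seq/acks_prev_seq fields) compares the incoming firstPacket/previousPacket against the last accepted values using the wrap-safe before()/after() helpers: cast the 32-bit difference to signed and test its sign. A self-contained, condensed version of the check; the extra allowance for peers that put a serial number in previousPacket is omitted:

    /* Wrap-safe 32-bit sequence comparison, the before()/after() idiom. */
    #include <stdbool.h>
    #include <stdint.h>

    static bool seq_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;    /* true if a is later than b */
    }

    static bool ack_window_valid(uint32_t first_pkt, uint32_t base,
                                 uint32_t prev_pkt, uint32_t prev_base)
    {
            if (seq_after(first_pkt, base))
                    return true;            /* the window advanced */
            if (seq_after(base, first_pkt))
                    return false;           /* firstPacket regressed */
            /* Same window: previousPacket must not have regressed. */
            return !seq_after(prev_base, prev_pkt);
    }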
*/ if (buf.info.rxMTU) @@ -1154,8 +1163,6 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local, */ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) { - CHECK_SLAB_OKAY(&local->usage); - if (rxrpc_get_local_maybe(local)) { skb_queue_tail(&local->reject_queue, skb); rxrpc_queue_local(local); @@ -1413,7 +1420,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) } } - if (!call || atomic_read(&call->usage) == 0) { + if (!call || refcount_read(&call->ref) == 0) { if (rxrpc_to_client(sp) || sp->hdr.type != RXRPC_PACKET_TYPE_DATA) goto bad_message; diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 8c2881054266..2c66ee981f39 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -78,10 +78,10 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); if (local) { - atomic_set(&local->usage, 1); + refcount_set(&local->ref, 1); atomic_set(&local->active_users, 1); local->rxnet = rxnet; - INIT_LIST_HEAD(&local->link); + INIT_HLIST_NODE(&local->link); INIT_WORK(&local->processor, rxrpc_local_processor); init_rwsem(&local->defrag_sem); skb_queue_head_init(&local->reject_queue); @@ -199,7 +199,7 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net, { struct rxrpc_local *local; struct rxrpc_net *rxnet = rxrpc_net(net); - struct list_head *cursor; + struct hlist_node *cursor; const char *age; long diff; int ret; @@ -209,16 +209,12 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net, mutex_lock(&rxnet->local_mutex); - for (cursor = rxnet->local_endpoints.next; - cursor != &rxnet->local_endpoints; - cursor = cursor->next) { - local = list_entry(cursor, struct rxrpc_local, link); + hlist_for_each(cursor, &rxnet->local_endpoints) { + local = hlist_entry(cursor, struct rxrpc_local, link); diff = rxrpc_local_cmp_key(local, srx); - if (diff < 0) + if (diff != 0) continue; - if (diff > 0) - break; /* Services aren't allowed to share transport sockets, so * reject that here. It is possible that the object is dying - @@ -230,9 +226,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net, goto addr_in_use; } - /* Found a match. We replace a dying object. Attempting to - * bind the transport socket may still fail if we're attempting - * to use a local address that the dying object is still using. + /* Found a match. We want to replace a dying object. + * Attempting to bind the transport socket may still fail if + * we're attempting to use a local address that the dying + * object is still using. 
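The local-endpoint table moves from a list_head to an RCU-protected hlist in the hunks below, so lookups (and the new /proc/net/rxrpc/locals file) can walk it without taking local_mutex. Replacing a dying endpoint in place uses hlist_replace_rcu(); note that the patch then clears the dead node's pprev by hand so a later unhash check treats it as already removed. A sketch of the install step under those assumptions, reusing the patch's type names:

    /* Sketch: swap a dying endpoint for its replacement in an RCU hlist.
     * Writers hold local_mutex; readers run under rcu_read_lock().
     */
    static void demo_install_local(struct rxrpc_net *rxnet,
                                   struct hlist_node *dying, /* may be NULL */
                                   struct rxrpc_local *local)
    {
            if (dying) {
                    hlist_replace_rcu(dying, &local->link);
                    dying->pprev = NULL;    /* mark the old node unhashed */
            } else {
                    hlist_add_head_rcu(&local->link,
                                       &rxnet->local_endpoints);
            }
    }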
*/ if (!rxrpc_use_local(local)) break; @@ -249,10 +246,12 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net, if (ret < 0) goto sock_error; - if (cursor != &rxnet->local_endpoints) - list_replace_init(cursor, &local->link); - else - list_add_tail(&local->link, cursor); + if (cursor) { + hlist_replace_rcu(cursor, &local->link); + cursor->pprev = NULL; + } else { + hlist_add_head_rcu(&local->link, &rxnet->local_endpoints); + } age = "new"; found: @@ -285,10 +284,10 @@ addr_in_use: struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local) { const void *here = __builtin_return_address(0); - int n; + int r; - n = atomic_inc_return(&local->usage); - trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here); + __refcount_inc(&local->ref, &r); + trace_rxrpc_local(local->debug_id, rxrpc_local_got, r + 1, here); return local; } @@ -298,12 +297,12 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local) struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) { const void *here = __builtin_return_address(0); + int r; if (local) { - int n = atomic_fetch_add_unless(&local->usage, 1, 0); - if (n > 0) + if (__refcount_inc_not_zero(&local->ref, &r)) trace_rxrpc_local(local->debug_id, rxrpc_local_got, - n + 1, here); + r + 1, here); else local = NULL; } @@ -317,10 +316,10 @@ void rxrpc_queue_local(struct rxrpc_local *local) { const void *here = __builtin_return_address(0); unsigned int debug_id = local->debug_id; - int n = atomic_read(&local->usage); + int r = refcount_read(&local->ref); if (rxrpc_queue_work(&local->processor)) - trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here); + trace_rxrpc_local(debug_id, rxrpc_local_queued, r + 1, here); else rxrpc_put_local(local); } @@ -332,15 +331,16 @@ void rxrpc_put_local(struct rxrpc_local *local) { const void *here = __builtin_return_address(0); unsigned int debug_id; - int n; + bool dead; + int r; if (local) { debug_id = local->debug_id; - n = atomic_dec_return(&local->usage); - trace_rxrpc_local(debug_id, rxrpc_local_put, n, here); + dead = __refcount_dec_and_test(&local->ref, &r); + trace_rxrpc_local(debug_id, rxrpc_local_put, r, here); - if (n == 0) + if (dead) call_rcu(&local->rcu, rxrpc_local_rcu); } } @@ -393,7 +393,7 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local) local->dead = true; mutex_lock(&rxnet->local_mutex); - list_del_init(&local->link); + hlist_del_init_rcu(&local->link); mutex_unlock(&rxnet->local_mutex); rxrpc_clean_up_local_conns(local); @@ -424,8 +424,11 @@ static void rxrpc_local_processor(struct work_struct *work) container_of(work, struct rxrpc_local, processor); bool again; + if (local->dead) + return; + trace_rxrpc_local(local->debug_id, rxrpc_local_processing, - atomic_read(&local->usage), NULL); + refcount_read(&local->ref), NULL); do { again = false; @@ -477,11 +480,11 @@ void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet) flush_workqueue(rxrpc_workqueue); - if (!list_empty(&rxnet->local_endpoints)) { + if (!hlist_empty(&rxnet->local_endpoints)) { mutex_lock(&rxnet->local_mutex); - list_for_each_entry(local, &rxnet->local_endpoints, link) { + hlist_for_each_entry(local, &rxnet->local_endpoints, link) { pr_err("AF_RXRPC: Leaked local %p {%d}\n", - local, atomic_read(&local->usage)); + local, refcount_read(&local->ref)); } mutex_unlock(&rxnet->local_mutex); BUG(); diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c index 25bbc4cc8b13..34f389975a7d 100644 --- a/net/rxrpc/net_ns.c +++ b/net/rxrpc/net_ns.c @@ -72,7 +72,7 @@ static __net_init int rxrpc_init_net(struct net 
*net) timer_setup(&rxnet->client_conn_reap_timer, rxrpc_client_conn_reap_timeout, 0); - INIT_LIST_HEAD(&rxnet->local_endpoints); + INIT_HLIST_HEAD(&rxnet->local_endpoints); mutex_init(&rxnet->local_mutex); hash_init(rxnet->peer_hash); @@ -98,6 +98,9 @@ static __net_init int rxrpc_init_net(struct net *net) proc_create_net("peers", 0444, rxnet->proc_net, &rxrpc_peer_seq_ops, sizeof(struct seq_net_private)); + proc_create_net("locals", 0444, rxnet->proc_net, + &rxrpc_local_seq_ops, + sizeof(struct seq_net_private)); return 0; err_proc: @@ -115,6 +118,8 @@ static __net_exit void rxrpc_exit_net(struct net *net) rxnet->live = false; del_timer_sync(&rxnet->peer_keepalive_timer); cancel_work_sync(&rxnet->peer_keepalive_work); + /* Remove the timer again as the worker may have restarted it. */ + del_timer_sync(&rxnet->peer_keepalive_timer); rxrpc_destroy_all_calls(rxnet); rxrpc_destroy_all_connections(rxnet); rxrpc_destroy_all_peers(rxnet); diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index a45c83f22236..9683617db704 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -74,11 +74,18 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, u8 reason) { rxrpc_serial_t serial; + unsigned int tmp; rxrpc_seq_t hard_ack, top, seq; int ix; u32 mtu, jmax; u8 *ackp = pkt->acks; + tmp = atomic_xchg(&call->ackr_nr_unacked, 0); + tmp |= atomic_xchg(&call->ackr_nr_consumed, 0); + if (!tmp && (reason == RXRPC_ACK_DELAY || + reason == RXRPC_ACK_IDLE)) + return 0; + /* Barrier against rxrpc_input_data(). */ serial = call->ackr_serial; hard_ack = READ_ONCE(call->rx_hard_ack); @@ -89,7 +96,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, pkt->ack.bufferSpace = htons(8); pkt->ack.maxSkew = htons(0); pkt->ack.firstPacket = htonl(hard_ack + 1); - pkt->ack.previousPacket = htonl(call->ackr_prev_seq); + pkt->ack.previousPacket = htonl(call->ackr_highest_seq); pkt->ack.serial = htonl(serial); pkt->ack.reason = reason; pkt->ack.nAcks = top - hard_ack; @@ -223,6 +230,10 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason); spin_unlock_bh(&call->lock); + if (n == 0) { + kfree(pkt); + return 0; + } iov[0].iov_base = pkt; iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n; @@ -259,13 +270,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, ntohl(pkt->ack.serial), false, true, rxrpc_propose_ack_retry_tx); - } else { - spin_lock_bh(&call->lock); - if (after(hard_ack, call->ackr_consumed)) - call->ackr_consumed = hard_ack; - if (after(top, call->ackr_seen)) - call->ackr_seen = top; - spin_unlock_bh(&call->lock); } rxrpc_set_keepalive(call); diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 0298fe2ad6d3..26d2ae9baaf2 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c @@ -121,7 +121,7 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu( hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) { if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 && - atomic_read(&peer->usage) > 0) + refcount_read(&peer->ref) > 0) return peer; } @@ -140,7 +140,7 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local, peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key); if (peer) { _net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport); - _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage)); + _leave(" = %p {u=%d}", peer, refcount_read(&peer->ref)); } return peer; } @@ -216,7 +216,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct 
rxrpc_local *local, gfp_t gfp) peer = kzalloc(sizeof(struct rxrpc_peer), gfp); if (peer) { - atomic_set(&peer->usage, 1); + refcount_set(&peer->ref, 1); peer->local = rxrpc_get_local(local); INIT_HLIST_HEAD(&peer->error_targets); peer->service_conns = RB_ROOT; @@ -378,7 +378,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx, _net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport); - _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage)); + _leave(" = %p {u=%d}", peer, refcount_read(&peer->ref)); return peer; } @@ -388,10 +388,10 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx, struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer) { const void *here = __builtin_return_address(0); - int n; + int r; - n = atomic_inc_return(&peer->usage); - trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here); + __refcount_inc(&peer->ref, &r); + trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, r + 1, here); return peer; } @@ -401,11 +401,11 @@ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer) struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer) { const void *here = __builtin_return_address(0); + int r; if (peer) { - int n = atomic_fetch_add_unless(&peer->usage, 1, 0); - if (n > 0) - trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here); + if (__refcount_inc_not_zero(&peer->ref, &r)) + trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, r + 1, here); else peer = NULL; } @@ -436,13 +436,14 @@ void rxrpc_put_peer(struct rxrpc_peer *peer) { const void *here = __builtin_return_address(0); unsigned int debug_id; - int n; + bool dead; + int r; if (peer) { debug_id = peer->debug_id; - n = atomic_dec_return(&peer->usage); - trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here); - if (n == 0) + dead = __refcount_dec_and_test(&peer->ref, &r); + trace_rxrpc_peer(debug_id, rxrpc_peer_put, r - 1, here); + if (dead) __rxrpc_put_peer(peer); } } @@ -455,11 +456,12 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer) { const void *here = __builtin_return_address(0); unsigned int debug_id = peer->debug_id; - int n; + bool dead; + int r; - n = atomic_dec_return(&peer->usage); - trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here); - if (n == 0) { + dead = __refcount_dec_and_test(&peer->ref, &r); + trace_rxrpc_peer(debug_id, rxrpc_peer_put, r - 1, here); + if (dead) { hash_del_rcu(&peer->hash_link); list_del_init(&peer->keepalive_link); rxrpc_free_peer(peer); @@ -481,7 +483,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet) hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) { pr_err("Leaked peer %u {%u} %pISp\n", peer->debug_id, - atomic_read(&peer->usage), + refcount_read(&peer->ref), &peer->srx.transport); } } diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index e2f990754f88..8967201fd8e5 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -107,7 +107,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) call->cid, call->call_id, rxrpc_is_service_call(call) ? "Svc" : "Clt", - atomic_read(&call->usage), + refcount_read(&call->ref), rxrpc_call_states[call->state], call->abort_code, call->debug_id, @@ -189,7 +189,7 @@ print: conn->service_id, conn->proto.cid, rxrpc_conn_is_service(conn) ? 
"Svc" : "Clt", - atomic_read(&conn->usage), + refcount_read(&conn->ref), rxrpc_conn_states[conn->state], key_serial(conn->params.key), atomic_read(&conn->serial), @@ -239,7 +239,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v) " %3u %5u %6llus %8u %8u\n", lbuff, rbuff, - atomic_read(&peer->usage), + refcount_read(&peer->ref), peer->cong_cwnd, peer->mtu, now - peer->last_tx_at, @@ -334,3 +334,72 @@ const struct seq_operations rxrpc_peer_seq_ops = { .stop = rxrpc_peer_seq_stop, .show = rxrpc_peer_seq_show, }; + +/* + * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals + */ +static int rxrpc_local_seq_show(struct seq_file *seq, void *v) +{ + struct rxrpc_local *local; + char lbuff[50]; + + if (v == SEQ_START_TOKEN) { + seq_puts(seq, + "Proto Local " + " Use Act\n"); + return 0; + } + + local = hlist_entry(v, struct rxrpc_local, link); + + sprintf(lbuff, "%pISpc", &local->srx.transport); + + seq_printf(seq, + "UDP %-47.47s %3u %3u\n", + lbuff, + refcount_read(&local->ref), + atomic_read(&local->active_users)); + + return 0; +} + +static void *rxrpc_local_seq_start(struct seq_file *seq, loff_t *_pos) + __acquires(rcu) +{ + struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); + unsigned int n; + + rcu_read_lock(); + + if (*_pos >= UINT_MAX) + return NULL; + + n = *_pos; + if (n == 0) + return SEQ_START_TOKEN; + + return seq_hlist_start_rcu(&rxnet->local_endpoints, n - 1); +} + +static void *rxrpc_local_seq_next(struct seq_file *seq, void *v, loff_t *_pos) +{ + struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); + + if (*_pos >= UINT_MAX) + return NULL; + + return seq_hlist_next_rcu(v, &rxnet->local_endpoints, _pos); +} + +static void rxrpc_local_seq_stop(struct seq_file *seq, void *v) + __releases(rcu) +{ + rcu_read_unlock(); +} + +const struct seq_operations rxrpc_local_seq_ops = { + .start = rxrpc_local_seq_start, + .next = rxrpc_local_seq_next, + .stop = rxrpc_local_seq_stop, + .show = rxrpc_local_seq_show, +}; diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 2c842851d72e..787826773937 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -260,11 +260,9 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) rxrpc_end_rx_phase(call, serial); } else { /* Check to see if there's an ACK that needs sending. */ - if (after_eq(hard_ack, call->ackr_consumed + 2) || - after_eq(top, call->ackr_seen + 2) || - (hard_ack == top && after(hard_ack, call->ackr_consumed))) - rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, - true, true, + if (atomic_inc_return(&call->ackr_nr_consumed) > 2) + rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, + true, false, rxrpc_propose_ack_rotate_rx); if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY) rxrpc_send_ack_packet(call, false, NULL); diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index f114dc2af5cf..5345e8eefd33 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -451,7 +451,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, * directly into the target buffer. 
*/ sg = _sg; - nsg = skb_shinfo(skb)->nr_frags; + nsg = skb_shinfo(skb)->nr_frags + 1; if (nsg <= 4) { nsg = 4; } else { diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index d27140c836cc..eef3c14fd1c1 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -51,10 +51,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx, return sock_intr_errno(*timeo); trace_rxrpc_transmit(call, rxrpc_transmit_wait); - mutex_unlock(&call->user_mutex); *timeo = schedule_timeout(*timeo); - if (mutex_lock_interruptible(&call->user_mutex) < 0) - return sock_intr_errno(*timeo); } } @@ -290,37 +287,48 @@ out: static int rxrpc_send_data(struct rxrpc_sock *rx, struct rxrpc_call *call, struct msghdr *msg, size_t len, - rxrpc_notify_end_tx_t notify_end_tx) + rxrpc_notify_end_tx_t notify_end_tx, + bool *_dropped_lock) { struct rxrpc_skb_priv *sp; struct sk_buff *skb; struct sock *sk = &rx->sk; + enum rxrpc_call_state state; long timeo; - bool more; - int ret, copied; + bool more = msg->msg_flags & MSG_MORE; + int ret, copied = 0; timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); /* this should be in poll */ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); +reload: + ret = -EPIPE; if (sk->sk_shutdown & SEND_SHUTDOWN) - return -EPIPE; - - more = msg->msg_flags & MSG_MORE; + goto maybe_error; + state = READ_ONCE(call->state); + ret = -ESHUTDOWN; + if (state >= RXRPC_CALL_COMPLETE) + goto maybe_error; + ret = -EPROTO; + if (state != RXRPC_CALL_CLIENT_SEND_REQUEST && + state != RXRPC_CALL_SERVER_ACK_REQUEST && + state != RXRPC_CALL_SERVER_SEND_REPLY) + goto maybe_error; + ret = -EMSGSIZE; if (call->tx_total_len != -1) { - if (len > call->tx_total_len) - return -EMSGSIZE; - if (!more && len != call->tx_total_len) - return -EMSGSIZE; + if (len - copied > call->tx_total_len) + goto maybe_error; + if (!more && len - copied != call->tx_total_len) + goto maybe_error; } skb = call->tx_pending; call->tx_pending = NULL; rxrpc_see_skb(skb, rxrpc_skb_seen); - copied = 0; do { /* Check to see if there's a ping ACK to reply to. 
*/ if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE) @@ -331,16 +339,8 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, _debug("alloc"); - if (!rxrpc_check_tx_space(call, NULL)) { - ret = -EAGAIN; - if (msg->msg_flags & MSG_DONTWAIT) - goto maybe_error; - ret = rxrpc_wait_for_tx_window(rx, call, - &timeo, - msg->msg_flags & MSG_WAITALL); - if (ret < 0) - goto maybe_error; - } + if (!rxrpc_check_tx_space(call, NULL)) + goto wait_for_space; max = RXRPC_JUMBO_DATALEN; max -= call->conn->security_size; @@ -461,6 +461,12 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, success: ret = copied; + if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) { + read_lock_bh(&call->state_lock); + if (call->error < 0) + ret = call->error; + read_unlock_bh(&call->state_lock); + } out: call->tx_pending = skb; _leave(" = %d", ret); @@ -479,6 +485,27 @@ maybe_error: efault: ret = -EFAULT; goto out; + +wait_for_space: + ret = -EAGAIN; + if (msg->msg_flags & MSG_DONTWAIT) + goto maybe_error; + mutex_unlock(&call->user_mutex); + *_dropped_lock = true; + ret = rxrpc_wait_for_tx_window(rx, call, &timeo, + msg->msg_flags & MSG_WAITALL); + if (ret < 0) + goto maybe_error; + if (call->interruptibility == RXRPC_INTERRUPTIBLE) { + if (mutex_lock_interruptible(&call->user_mutex) < 0) { + ret = sock_intr_errno(timeo); + goto maybe_error; + } + } else { + mutex_lock(&call->user_mutex); + } + *_dropped_lock = false; + goto reload; } /* @@ -640,6 +667,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) enum rxrpc_call_state state; struct rxrpc_call *call; unsigned long now, j; + bool dropped_lock = false; int ret; struct rxrpc_send_params p = { @@ -748,21 +776,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ret = rxrpc_send_abort_packet(call); } else if (p.command != RXRPC_CMD_SEND_DATA) { ret = -EINVAL; - } else if (rxrpc_is_client_call(call) && - state != RXRPC_CALL_CLIENT_SEND_REQUEST) { - /* request phase complete for this client call */ - ret = -EPROTO; - } else if (rxrpc_is_service_call(call) && - state != RXRPC_CALL_SERVER_ACK_REQUEST && - state != RXRPC_CALL_SERVER_SEND_REPLY) { - /* Reply phase not begun or not complete for service call. 
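The wait_for_space path above is the heart of the sendmsg rework: the call's user_mutex is now dropped explicitly before sleeping (instead of the wait helper unlocking it behind the caller's back), *_dropped_lock tells the caller whether it still owns the mutex, and after relocking everything is re-validated from the top. A condensed sketch of that shape; struct my_call and the helpers call_dead(), tx_space() and wait_for_tx_space() are hypothetical stand-ins:

#include <linux/mutex.h>
#include <linux/errno.h>

struct my_call {
	struct mutex	user_mutex;
	bool		interruptible;
};

static bool call_dead(struct my_call *call);
static bool tx_space(struct my_call *call);
static void wait_for_tx_space(struct my_call *call);

static int send_locked(struct my_call *call, bool *dropped_lock)
{
reload:
	if (call_dead(call))
		return -ESHUTDOWN;	/* re-check state after every relock */

	if (!tx_space(call)) {
		mutex_unlock(&call->user_mutex);
		*dropped_lock = true;	/* caller must not unlock again */
		wait_for_tx_space(call);

		if (call->interruptible) {
			if (mutex_lock_interruptible(&call->user_mutex) < 0)
				return -ERESTARTSYS;
		} else {
			mutex_lock(&call->user_mutex);
		}
		*dropped_lock = false;
		goto reload;		/* state may have changed meanwhile */
	}

	/* ...copy and queue data while holding the mutex... */
	return 0;
}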
*/ - ret = -EPROTO; } else { - ret = rxrpc_send_data(rx, call, msg, len, NULL); + ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock); } out_put_unlock: - mutex_unlock(&call->user_mutex); + if (!dropped_lock) + mutex_unlock(&call->user_mutex); error_put: rxrpc_put_call(call, rxrpc_call_put); _leave(" = %d", ret); @@ -790,6 +810,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call, struct msghdr *msg, size_t len, rxrpc_notify_end_tx_t notify_end_tx) { + bool dropped_lock = false; int ret; _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]); @@ -807,7 +828,7 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call, case RXRPC_CALL_SERVER_ACK_REQUEST: case RXRPC_CALL_SERVER_SEND_REPLY: ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len, - notify_end_tx); + notify_end_tx, &dropped_lock); break; case RXRPC_CALL_COMPLETE: read_lock_bh(&call->state_lock); @@ -821,7 +842,8 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call, break; } - mutex_unlock(&call->user_mutex); + if (!dropped_lock) + mutex_unlock(&call->user_mutex); _leave(" = %d", ret); return ret; } diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c index 0348d2bf6f7d..580a5acffee7 100644 --- a/net/rxrpc/skbuff.c +++ b/net/rxrpc/skbuff.c @@ -71,7 +71,6 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) const void *here = __builtin_return_address(0); if (skb) { int n; - CHECK_SLAB_OKAY(&skb->users); n = atomic_dec_return(select_skb_count(skb)); trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, rxrpc_skb(skb)->rx_flags, here); diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c index 540351d6a5f4..555e0910786b 100644 --- a/net/rxrpc/sysctl.c +++ b/net/rxrpc/sysctl.c @@ -12,7 +12,7 @@ static struct ctl_table_header *rxrpc_sysctl_reg_table; static const unsigned int four = 4; -static const unsigned int thirtytwo = 32; +static const unsigned int max_backlog = RXRPC_BACKLOG_MAX - 1; static const unsigned int n_65535 = 65535; static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1; static const unsigned long one_jiffy = 1; @@ -89,7 +89,7 @@ static struct ctl_table rxrpc_sysctl_table[] = { .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = (void *)&four, - .extra2 = (void *)&thirtytwo, + .extra2 = (void *)&max_backlog, }, { .procname = "rx_window_size", diff --git a/net/sched/Kconfig b/net/sched/Kconfig index d762e89ab74f..bc4e5da76fa6 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -976,7 +976,7 @@ config NET_ACT_TUNNEL_KEY config NET_ACT_CT tristate "connection tracking tc action" - depends on NET_CLS_ACT && NF_CONNTRACK && NF_NAT && NF_FLOW_TABLE + depends on NET_CLS_ACT && NF_CONNTRACK && (!NF_NAT || NF_NAT) && NF_FLOW_TABLE help Say Y here to allow sending the packets to conntrack module. 
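An aside on the NET_ACT_CT dependency just above: "(!NF_NAT || NF_NAT)" is not a typo but the usual Kconfig tristate trick. Kconfig negation maps y to n, m to m and n to y, and "||" takes the maximum of its operands, so the expression evaluates to y when NF_NAT is y or n, but to m when NF_NAT is m:

	NF_NAT = y  ->  n || y = y   (no constraint)
	NF_NAT = m  ->  m || m = m   (NET_ACT_CT capped at m)
	NF_NAT = n  ->  y || n = y   (no constraint)

The effect is that act_ct can no longer be built in while NAT is a module, which would otherwise leave built-in code referencing module-only symbols.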
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 7b29aa1a3ce9..4ab9c2a6f650 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -302,7 +302,8 @@ static int tcf_idr_release_unsafe(struct tc_action *p) } static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, - const struct tc_action_ops *ops) + const struct tc_action_ops *ops, + struct netlink_ext_ack *extack) { struct nlattr *nest; int n_i = 0; @@ -318,20 +319,25 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, if (nla_put_string(skb, TCA_KIND, ops->kind)) goto nla_put_failure; + ret = 0; mutex_lock(&idrinfo->lock); idr_for_each_entry_ul(idr, p, tmp, id) { if (IS_ERR(p)) continue; ret = tcf_idr_release_unsafe(p); - if (ret == ACT_P_DELETED) { + if (ret == ACT_P_DELETED) module_put(ops->owner); - n_i++; - } else if (ret < 0) { - mutex_unlock(&idrinfo->lock); - goto nla_put_failure; - } + else if (ret < 0) + break; + n_i++; } mutex_unlock(&idrinfo->lock); + if (ret < 0) { + if (n_i) + NL_SET_ERR_MSG(extack, "Unable to flush all TC actions"); + else + goto nla_put_failure; + } ret = nla_put_u32(skb, TCA_FCNT, n_i); if (ret) @@ -352,7 +358,7 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, struct tcf_idrinfo *idrinfo = tn->idrinfo; if (type == RTM_DELACTION) { - return tcf_del_walker(idrinfo, skb, ops); + return tcf_del_walker(idrinfo, skb, ops, extack); } else if (type == RTM_GETACTION) { return tcf_dump_walker(idrinfo, skb, cb); } else { diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index e19885d7fe2c..31d268eedf3f 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -62,7 +62,7 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a, c = nf_ct_get(skb, &ctinfo); if (c) { - skb->mark = c->mark; + skb->mark = READ_ONCE(c->mark); /* using overlimits stats to count how many packets marked */ ca->tcf_qstats.overlimits++; goto out; @@ -82,7 +82,7 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a, c = nf_ct_tuplehash_to_ctrack(thash); /* using overlimits stats to count how many packets marked */ ca->tcf_qstats.overlimits++; - skb->mark = c->mark; + skb->mark = READ_ONCE(c->mark); nf_ct_put(c); out: diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 825b3e9b55f7..2d41d866de3e 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -177,7 +177,7 @@ static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct, entry = tcf_ct_flow_table_flow_action_get_next(action); entry->id = FLOW_ACTION_CT_METADATA; #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) - entry->ct_metadata.mark = ct->mark; + entry->ct_metadata.mark = READ_ONCE(ct->mark); #endif ctinfo = dir == IP_CT_DIR_ORIGINAL ? 
IP_CT_ESTABLISHED : IP_CT_ESTABLISHED_REPLY; @@ -843,9 +843,9 @@ static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask) if (!mask) return; - new_mark = mark | (ct->mark & ~(mask)); - if (ct->mark != new_mark) { - ct->mark = new_mark; + new_mark = mark | (READ_ONCE(ct->mark) & ~(mask)); + if (READ_ONCE(ct->mark) != new_mark) { + WRITE_ONCE(ct->mark, new_mark); if (nf_ct_is_confirmed(ct)) nf_conntrack_event_cache(IPCT_MARK, ct); } @@ -1293,7 +1293,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla, err = tcf_ct_flow_table_get(params); if (err) - goto cleanup; + goto cleanup_params; spin_lock_bh(&c->tcf_lock); goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); @@ -1308,6 +1308,9 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla, return res; +cleanup_params: + if (params->tmpl) + nf_ct_put(params->tmpl); cleanup: if (goto_ch) tcf_chain_put_by_act(goto_ch); diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c index b20c8ce59905..06c74f22ab98 100644 --- a/net/sched/act_ctinfo.c +++ b/net/sched/act_ctinfo.c @@ -33,7 +33,7 @@ static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca, { u8 dscp, newdscp; - newdscp = (((ct->mark & cp->dscpmask) >> cp->dscpmaskshift) << 2) & + newdscp = (((READ_ONCE(ct->mark) & cp->dscpmask) >> cp->dscpmaskshift) << 2) & ~INET_ECN_MASK; switch (proto) { @@ -73,7 +73,7 @@ static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca, struct sk_buff *skb) { ca->stats_cpmark_set++; - skb->mark = ct->mark & cp->cpmarkmask; + skb->mark = READ_ONCE(ct->mark) & cp->cpmarkmask; } static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a, @@ -131,7 +131,7 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a, } if (cp->mode & CTINFO_MODE_DSCP) - if (!cp->dscpstatemask || (ct->mark & cp->dscpstatemask)) + if (!cp->dscpstatemask || (READ_ONCE(ct->mark) & cp->dscpstatemask)) tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto); if (cp->mode & CTINFO_MODE_CPMARK) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index b45304446e13..0d5463ddfd62 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -149,7 +149,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, struct nlattr *pattr; struct tcf_pedit *p; int ret = 0, err; - int ksize; + int i, ksize; u32 index; if (!nla) { @@ -228,6 +228,22 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, p->tcfp_nkeys = parm->nkeys; } memcpy(p->tcfp_keys, parm->keys, ksize); + p->tcfp_off_max_hint = 0; + for (i = 0; i < p->tcfp_nkeys; ++i) { + u32 cur = p->tcfp_keys[i].off; + + /* sanitize the shift value for any later use */ + p->tcfp_keys[i].shift = min_t(size_t, BITS_PER_TYPE(int) - 1, + p->tcfp_keys[i].shift); + + /* The AT option can read a single byte, we can bound the actual + * value with uchar max. + */ + cur += (0xff & p->tcfp_keys[i].offmask) >> p->tcfp_keys[i].shift; + + /* Each key touches 4 bytes starting from the computed offset */ + p->tcfp_off_max_hint = max(p->tcfp_off_max_hint, cur + 4); + } p->tcfp_flags = parm->flags; goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); @@ -308,13 +324,18 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_pedit *p = to_pedit(a); + u32 max_offset; int i; - if (skb_unclone(skb, GFP_ATOMIC)) - return p->tcf_action; - spin_lock(&p->tcf_lock); + max_offset = (skb_transport_header_was_set(skb) ? 
+ skb_transport_offset(skb) : + skb_network_offset(skb)) + + p->tcfp_off_max_hint; + if (skb_ensure_writable(skb, min(skb->len, max_offset))) + goto unlock; + tcf_lastuse_update(&p->tcf_tm); if (p->tcfp_nkeys > 0) { @@ -403,6 +424,7 @@ bad: p->tcf_qstats.overlimits++; done: bstats_update(&p->tcf_bstats, skb); +unlock: spin_unlock(&p->tcf_lock); return p->tcf_action; } diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 8d8452b1cdd4..380733588959 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -213,6 +213,20 @@ release_idr: return err; } +static bool tcf_police_mtu_check(struct sk_buff *skb, u32 limit) +{ + u32 len; + + if (skb_is_gso(skb)) + return skb_gso_validate_mac_len(skb, limit); + + len = qdisc_pkt_len(skb); + if (skb_at_tc_ingress(skb)) + len += skb->mac_len; + + return len <= limit; +} + static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { @@ -235,7 +249,7 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a, goto inc_overlimits; } - if (qdisc_pkt_len(skb) <= p->tcfp_mtu) { + if (tcf_police_mtu_check(skb, p->tcfp_mtu)) { if (!p->rate_present) { ret = p->tcfp_result; goto end; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 9a789a057a74..c410a736301b 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1656,10 +1656,10 @@ static int tcf_chain_tp_insert(struct tcf_chain *chain, if (chain->flushing) return -EAGAIN; + RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info)); if (*chain_info->pprev == chain->filter_chain) tcf_chain0_head_change(chain, tp); tcf_proto_get(tp); - RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info)); rcu_assign_pointer(*chain_info->pprev, tp); return 0; @@ -2124,6 +2124,7 @@ replay: } if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) { + tfilter_put(tp, fh); NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); err = -EINVAL; goto errout; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 8ff6945b9f8f..35ee6d8226e6 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -998,6 +998,7 @@ static int fl_set_key_mpls(struct nlattr **tb, static void fl_set_key_vlan(struct nlattr **tb, __be16 ethertype, int vlan_id_key, int vlan_prio_key, + int vlan_next_eth_type_key, struct flow_dissector_key_vlan *key_val, struct flow_dissector_key_vlan *key_mask) { @@ -1016,6 +1017,11 @@ static void fl_set_key_vlan(struct nlattr **tb, } key_val->vlan_tpid = ethertype; key_mask->vlan_tpid = cpu_to_be16(~0); + if (tb[vlan_next_eth_type_key]) { + key_val->vlan_eth_type = + nla_get_be16(tb[vlan_next_eth_type_key]); + key_mask->vlan_eth_type = cpu_to_be16(~0); + } } static void fl_set_key_flag(u32 flower_key, u32 flower_mask, @@ -1497,8 +1503,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb, if (eth_type_vlan(ethertype)) { fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID, - TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, - &mask->vlan); + TCA_FLOWER_KEY_VLAN_PRIO, + TCA_FLOWER_KEY_VLAN_ETH_TYPE, + &key->vlan, &mask->vlan); if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) { ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]); @@ -1506,6 +1513,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb, fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_CVLAN_ID, TCA_FLOWER_KEY_CVLAN_PRIO, + TCA_FLOWER_KEY_CVLAN_ETH_TYPE, &key->cvlan, &mask->cvlan); fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, @@ -2861,13 +2869,13 @@ static int fl_dump_key(struct sk_buff 
*skb, struct net *net, goto nla_put_failure; if (mask->basic.n_proto) { - if (mask->cvlan.vlan_tpid) { + if (mask->cvlan.vlan_eth_type) { if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, key->basic.n_proto)) goto nla_put_failure; - } else if (mask->vlan.vlan_tpid) { + } else if (mask->vlan.vlan_eth_type) { if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, - key->basic.n_proto)) + key->vlan.vlan_eth_type)) goto nla_put_failure; } } diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 5efa3e7ace15..b775e681cb56 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -424,6 +424,11 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp, return -EINVAL; } + if (!nhandle) { + NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid"); + return -EINVAL; + } + h1 = to_hash(nhandle); b = rtnl_dereference(head->table[h1]); if (!b) { @@ -477,6 +482,11 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, int err; bool new = true; + if (!handle) { + NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid"); + return -EINVAL; + } + if (opt == NULL) return handle ? -EINVAL : 0; @@ -526,7 +536,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, rcu_assign_pointer(f->next, f1); rcu_assign_pointer(*fp, f); - if (fold && fold->handle && f->handle != fold->handle) { + if (fold) { th = to_hash(fold->handle); h = from_hash(fold->handle >> 16); b = rtnl_dereference(head->table[th]); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index b61db335c49d..da042bc8b239 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -814,10 +814,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp, new->flags = n->flags; RCU_INIT_POINTER(new->ht_down, ht); - /* bump reference count as long as we hold pointer to structure */ - if (ht) - ht->refcnt++; - #ifdef CONFIG_CLS_U32_PERF /* Statistics may be incremented by readers during update * so we must keep them in tact. When the node is later destroyed @@ -839,6 +835,10 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp, return NULL; } + /* bump reference count as long as we hold pointer to structure */ + if (ht) + ht->refcnt++; + return new; } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 6e18aa417782..d8ffe4114385 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1081,12 +1081,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, skip: if (!ingress) { - notify_and_destroy(net, skb, n, classid, - rtnl_dereference(dev->qdisc), new); + old = rtnl_dereference(dev->qdisc); if (new && !new->ops->attach) qdisc_refcount_inc(new); rcu_assign_pointer(dev->qdisc, new ? 
: &noop_qdisc); + notify_and_destroy(net, skb, n, classid, old, new); + if (new && new->ops->attach) new->ops->attach(new); } else { diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 1c281cc81f57..794c7377cd7e 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -575,7 +575,6 @@ static void atm_tc_reset(struct Qdisc *sch) pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p); list_for_each_entry(flow, &p->flows, list) qdisc_reset(flow->q); - sch->q.qlen = 0; } static void atm_tc_destroy(struct Qdisc *sch) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index c580139fcede..5dc7a3c310c9 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -2224,8 +2224,12 @@ retry: static void cake_reset(struct Qdisc *sch) { + struct cake_sched_data *q = qdisc_priv(sch); u32 c; + if (!q->tins) + return; + for (c = 0; c < CAKE_MAX_TINS; c++) cake_clear_tin(sch, c); } diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 4a78fcf5d4f9..9a3dff02b7a2 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1053,7 +1053,6 @@ cbq_reset(struct Qdisc *sch) cl->cpriority = cl->priority; } } - sch->q.qlen = 0; } diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 2adbd945bf15..25d2daaa8122 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -315,8 +315,6 @@ static void choke_reset(struct Qdisc *sch) rtnl_qdisc_drop(skb, sch); } - sch->q.qlen = 0; - sch->qstats.backlog = 0; if (q->tab) memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); q->head = q->tail = 0; diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index dde564670ad8..08424aac6da8 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -443,8 +443,6 @@ static void drr_reset_qdisc(struct Qdisc *sch) qdisc_reset(cl->qdisc); } } - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static void drr_destroy_qdisc(struct Qdisc *sch) diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 76ed1a05ded2..a75bc7f80cd7 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -408,8 +408,6 @@ static void dsmark_reset(struct Qdisc *sch) pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p); if (p->q) qdisc_reset(p->q); - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static void dsmark_destroy(struct Qdisc *sch) diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c index c48f91075b5c..d96103b0e2bf 100644 --- a/net/sched/sch_etf.c +++ b/net/sched/sch_etf.c @@ -445,9 +445,6 @@ static void etf_reset(struct Qdisc *sch) timesortedlist_clear(sch); __qdisc_reset_queue(&sch->q); - sch->qstats.backlog = 0; - sch->q.qlen = 0; - q->last = 0; } diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 9c224872ef03..05817c55692f 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -722,8 +722,6 @@ static void ets_qdisc_reset(struct Qdisc *sch) } for (band = 0; band < q->nbands; band++) qdisc_reset(q->classes[band].qdisc); - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static void ets_qdisc_destroy(struct Qdisc *sch) diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 99e8db262198..01d6eea5b0ce 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -347,8 +347,6 @@ static void fq_codel_reset(struct Qdisc *sch) codel_vars_init(&flow->cvars); } memset(q->backlogs, 0, q->flows_cnt * sizeof(u32)); - sch->q.qlen = 0; - sch->qstats.backlog = 0; q->memory_usage = 0; } diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index c70802785518..cf04f70e96bf 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ 
-521,9 +521,6 @@ static void fq_pie_reset(struct Qdisc *sch) INIT_LIST_HEAD(&flow->flowchain); pie_vars_init(&flow->vars); } - - sch->q.qlen = 0; - sch->qstats.backlog = 0; } static void fq_pie_destroy(struct Qdisc *sch) diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 99f7a17fd35a..0f37da3e23bf 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -403,7 +403,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets) void __qdisc_run(struct Qdisc *q) { - int quota = dev_tx_weight; + int quota = READ_ONCE(dev_tx_weight); int packets; while (qdisc_restart(q, &packets)) { @@ -1057,6 +1057,21 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, } EXPORT_SYMBOL(dev_graft_qdisc); +static void shutdown_scheduler_queue(struct net_device *dev, + struct netdev_queue *dev_queue, + void *_qdisc_default) +{ + struct Qdisc *qdisc = dev_queue->qdisc_sleeping; + struct Qdisc *qdisc_default = _qdisc_default; + + if (qdisc) { + rcu_assign_pointer(dev_queue->qdisc, qdisc_default); + dev_queue->qdisc_sleeping = qdisc_default; + + qdisc_put(qdisc); + } +} + static void attach_one_default_qdisc(struct net_device *dev, struct netdev_queue *dev_queue, void *_unused) @@ -1104,6 +1119,7 @@ static void attach_default_qdiscs(struct net_device *dev) if (qdisc == &noop_qdisc) { netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n", default_qdisc_ops->id, noqueue_qdisc_ops.id); + netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); dev->priv_flags |= IFF_NO_QUEUE; netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); qdisc = txq->qdisc_sleeping; @@ -1348,21 +1364,6 @@ void dev_init_scheduler(struct net_device *dev) timer_setup(&dev->watchdog_timer, dev_watchdog, 0); } -static void shutdown_scheduler_queue(struct net_device *dev, - struct netdev_queue *dev_queue, - void *_qdisc_default) -{ - struct Qdisc *qdisc = dev_queue->qdisc_sleeping; - struct Qdisc *qdisc_default = _qdisc_default; - - if (qdisc) { - rcu_assign_pointer(dev_queue->qdisc, qdisc_default); - dev_queue->qdisc_sleeping = qdisc_default; - - qdisc_put(qdisc); - } -} - void dev_shutdown(struct net_device *dev) { netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index d1902fca9844..cdc43a06aa9b 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1484,8 +1484,6 @@ hfsc_reset_qdisc(struct Qdisc *sch) } q->eligible = RB_ROOT; qdisc_watchdog_cancel(&q->watchdog); - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static void diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index cd70dbcbd72f..c3ba018fd083 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -966,8 +966,6 @@ static void htb_reset(struct Qdisc *sch) } qdisc_watchdog_cancel(&q->watchdog); __qdisc_reset_queue(&q->direct_queue); - sch->q.qlen = 0; - sch->qstats.backlog = 0; memset(q->hlevel, 0, sizeof(q->hlevel)); memset(q->row_mask, 0, sizeof(q->row_mask)); } diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 5c27b4270b90..1c6dbcfa89b8 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -152,7 +152,6 @@ multiq_reset(struct Qdisc *sch) for (band = 0; band < q->bands; band++) qdisc_reset(q->queues[band]); - sch->q.qlen = 0; q->curband = 0; } diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 0c345e43a09a..adc5407fd5d5 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -1146,9 +1146,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) 
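The string of reset-handler diffs above and below all delete the same two lines; they are safe to drop because the core reset path clears the counters itself after calling into the qdisc. A paraphrased sketch of that central path, loosely modelled on qdisc_reset() in net/sched/sch_generic.c and trimmed to the relevant part:

#include <net/sch_generic.h>

static void qdisc_reset_shape(struct Qdisc *sch)
{
	if (sch->ops->reset)
		sch->ops->reset(sch);	/* qdisc-specific queue purging */

	/* central zeroing makes the per-qdisc copies redundant */
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
}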
struct tc_netem_rate rate; struct tc_netem_slot slot; - qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency), + qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency), UINT_MAX); - qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter), + qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter), UINT_MAX); qopt.limit = q->limit; qopt.loss = q->loss; diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 3eabb871a1d5..1c805fe05b82 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -135,8 +135,6 @@ prio_reset(struct Qdisc *sch) for (prio = 0; prio < q->bands; prio++) qdisc_reset(q->queues[prio]); - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt) diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index af8c63a9ec18..1d1d81aeb389 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1458,8 +1458,6 @@ static void qfq_reset_qdisc(struct Qdisc *sch) qdisc_reset(cl->qdisc); } } - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static void qfq_destroy_qdisc(struct Qdisc *sch) diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 40adf1f07a82..935d90874b1b 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -72,6 +72,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, { struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; + unsigned int len; int ret; q->vars.qavg = red_calc_qavg(&q->parms, @@ -126,9 +127,10 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, break; } + len = qdisc_pkt_len(skb); ret = qdisc_enqueue(skb, child, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { - qdisc_qstats_backlog_inc(sch, skb); + sch->qstats.backlog += len; sch->q.qlen++; } else if (net_xmit_drop_count(ret)) { q->stats.pdrop++; @@ -176,8 +178,6 @@ static void red_reset(struct Qdisc *sch) struct red_sched_data *q = qdisc_priv(sch); qdisc_reset(q->qdisc); - sch->qstats.backlog = 0; - sch->q.qlen = 0; red_restart(&q->vars); } diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index da047a37a3bf..9ded56228ea1 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) } } -static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) +static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q) { u32 sfbhash; - sfbhash = sfb_hash(skb, 0); + sfbhash = cb->hashes[0]; if (sfbhash) increment_one_qlen(sfbhash, 0, q); - sfbhash = sfb_hash(skb, 1); + sfbhash = cb->hashes[1]; if (sfbhash) increment_one_qlen(sfbhash, 1, q); } @@ -281,8 +281,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, { struct sfb_sched_data *q = qdisc_priv(sch); + unsigned int len = qdisc_pkt_len(skb); struct Qdisc *child = q->qdisc; struct tcf_proto *fl; + struct sfb_skb_cb cb; int i; u32 p_min = ~0; u32 minqlen = ~0; @@ -399,11 +401,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, } enqueue: + memcpy(&cb, sfb_skb_cb(skb), sizeof(cb)); ret = qdisc_enqueue(skb, child, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { - qdisc_qstats_backlog_inc(sch, skb); + sch->qstats.backlog += len; sch->q.qlen++; - increment_qlen(skb, q); + increment_qlen(&cb, q); } else if (net_xmit_drop_count(ret)) { q->stats.childdrop++; qdisc_qstats_drop(sch); @@ -452,9 +455,8 @@ static void sfb_reset(struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); - qdisc_reset(q->qdisc); - 
sch->qstats.backlog = 0; - sch->q.qlen = 0; + if (likely(q->qdisc)) + qdisc_reset(q->qdisc); q->slot = 0; q->double_buffering = false; sfb_zero_all_buckets(q); diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c index 7a5e4c454715..df72fb83d9c7 100644 --- a/net/sched/sch_skbprio.c +++ b/net/sched/sch_skbprio.c @@ -213,9 +213,6 @@ static void skbprio_reset(struct Qdisc *sch) struct skbprio_sched_data *q = qdisc_priv(sch); int prio; - sch->qstats.backlog = 0; - sch->q.qlen = 0; - for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++) __skb_queue_purge(&q->qdiscs[prio]); diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 806babdd838d..7f33b31c7b8b 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -65,6 +65,7 @@ struct taprio_sched { u32 flags; enum tk_offsets tk_offset; int clockid; + bool offloaded; atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+ * speeds it's sub-nanoseconds per byte */ @@ -427,7 +428,8 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (unlikely(!child)) return qdisc_drop(skb, sch, to_free); - if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) { + /* sk_flags are only safe to use on full sockets. */ + if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) { if (!is_valid_interval(skb, sch)) return qdisc_drop(skb, sch, to_free); } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { @@ -1266,6 +1268,8 @@ static int taprio_enable_offload(struct net_device *dev, goto done; } + q->offloaded = true; + done: taprio_offload_free(offload); @@ -1280,12 +1284,9 @@ static int taprio_disable_offload(struct net_device *dev, struct tc_taprio_qopt_offload *offload; int err; - if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) + if (!q->offloaded) return 0; - if (!ops->ndo_setup_tc) - return -EOPNOTSUPP; - offload = taprio_offload_alloc(0); if (!offload) { NL_SET_ERR_MSG(extack, @@ -1301,6 +1302,8 @@ static int taprio_disable_offload(struct net_device *dev, goto out; } + q->offloaded = false; + out: taprio_offload_free(offload); @@ -1623,8 +1626,6 @@ static void taprio_reset(struct Qdisc *sch) if (q->qdiscs[i]) qdisc_reset(q->qdiscs[i]); } - sch->qstats.backlog = 0; - sch->q.qlen = 0; } static void taprio_destroy(struct Qdisc *sch) @@ -1903,12 +1904,14 @@ start_error: static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) { - struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + unsigned int ntx = cl - 1; - if (!dev_queue) + if (ntx >= dev->num_tx_queues) return NULL; - return dev_queue->qdisc_sleeping; + return q->qdiscs[ntx]; } static unsigned long taprio_find(struct Qdisc *sch, u32 classid) diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 78e79029dc63..7461e5c67d50 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -316,8 +316,6 @@ static void tbf_reset(struct Qdisc *sch) struct tbf_sched_data *q = qdisc_priv(sch); qdisc_reset(q->qdisc); - sch->qstats.backlog = 0; - sch->q.qlen = 0; q->t_c = ktime_get_ns(); q->tokens = q->buffer; q->ptokens = q->mtu; @@ -342,6 +340,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, struct nlattr *tb[TCA_TBF_MAX + 1]; struct tc_tbf_qopt *qopt; struct Qdisc *child = NULL; + struct Qdisc *old = NULL; struct psched_ratecfg rate; struct psched_ratecfg peak; u64 max_size; @@ -433,7 +432,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, sch_tree_lock(sch); if (child) { qdisc_tree_flush_backlog(q->qdisc); - 
qdisc_put(q->qdisc); + old = q->qdisc; q->qdisc = child; } q->limit = qopt->limit; @@ -453,6 +452,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg)); sch_tree_unlock(sch); + qdisc_put(old); err = 0; tbf_offload_change(sch); diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 6af6b95bdb67..79aaab51cbf5 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -124,7 +124,6 @@ teql_reset(struct Qdisc *sch) struct teql_sched_data *dat = qdisc_priv(sch); skb_queue_purge(&dat->q); - sch->q.qlen = 0; } static void diff --git a/net/sctp/associola.c b/net/sctp/associola.c index fdb69d46276d..2d4ec6187755 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -226,9 +226,8 @@ static struct sctp_association *sctp_association_init( if (!sctp_ulpq_init(&asoc->ulpq, asoc)) goto fail_init; - if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, - 0, gfp)) - goto fail_init; + if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp)) + goto stream_free; /* Initialize default path MTU. */ asoc->pathmtu = sp->pathmtu; diff --git a/net/sctp/auth.c b/net/sctp/auth.c index db6b7373d16c..34964145514e 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -863,12 +863,17 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, } list_del_init(&shkey->key_list); - sctp_auth_shkey_release(shkey); list_add(&cur_key->key_list, sh_keys); - if (asoc && asoc->active_key_id == auth_key->sca_keynumber) - sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL); + if (asoc && asoc->active_key_id == auth_key->sca_keynumber && + sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) { + list_del_init(&cur_key->key_list); + sctp_auth_shkey_release(cur_key); + list_add(&shkey->key_list, sh_keys); + return -ENOMEM; + } + sctp_auth_shkey_release(shkey); return 0; } @@ -902,8 +907,13 @@ int sctp_auth_set_active_key(struct sctp_endpoint *ep, return -EINVAL; if (asoc) { + __u16 active_key_id = asoc->active_key_id; + asoc->active_key_id = key_id; - sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL); + if (sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) { + asoc->active_key_id = active_key_id; + return -ENOMEM; + } } else ep->active_key_id = key_id; diff --git a/net/sctp/input.c b/net/sctp/input.c index 34494a0b28bd..8f3aab6a4458 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -92,6 +92,7 @@ int sctp_rcv(struct sk_buff *skb) struct sctp_chunk *chunk; union sctp_addr src; union sctp_addr dest; + int bound_dev_if; int family; struct sctp_af *af; struct net *net = dev_net(skb->dev); @@ -169,7 +170,8 @@ int sctp_rcv(struct sk_buff *skb) * If a frame arrives on an interface and the receiving socket is * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB */ - if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { + bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); + if (bound_dev_if && (bound_dev_if != af->skb_iif(skb))) { if (transport) { sctp_transport_put(transport); asoc = NULL; diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 3fd06a27105d..83a89dcf75ed 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -384,6 +384,7 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, { struct sctp_outq *q = &asoc->outqueue; struct sctp_chunk *chk, *temp; + struct sctp_stream_out *sout; q->sched->unsched_all(&asoc->stream); @@ -398,12 +399,14 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, sctp_sched_dequeue_common(q, chk); asoc->sent_cnt_removable--; 
asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; - if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) { - struct sctp_stream_out *streamout = - SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream); - streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; - } + sout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream); + sout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; + + /* clear out_curr if all frag chunks are pruned */ + if (asoc->stream.out_curr == sout && + list_is_last(&chk->frag_list, &chk->msg->chunks)) + asoc->stream.out_curr = NULL; msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk); sctp_chunk_free(chk); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 940f1e257a90..6e4ca837e91d 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -358,7 +358,7 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp) if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) && ret != RTN_LOCAL && !sp->inet.freebind && - !net->ipv4.sysctl_ip_nonlocal_bind) + !READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind)) return 0; if (ipv6_only_sock(sctp_opt2sk(sp))) diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 0948f14ce221..d4e5969771f0 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -458,6 +458,10 @@ void sctp_generate_reconf_event(struct timer_list *t) goto out_unlock; } + /* This happens when the response arrives after the timer is triggered. */ + if (!asoc->strreset_chunk) + goto out_unlock; + error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF), asoc->state, asoc->ep, asoc, diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 89ce8764422b..2fca282cc375 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5523,7 +5523,7 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) * Set the daddr and initialize id to something more random and also * copy over any ip options. */ - sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); + sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sock->sk); sp->pf->copy_ip_options(sk, sock->sk); /* Populate the fields of the newsk from the oldsk and migrate the diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 6dc95dcc0ff4..ee6514af830f 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -52,6 +52,19 @@ static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt) } } +static void sctp_stream_free_ext(struct sctp_stream *stream, __u16 sid) +{ + struct sctp_sched_ops *sched; + + if (!SCTP_SO(stream, sid)->ext) + return; + + sched = sctp_sched_ops_from_stream(stream); + sched->free_sid(stream, sid); + kfree(SCTP_SO(stream, sid)->ext); + SCTP_SO(stream, sid)->ext = NULL; +} + /* Migrates chunks from stream queues to new stream queues if needed, * but not across associations. Also, removes those chunks to streams * higher than the new max. @@ -70,16 +83,14 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream, * sctp_stream_update will swap ->out pointers. 
*/ for (i = 0; i < outcnt; i++) { - kfree(SCTP_SO(new, i)->ext); + sctp_stream_free_ext(new, i); SCTP_SO(new, i)->ext = SCTP_SO(stream, i)->ext; SCTP_SO(stream, i)->ext = NULL; } } - for (i = outcnt; i < stream->outcnt; i++) { - kfree(SCTP_SO(stream, i)->ext); - SCTP_SO(stream, i)->ext = NULL; - } + for (i = outcnt; i < stream->outcnt; i++) + sctp_stream_free_ext(stream, i); } static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, @@ -137,7 +148,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, ret = sctp_stream_alloc_out(stream, outcnt, gfp); if (ret) - goto out_err; + return ret; for (i = 0; i < stream->outcnt; i++) SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; @@ -145,22 +156,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, handle_in: sctp_stream_interleave_init(stream); if (!incnt) - goto out; + return 0; - ret = sctp_stream_alloc_in(stream, incnt, gfp); - if (ret) - goto in_err; - - goto out; - -in_err: - sched->free(stream); - genradix_free(&stream->in); -out_err: - genradix_free(&stream->out); - stream->outcnt = 0; -out: - return ret; + return sctp_stream_alloc_in(stream, incnt, gfp); } int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid) @@ -187,9 +185,9 @@ void sctp_stream_free(struct sctp_stream *stream) struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); int i; - sched->free(stream); + sched->unsched_all(stream); for (i = 0; i < stream->outcnt; i++) - kfree(SCTP_SO(stream, i)->ext); + sctp_stream_free_ext(stream, i); genradix_free(&stream->out); genradix_free(&stream->in); } diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c index 99e5f69fbb74..33c2630c2496 100644 --- a/net/sctp/stream_sched.c +++ b/net/sctp/stream_sched.c @@ -46,6 +46,10 @@ static int sctp_sched_fcfs_init_sid(struct sctp_stream *stream, __u16 sid, return 0; } +static void sctp_sched_fcfs_free_sid(struct sctp_stream *stream, __u16 sid) +{ +} + static void sctp_sched_fcfs_free(struct sctp_stream *stream) { } @@ -96,6 +100,7 @@ static struct sctp_sched_ops sctp_sched_fcfs = { .get = sctp_sched_fcfs_get, .init = sctp_sched_fcfs_init, .init_sid = sctp_sched_fcfs_init_sid, + .free_sid = sctp_sched_fcfs_free_sid, .free = sctp_sched_fcfs_free, .enqueue = sctp_sched_fcfs_enqueue, .dequeue = sctp_sched_fcfs_dequeue, @@ -163,7 +168,7 @@ int sctp_sched_set_sched(struct sctp_association *asoc, if (!SCTP_SO(&asoc->stream, i)->ext) continue; - ret = n->init_sid(&asoc->stream, i, GFP_KERNEL); + ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC); if (ret) goto err; } diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c index 80b5a2c4cbc7..4fc9f2923ed1 100644 --- a/net/sctp/stream_sched_prio.c +++ b/net/sctp/stream_sched_prio.c @@ -204,6 +204,24 @@ static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid, return sctp_sched_prio_set(stream, sid, 0, gfp); } +static void sctp_sched_prio_free_sid(struct sctp_stream *stream, __u16 sid) +{ + struct sctp_stream_priorities *prio = SCTP_SO(stream, sid)->ext->prio_head; + int i; + + if (!prio) + return; + + SCTP_SO(stream, sid)->ext->prio_head = NULL; + for (i = 0; i < stream->outcnt; i++) { + if (SCTP_SO(stream, i)->ext && + SCTP_SO(stream, i)->ext->prio_head == prio) + return; + } + + kfree(prio); +} + static void sctp_sched_prio_free(struct sctp_stream *stream) { struct sctp_stream_priorities *prio, *n; @@ -323,6 +341,7 @@ static struct sctp_sched_ops sctp_sched_prio = { .get = sctp_sched_prio_get, .init = sctp_sched_prio_init, 
.init_sid = sctp_sched_prio_init_sid, + .free_sid = sctp_sched_prio_free_sid, .free = sctp_sched_prio_free, .enqueue = sctp_sched_prio_enqueue, .dequeue = sctp_sched_prio_dequeue, diff --git a/net/sctp/stream_sched_rr.c b/net/sctp/stream_sched_rr.c index ff425aed62c7..cc444fe0d67c 100644 --- a/net/sctp/stream_sched_rr.c +++ b/net/sctp/stream_sched_rr.c @@ -90,6 +90,10 @@ static int sctp_sched_rr_init_sid(struct sctp_stream *stream, __u16 sid, return 0; } +static void sctp_sched_rr_free_sid(struct sctp_stream *stream, __u16 sid) +{ +} + static void sctp_sched_rr_free(struct sctp_stream *stream) { sctp_sched_rr_unsched_all(stream); @@ -177,6 +181,7 @@ static struct sctp_sched_ops sctp_sched_rr = { .get = sctp_sched_rr_get, .init = sctp_sched_rr_init, .init_sid = sctp_sched_rr_init_sid, + .free_sid = sctp_sched_rr_free_sid, .free = sctp_sched_rr_free, .enqueue = sctp_sched_rr_enqueue, .dequeue = sctp_sched_rr_dequeue, diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 4f16d406ad8e..41cbc7c89c9d 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1057,6 +1057,8 @@ static void smc_connect_work(struct work_struct *work) smc->sk.sk_state = SMC_CLOSED; if (rc == -EPIPE || rc == -EAGAIN) smc->sk.sk_err = EPIPE; + else if (rc == -ECONNREFUSED) + smc->sk.sk_err = ECONNREFUSED; else if (signal_pending(current)) smc->sk.sk_err = -sock_intr_errno(timeo); sock_put(&smc->sk); /* passive closing */ @@ -1116,9 +1118,9 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr, if (rc && rc != -EINPROGRESS) goto out; - sock_hold(&smc->sk); /* sock put in passive closing */ if (smc->use_fallback) goto out; + sock_hold(&smc->sk); /* sock put in passive closing */ if (flags & O_NONBLOCK) { if (queue_work(smc_hs_wq, &smc->connect_work)) smc->connect_nonblock = 1; @@ -1323,7 +1325,6 @@ static void smc_listen_out_connected(struct smc_sock *new_smc) { struct sock *newsmcsk = &new_smc->sk; - sk_refcnt_debug_inc(newsmcsk); if (newsmcsk->sk_state == SMC_INIT) newsmcsk->sk_state = SMC_ACTIVE; @@ -2144,8 +2145,10 @@ static int smc_shutdown(struct socket *sock, int how) if (smc->use_fallback) { rc = kernel_sock_shutdown(smc->clcsock, how); sk->sk_shutdown = smc->clcsock->sk->sk_shutdown; - if (sk->sk_shutdown == SHUTDOWN_MASK) + if (sk->sk_shutdown == SHUTDOWN_MASK) { sk->sk_state = SMC_CLOSED; + sock_put(sk); + } goto out; } switch (how) { diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index 0c490cdde6a4..94503f36b9a6 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -72,7 +72,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn, /* abnormal termination */ if (!rc) smc_wr_tx_put_slot(link, - (struct smc_wr_tx_pend_priv *)pend); + (struct smc_wr_tx_pend_priv *)(*pend)); rc = -EPIPE; } return rc; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index d69aac6c1fce..bf485a2017a4 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -1426,7 +1426,7 @@ static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize, */ static inline int smc_rmb_wnd_update_limit(int rmbe_size) { - return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2); + return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2); } /* map an rmb buf to a link */ @@ -1584,7 +1584,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr, static int smcr_buf_map_usable_links(struct smc_link_group *lgr, struct smc_buf_desc *buf_desc, bool is_rmb) { - int i, rc = 0; + int i, rc = 0, cnt = 0; /* protect against parallel link reconfiguration */ mutex_lock(&lgr->llc_conf_mutex); @@ -1597,9 +1597,12 
@@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr, rc = -ENOMEM; goto out; } + cnt++; } out: mutex_unlock(&lgr->llc_conf_mutex); + if (!rc && !cnt) + rc = -EINVAL; return rc; } diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index ee1f0fdba085..0ef15f8fba90 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -1787,7 +1787,7 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc) init_waitqueue_head(&lgr->llc_flow_waiter); init_waitqueue_head(&lgr->llc_msg_waiter); mutex_init(&lgr->llc_conf_mutex); - lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time; + lgr->llc_testlink_time = READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time); } /* called after lgr was removed from lgr_list */ diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 9007c7e3bae4..30bae60d626c 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -310,8 +310,9 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name) list_for_each_entry(ibdev, &smc_ib_devices.list, list) { if (!strncmp(ibdev->ibdev->name, ib_name, sizeof(ibdev->ibdev->name)) || - !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name, - IB_DEVICE_NAME_MAX - 1)) { + (ibdev->ibdev->dev.parent && + !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name, + IB_DEVICE_NAME_MAX - 1))) { goto out; } } diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index fcfac59f8b72..7f7e983e42b1 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -346,12 +346,12 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, } break; } + if (!timeo) + return -EAGAIN; if (signal_pending(current)) { read_done = sock_intr_errno(timeo); break; } - if (!timeo) - return -EAGAIN; } if (!smc_rx_data_available(conn)) { diff --git a/net/socket.c b/net/socket.c index 9b8213123121..938ab3a89707 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1671,7 +1671,7 @@ int __sys_listen(int fd, int backlog) sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock) { - somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn; + somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn); if ((unsigned int)backlog > somaxconn) backlog = somaxconn; @@ -1689,30 +1689,22 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog) return __sys_listen(fd, backlog); } -int __sys_accept4_file(struct file *file, unsigned file_flags, +struct file *do_accept(struct file *file, unsigned file_flags, struct sockaddr __user *upeer_sockaddr, - int __user *upeer_addrlen, int flags, - unsigned long nofile) + int __user *upeer_addrlen, int flags) { struct socket *sock, *newsock; struct file *newfile; - int err, len, newfd; + int err, len; struct sockaddr_storage address; - if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) - return -EINVAL; - - if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) - flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; - sock = sock_from_file(file, &err); if (!sock) - goto out; + return ERR_PTR(err); - err = -ENFILE; newsock = sock_alloc(); if (!newsock) - goto out; + return ERR_PTR(-ENFILE); newsock->type = sock->type; newsock->ops = sock->ops; @@ -1723,18 +1715,9 @@ int __sys_accept4_file(struct file *file, unsigned file_flags, */ __module_get(newsock->ops->owner); - newfd = __get_unused_fd_flags(flags, nofile); - if (unlikely(newfd < 0)) { - err = newfd; - sock_release(newsock); - goto out; - } newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name); - if (IS_ERR(newfile)) { - err = PTR_ERR(newfile); - put_unused_fd(newfd); - goto out; - } + if (IS_ERR(newfile)) + return newfile; err = 
security_socket_accept(sock, newsock); if (err) @@ -1759,16 +1742,38 @@ int __sys_accept4_file(struct file *file, unsigned file_flags, } /* File flags are not inherited via accept() unlike another OSes. */ - - fd_install(newfd, newfile); - err = newfd; -out: - return err; + return newfile; out_fd: fput(newfile); - put_unused_fd(newfd); - goto out; + return ERR_PTR(err); +} +int __sys_accept4_file(struct file *file, unsigned file_flags, + struct sockaddr __user *upeer_sockaddr, + int __user *upeer_addrlen, int flags, + unsigned long nofile) +{ + struct file *newfile; + int newfd; + + if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) + return -EINVAL; + + if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) + flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; + + newfd = __get_unused_fd_flags(flags, nofile); + if (unlikely(newfd < 0)) + return newfd; + + newfile = do_accept(file, file_flags, upeer_sockaddr, upeer_addrlen, + flags); + if (IS_ERR(newfile)) { + put_unused_fd(newfd); + return PTR_ERR(newfile); + } + fd_install(newfd, newfile); + return newfd; } /* @@ -2182,6 +2187,17 @@ SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname, * Shutdown a socket. */ +int __sys_shutdown_sock(struct socket *sock, int how) +{ + int err; + + err = security_socket_shutdown(sock, how); + if (!err) + err = sock->ops->shutdown(sock, how); + + return err; +} + int __sys_shutdown(int fd, int how) { int err, fput_needed; @@ -2189,9 +2205,7 @@ int __sys_shutdown(int fd, int how) sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { - err = security_socket_shutdown(sock, how); - if (!err) - err = sock->ops->shutdown(sock, how); + err = __sys_shutdown_sock(sock, how); fput_light(sock->file, fput_needed); } return err; @@ -2406,10 +2420,6 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, long __sys_sendmsg_sock(struct socket *sock, struct msghdr *msg, unsigned int flags) { - /* disallow ancillary data requests from this path */ - if (msg->msg_control || msg->msg_controllen) - return -EINVAL; - return ____sys_sendmsg(sock, msg, flags, NULL, 0); } @@ -2618,12 +2628,6 @@ long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg, struct user_msghdr __user *umsg, struct sockaddr __user *uaddr, unsigned int flags) { - if (msg->msg_control || msg->msg_controllen) { - /* disallow ancillary data reqs unless cmsg is plain data */ - if (!(sock->ops->flags & PROTO_CMSG_DATA_ONLY)) - return -EINVAL; - } - return ____sys_recvmsg(sock, msg, umsg, uaddr, flags, 0); } diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index a9f0d17fdb0d..1bae32c48284 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -445,7 +445,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) * Enforce a 60 second garbage collection moratorium * Note that the cred_unused list must be time-ordered. 
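/*
 * The net/socket.c hunks above split __sys_accept4_file() into do_accept(),
 * which returns a struct file * or an ERR_PTR()-encoded errno, leaving the
 * wrapper to own the descriptor slot. A minimal standalone sketch of that
 * error-in-pointer convention follows; the names and the make_object()
 * helper are illustrative, not the kernel's.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *make_object(int fail)
{
	static int obj = 42;
	return fail ? ERR_PTR(-ENFILE) : &obj;
}

int main(void)
{
	void *o = make_object(1);

	if (IS_ERR(o))
		printf("failed: %ld\n", PTR_ERR(o));	/* -24 == -ENFILE */
	return 0;
}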
*/ - if (!time_in_range(cred->cr_expire, expired, jiffies)) + if (time_in_range(cred->cr_expire, expired, jiffies)) continue; if (!rpcauth_unhash_cred(cred)) continue; diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 22a2c235abf1..77e347a45344 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c @@ -64,6 +64,17 @@ static void xprt_free_allocation(struct rpc_rqst *req) kfree(req); } +static void xprt_bc_reinit_xdr_buf(struct xdr_buf *buf) +{ + buf->head[0].iov_len = PAGE_SIZE; + buf->tail[0].iov_len = 0; + buf->pages = NULL; + buf->page_len = 0; + buf->flags = 0; + buf->len = 0; + buf->buflen = PAGE_SIZE; +} + static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags) { struct page *page; @@ -292,6 +303,9 @@ void xprt_free_bc_rqst(struct rpc_rqst *req) */ spin_lock_bh(&xprt->bc_pa_lock); if (xprt_need_to_requeue(xprt)) { + xprt_bc_reinit_xdr_buf(&req->rq_snd_buf); + xprt_bc_reinit_xdr_buf(&req->rq_rcv_buf); + req->rq_rcv_buf.len = PAGE_SIZE; list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list); xprt->bc_alloc_count++; atomic_inc(&xprt->bc_slot_count); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 84c8a534029c..78c6648af782 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1867,7 +1867,7 @@ call_encode(struct rpc_task *task) break; case -EKEYEXPIRED: if (!task->tk_cred_retry) { - rpc_exit(task, task->tk_status); + rpc_call_rpcerror(task, task->tk_status); } else { task->tk_action = call_refresh; task->tk_cred_retry--; @@ -2175,6 +2175,7 @@ call_transmit_status(struct rpc_task *task) * socket just returned a connection error, * then hold onto the transport lock. */ + case -ENOMEM: case -ENOBUFS: rpc_delay(task, HZ>>2); fallthrough; @@ -2258,6 +2259,7 @@ call_bc_transmit_status(struct rpc_task *task) case -ENOTCONN: case -EPIPE: break; + case -ENOMEM: case -ENOBUFS: rpc_delay(task, HZ>>2); fallthrough; @@ -2340,6 +2342,11 @@ call_status(struct rpc_task *task) case -EPIPE: case -EAGAIN: break; + case -ENFILE: + case -ENOBUFS: + case -ENOMEM: + rpc_delay(task, HZ>>2); + break; case -EIO: /* shutdown or soft timeout */ goto out_exit; diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 5f854ffbab92..bb13620e6246 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -478,6 +478,7 @@ rpc_get_inode(struct super_block *sb, umode_t mode) inode->i_fop = &simple_dir_operations; inode->i_op = &simple_dir_inode_operations; inc_nlink(inode); + break; default: break; } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index c045f63d11fa..f0f55fbd1375 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -186,11 +186,6 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, /* * Add new request to wait queue. - * - * Swapper tasks always get inserted at the head of the queue. - * This should avoid many nasty memory deadlocks and hopefully - * improve overall performance. - * Everyone else gets appended to the queue to ensure proper FIFO behavior. 
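/*
 * The rpcauth_prune_expired() fix above inverts the moratorium test: a
 * credential is skipped (not reaped) only while cr_expire still lies inside
 * the [jiffies - 60s, jiffies] window. Below is a wrap-safe interval check
 * in the spirit of the kernel's time_in_range(); this standalone port and
 * its constants are illustrative assumptions.
 */
#include <stdio.h>

static int time_in_range(unsigned long t, unsigned long start, unsigned long end)
{
	/* true iff start <= t <= end, even when the counter has wrapped */
	return (long)(t - start) >= 0 && (long)(end - t) >= 0;
}

int main(void)
{
	unsigned long jiffies = 100, moratorium = 60;
	unsigned long expired = jiffies - moratorium;	/* window start */

	printf("%d\n", time_in_range(70, expired, jiffies)); /* 1: fresh, skip */
	printf("%d\n", time_in_range(30, expired, jiffies)); /* 0: old, reap */
	return 0;
}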
*/ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task, @@ -199,8 +194,6 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, INIT_LIST_HEAD(&task->u.tk_wait.timer_list); if (RPC_IS_PRIORITY(queue)) __rpc_add_wait_queue_priority(queue, task, queue_priority); - else if (RPC_IS_SWAPPER(task)) - list_add(&task->u.tk_wait.list, &queue->tasks[0]); else list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); task->tk_waitqueue = queue; @@ -1012,8 +1005,10 @@ int rpc_malloc(struct rpc_task *task) struct rpc_buffer *buf; gfp_t gfp = GFP_NOFS; + if (RPC_IS_ASYNC(task)) + gfp = GFP_NOWAIT | __GFP_NOWARN; if (RPC_IS_SWAPPER(task)) - gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN; + gfp |= __GFP_MEMALLOC; size += sizeof(struct rpc_buffer); if (size <= RPC_BUFFER_MAXSIZE) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index eba1714bf09a..6d5bb8bfed38 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1091,7 +1091,9 @@ static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg, int flags, ret; *sentp = 0; - xdr_alloc_bvec(xdr, GFP_KERNEL); + ret = xdr_alloc_bvec(xdr, GFP_KERNEL); + if (ret < 0) + return ret; msg->msg_flags = MSG_MORE; ret = kernel_sendmsg(sock, msg, &rm, 1, rm.iov_len); diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 71e03b930b70..d84bb5037bb5 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -752,7 +752,11 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, */ xdr->p = (void *)p + frag2bytes; space_left = xdr->buf->buflen - xdr->buf->len; - xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE); + if (space_left - frag1bytes >= PAGE_SIZE) + xdr->end = (void *)p + PAGE_SIZE; + else + xdr->end = (void *)p + space_left - frag1bytes; + xdr->buf->page_len += frag2bytes; xdr->buf->len += nbytes; return p; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 46304e647c49..13d5323f8098 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -731,6 +731,21 @@ void xprt_disconnect_done(struct rpc_xprt *xprt) } EXPORT_SYMBOL_GPL(xprt_disconnect_done); +/** + * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call + * @xprt: transport to disconnect + */ +static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt) +{ + if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state)) + return; + if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) + queue_work(xprtiod_workqueue, &xprt->task_cleanup); + else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state)) + rpc_wake_up_queued_task_set_status(&xprt->pending, + xprt->snd_task, -ENOTCONN); +} + /** * xprt_force_disconnect - force a transport to disconnect * @xprt: transport to disconnect @@ -742,13 +757,7 @@ void xprt_force_disconnect(struct rpc_xprt *xprt) /* Don't race with the test_bit() in xprt_clear_locked() */ spin_lock(&xprt->transport_lock); - set_bit(XPRT_CLOSE_WAIT, &xprt->state); - /* Try to schedule an autoclose RPC call */ - if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) - queue_work(xprtiod_workqueue, &xprt->task_cleanup); - else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state)) - rpc_wake_up_queued_task_set_status(&xprt->pending, - xprt->snd_task, -ENOTCONN); + xprt_schedule_autoclose_locked(xprt); spin_unlock(&xprt->transport_lock); } EXPORT_SYMBOL_GPL(xprt_force_disconnect); @@ -788,11 +797,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie) goto out; if (test_bit(XPRT_CLOSING, &xprt->state)) goto out; - set_bit(XPRT_CLOSE_WAIT, 
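/*
 * rpc_malloc() above now layers its allocation flags: async tasks may not
 * sleep, so they get GFP_NOWAIT | __GFP_NOWARN, and swap-out tasks add
 * __GFP_MEMALLOC on top to reach the memory reserves. Sketch of that
 * composition with stand-in flag values, not the real gfp.h constants.
 */
#include <stdio.h>

#define GFP_NOFS	0x01u
#define GFP_NOWAIT	0x02u
#define __GFP_NOWARN	0x04u
#define __GFP_MEMALLOC	0x08u

static unsigned int pick_gfp(int is_async, int is_swapper)
{
	unsigned int gfp = GFP_NOFS;	/* default: may sleep, no FS reentry */

	if (is_async)
		gfp = GFP_NOWAIT | __GFP_NOWARN;	/* replace: never sleep */
	if (is_swapper)
		gfp |= __GFP_MEMALLOC;			/* add: use reserves */
	return gfp;
}

int main(void)
{
	printf("async+swapper -> %#x\n", pick_gfp(1, 1));	/* 0xe */
	return 0;
}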
&xprt->state); - /* Try to schedule an autoclose RPC call */ - if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) - queue_work(xprtiod_workqueue, &xprt->task_cleanup); - xprt_wake_pending_tasks(xprt, -EAGAIN); + xprt_schedule_autoclose_locked(xprt); out: spin_unlock(&xprt->transport_lock); } @@ -881,12 +886,7 @@ void xprt_connect(struct rpc_task *task) if (!xprt_lock_write(xprt, task)) return; - if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) { - trace_xprt_disconnect_cleanup(xprt); - xprt->ops->close(xprt); - } - - if (!xprt_connected(xprt)) { + if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie; rpc_sleep_on_timeout(&xprt->pending, task, NULL, xprt_request_timeout(task->tk_rqstp)); @@ -1306,17 +1306,6 @@ xprt_request_enqueue_transmit(struct rpc_task *task) INIT_LIST_HEAD(&req->rq_xmit2); goto out; } - } else if (RPC_IS_SWAPPER(task)) { - list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { - if (pos->rq_cong || pos->rq_bytes_sent) - continue; - if (RPC_IS_SWAPPER(pos->rq_task)) - continue; - /* Note: req is added _before_ pos */ - list_add_tail(&req->rq_xmit, &pos->rq_xmit); - INIT_LIST_HEAD(&req->rq_xmit2); - goto out; - } } else if (!req->rq_seqno) { list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { if (pos->rq_task->tk_owner != task->tk_owner) @@ -1635,12 +1624,15 @@ out: static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt) { struct rpc_rqst *req = ERR_PTR(-EAGAIN); + gfp_t gfp_mask = GFP_KERNEL; if (xprt->num_reqs >= xprt->max_reqs) goto out; ++xprt->num_reqs; spin_unlock(&xprt->reserve_lock); - req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS); + if (current->flags & PF_WQ_WORKER) + gfp_mask |= __GFP_NORETRY | __GFP_NOWARN; + req = kzalloc(sizeof(*req), gfp_mask); spin_lock(&xprt->reserve_lock); if (req != NULL) goto out; diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index ca267a855a12..b8174c77dfe1 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1137,6 +1137,7 @@ static bool rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) #if defined(CONFIG_SUNRPC_BACKCHANNEL) { + struct rpc_xprt *xprt = &r_xprt->rx_xprt; struct xdr_stream *xdr = &rep->rr_stream; __be32 *p; @@ -1160,6 +1161,10 @@ rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) if (*p != cpu_to_be32(RPC_CALL)) return false; + /* No bc service. */ + if (xprt->bc_serv == NULL) + return false; + /* Now that we are sure this is a backchannel call, * advance to the RPC header. 
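/*
 * xprt_schedule_autoclose_locked() above gates on
 * test_and_set_bit(XPRT_CLOSE_WAIT): the first caller to see the bit clear
 * queues the cleanup work, later callers back off. A C11-atomics analog of
 * that run-once gate; the names here are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>

#define CLOSE_WAIT	(1u << 0)

static atomic_uint state;

static int test_and_set(atomic_uint *s, unsigned int bit)
{
	return (atomic_fetch_or(s, bit) & bit) != 0;
}

static void schedule_autoclose(void)
{
	if (test_and_set(&state, CLOSE_WAIT))
		return;			/* close already queued */
	printf("queueing cleanup work once\n");
}

int main(void)
{
	schedule_autoclose();		/* queues the work */
	schedule_autoclose();		/* no-op, bit already set */
	return 0;
}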
*/ diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 8e2368a0c2a2..9cf10cfb85c6 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -519,7 +519,7 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) return; out_sleep: - task->tk_status = -EAGAIN; + task->tk_status = -ENOMEM; xprt_add_backlog(xprt, task); } @@ -572,8 +572,10 @@ xprt_rdma_allocate(struct rpc_task *task) gfp_t flags; flags = RPCRDMA_DEF_GFP; + if (RPC_IS_ASYNC(task)) + flags = GFP_NOWAIT | __GFP_NOWARN; if (RPC_IS_SWAPPER(task)) - flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN; + flags |= __GFP_MEMALLOC; if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize, flags)) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 16c7758e7bf3..ae5b5380f0f0 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -754,12 +754,12 @@ xs_stream_start_connect(struct sock_xprt *transport) /** * xs_nospace - handle transmit was incomplete * @req: pointer to RPC request + * @transport: pointer to struct sock_xprt * */ -static int xs_nospace(struct rpc_rqst *req) +static int xs_nospace(struct rpc_rqst *req, struct sock_xprt *transport) { - struct rpc_xprt *xprt = req->rq_xprt; - struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); + struct rpc_xprt *xprt = &transport->xprt; struct sock *sk = transport->inet; int ret = -EAGAIN; @@ -770,16 +770,6 @@ static int xs_nospace(struct rpc_rqst *req) /* Don't race with disconnect */ if (xprt_connected(xprt)) { - /* wait for more buffer space */ - sk->sk_write_pending++; - xprt_wait_for_buffer_space(xprt); - } else - ret = -ENOTCONN; - - spin_unlock(&xprt->transport_lock); - - /* Race breaker in case memory is freed before above code is called */ - if (ret == -EAGAIN) { struct socket_wq *wq; rcu_read_lock(); @@ -787,8 +777,42 @@ static int xs_nospace(struct rpc_rqst *req) set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); rcu_read_unlock(); - sk->sk_write_space(sk); - } + /* wait for more buffer space */ + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + sk->sk_write_pending++; + xprt_wait_for_buffer_space(xprt); + } else + ret = -ENOTCONN; + + spin_unlock(&xprt->transport_lock); + return ret; +} + +static int xs_sock_nospace(struct rpc_rqst *req) +{ + struct sock_xprt *transport = + container_of(req->rq_xprt, struct sock_xprt, xprt); + struct sock *sk = transport->inet; + int ret = -EAGAIN; + + lock_sock(sk); + if (!sock_writeable(sk)) + ret = xs_nospace(req, transport); + release_sock(sk); + return ret; +} + +static int xs_stream_nospace(struct rpc_rqst *req) +{ + struct sock_xprt *transport = + container_of(req->rq_xprt, struct sock_xprt, xprt); + struct sock *sk = transport->inet; + int ret = -EAGAIN; + + lock_sock(sk); + if (!sk_stream_memory_free(sk)) + ret = xs_nospace(req, transport); + release_sock(sk); return ret; } @@ -847,7 +871,7 @@ static int xs_local_send_request(struct rpc_rqst *req) /* Close the stream if the previous transmission was incomplete */ if (xs_send_request_was_aborted(transport, req)) { - xs_close(xprt); + xprt_force_disconnect(xprt); return -ENOTCONN; } @@ -878,14 +902,14 @@ static int xs_local_send_request(struct rpc_rqst *req) case -ENOBUFS: break; case -EAGAIN: - status = xs_nospace(req); + status = xs_stream_nospace(req); break; default: dprintk("RPC: sendmsg returned unrecognized error %d\n", -status); fallthrough; case -EPIPE: - xs_close(xprt); + xprt_force_disconnect(xprt); status = -ENOTCONN; } @@ -954,7 +978,7 @@ process_status: /* 
Should we call xs_close() here? */ break; case -EAGAIN: - status = xs_nospace(req); + status = xs_sock_nospace(req); break; case -ENETUNREACH: case -ENOBUFS: @@ -1069,7 +1093,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req) /* Should we call xs_close() here? */ break; case -EAGAIN: - status = xs_nospace(req); + status = xs_stream_nospace(req); break; case -ECONNRESET: case -ECONNREFUSED: @@ -1167,6 +1191,16 @@ static void xs_reset_transport(struct sock_xprt *transport) if (sk == NULL) return; + /* + * Make sure we're calling this in a context from which it is safe + * to call __fput_sync(). In practice that means rpciod and the + * system workqueue. + */ + if (!(current->flags & PF_WQ_WORKER)) { + WARN_ON_ONCE(1); + set_bit(XPRT_CLOSE_WAIT, &xprt->state); + return; + } if (atomic_read(&transport->xprt.swapper)) sk_clear_memalloc(sk); @@ -1190,7 +1224,7 @@ static void xs_reset_transport(struct sock_xprt *transport) mutex_unlock(&transport->recv_mutex); trace_rpc_socket_close(xprt, sock); - fput(filp); + __fput_sync(filp); xprt_disconnect_done(xprt); } @@ -1883,6 +1917,7 @@ static int xs_local_setup_socket(struct sock_xprt *transport) xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start; xprt_set_connected(xprt); + break; case -ENOBUFS: break; case -ENOENT: @@ -2236,10 +2271,14 @@ static void xs_tcp_setup_socket(struct work_struct *work) struct rpc_xprt *xprt = &transport->xprt; int status = -EIO; - if (!sock) { - sock = xs_create_sock(xprt, transport, - xs_addr(xprt)->sa_family, SOCK_STREAM, - IPPROTO_TCP, true); + if (xprt_connected(xprt)) + goto out; + if (test_and_clear_bit(XPRT_SOCK_CONNECT_SENT, + &transport->sock_state) || + !sock) { + xs_reset_transport(transport); + sock = xs_create_sock(xprt, transport, xs_addr(xprt)->sa_family, + SOCK_STREAM, IPPROTO_TCP, true); if (IS_ERR(sock)) { status = PTR_ERR(sock); goto out; @@ -2270,6 +2309,8 @@ static void xs_tcp_setup_socket(struct work_struct *work) break; case 0: case -EINPROGRESS: + set_bit(XPRT_SOCK_CONNECT_SENT, &transport->sock_state); + fallthrough; case -EALREADY: xprt_unlock_connect(xprt, transport); return; @@ -2323,11 +2364,7 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) if (transport->sock != NULL) { dprintk("RPC: xs_connect delayed xprt %p for %lu " - "seconds\n", - xprt, xprt->reestablish_timeout / HZ); - - /* Start by resetting any existing state */ - xs_reset_transport(transport); + "seconds\n", xprt, xprt->reestablish_timeout / HZ); delay = xprt_reconnect_delay(xprt); xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO); @@ -2802,9 +2839,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args) } xprt_set_bound(xprt); xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL); - ret = ERR_PTR(xs_local_setup_socket(transport)); - if (ret) - goto out_err; break; default: ret = ERR_PTR(-EAFNOSUPPORT); diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 6911f1cab206..72c31ef985eb 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -249,9 +249,8 @@ static int tipc_enable_bearer(struct net *net, const char *name, u32 i; if (!bearer_name_validate(name, &b_names)) { - errstr = "illegal name"; NL_SET_ERR_MSG(extack, "Illegal name"); - goto rejected; + return res; } if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) { diff --git a/net/tipc/core.c b/net/tipc/core.c index 40c03085c0ea..7724499f516e 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -60,7 +60,7 @@ static int __net_init tipc_init_net(struct net *net) tn->trial_addr = 0; 
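/*
 * The xs_nospace() rework above re-tests writeability under lock_sock()
 * before registering for a write-space wakeup, so space freed in between
 * cannot be missed. Pthread analog of that check-under-lock-then-sleep
 * shape; the struct and function names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>

struct xsock {
	pthread_mutex_t lock;
	pthread_cond_t write_space;
	bool writeable;
};

static struct xsock xs = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.write_space = PTHREAD_COND_INITIALIZER,
	.writeable = false,
};

static void wait_for_space(struct xsock *s)
{
	pthread_mutex_lock(&s->lock);
	while (!s->writeable)		/* re-check under the lock */
		pthread_cond_wait(&s->write_space, &s->lock);
	pthread_mutex_unlock(&s->lock);
}

static void report_space(struct xsock *s)
{
	pthread_mutex_lock(&s->lock);
	s->writeable = true;
	pthread_cond_signal(&s->write_space);
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	report_space(&xs);		/* space appears first ... */
	wait_for_space(&xs);		/* ... so this returns at once */
	return 0;
}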
tn->addr_trial_end = 0; tn->capabilities = TIPC_NODE_CAPABILITIES; - INIT_WORK(&tn->final_work.work, tipc_net_finalize_work); + INIT_WORK(&tn->work, tipc_net_finalize_work); memset(tn->node_id, 0, sizeof(tn->node_id)); memset(tn->node_id_string, 0, sizeof(tn->node_id_string)); tn->mon_threshold = TIPC_DEF_MON_THRESHOLD; @@ -111,10 +111,9 @@ static void __net_exit tipc_exit_net(struct net *net) struct tipc_net *tn = tipc_net(net); tipc_detach_loopback(net); - /* Make sure the tipc_net_finalize_work() finished */ - cancel_work_sync(&tn->final_work.work); tipc_net_stop(net); - + /* Make sure the tipc_net_finalize_work() finished */ + cancel_work_sync(&tn->work); tipc_bcast_stop(net); tipc_nametbl_stop(net); tipc_sk_rht_destroy(net); diff --git a/net/tipc/core.h b/net/tipc/core.h index 992924a849be..73a26b0b9ca1 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -90,12 +90,6 @@ extern unsigned int tipc_net_id __read_mostly; extern int sysctl_tipc_rmem[3] __read_mostly; extern int sysctl_tipc_named_timeout __read_mostly; -struct tipc_net_work { - struct work_struct work; - struct net *net; - u32 addr; -}; - struct tipc_net { u8 node_id[NODE_ID_LEN]; u32 node_addr; @@ -150,7 +144,7 @@ struct tipc_net { struct tipc_crypto *crypto_tx; #endif /* Work item for net finalize */ - struct tipc_net_work final_work; + struct work_struct work; /* The numbers of work queues in schedule */ atomic_t wq_count; }; diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index 6f91b9a306dc..de63d6d41645 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -1975,6 +1975,9 @@ rcv: /* Ok, everything's fine, try to synch own keys according to peers' */ tipc_crypto_key_synch(rx, *skb); + /* Re-fetch skb cb as skb might be changed in tipc_msg_validate */ + skb_cb = TIPC_SKB_CB(*skb); + /* Mark skb decrypted */ skb_cb->decrypted = 1; diff --git a/net/tipc/discover.c b/net/tipc/discover.c index d4ecacddb40c..2730310249e3 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -147,8 +147,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d, { struct net *net = d->net; struct tipc_net *tn = tipc_net(net); - bool trial = time_before(jiffies, tn->addr_trial_end); u32 self = tipc_own_addr(net); + bool trial = time_before(jiffies, tn->addr_trial_end) && !self; if (mtyp == DSC_TRIAL_FAIL_MSG) { if (!trial) @@ -167,7 +167,7 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d, /* Apply trial address if we just left trial period */ if (!trial && !self) { - tipc_sched_net_finalize(net, tn->trial_addr); + schedule_work(&tn->work); msg_set_prevnode(buf_msg(d->skb), tn->trial_addr); msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); } @@ -210,7 +210,10 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb, u32 self; int err; - skb_linearize(skb); + if (skb_linearize(skb)) { + kfree_skb(skb); + return; + } hdr = buf_msg(skb); if (caps & TIPC_NODE_ID128) @@ -307,7 +310,7 @@ static void tipc_disc_timeout(struct timer_list *t) if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) { mod_timer(&d->timer, jiffies + TIPC_DISC_INIT); spin_unlock_bh(&d->lock); - tipc_sched_net_finalize(net, tn->trial_addr); + schedule_work(&tn->work); return; } diff --git a/net/tipc/link.c b/net/tipc/link.c index 7a353ff62844..c1e56d1f21b3 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -344,6 +344,11 @@ char tipc_link_plane(struct tipc_link *l) return l->net_plane; } +struct net *tipc_link_net(struct tipc_link *l) +{ + return l->net; +} + void tipc_link_update_caps(struct tipc_link *l, u16 
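/*
 * The tipc monitor fix above widens "1 << i" to "1ULL << i": with an int
 * constant the shift never reaches bits 32..63 of the u64 map, and i >= 32
 * is undefined behaviour outright. Standalone demonstration.
 */
#include <stdio.h>
#include <stdint.h>

static int map_get_buggy(uint64_t up_map, int i)
{
	return (up_map & (1 << i)) >> i;	/* int-width mask: wrong for i >= 32 */
}

static int map_get_fixed(uint64_t up_map, int i)
{
	return (up_map & (1ULL << i)) >> i;	/* 64-bit mask: correct */
}

int main(void)
{
	uint64_t map = 1ULL << 40;

	printf("fixed: %d\n", map_get_fixed(map, 40));	/* 1 */
	(void)map_get_buggy;	/* calling it with i = 40 would be UB */
	return 0;
}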
capabilities) { l->peer_caps = capabilities; @@ -2183,7 +2188,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, if (tipc_own_addr(l->net) > msg_prevnode(hdr)) l->net_plane = msg_net_plane(hdr); - skb_linearize(skb); + if (skb_linearize(skb)) + goto exit; + hdr = buf_msg(skb); data = msg_data(hdr); diff --git a/net/tipc/link.h b/net/tipc/link.h index fc07232c9a12..a16f401fdabd 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -156,4 +156,5 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr, int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *xmitq); bool tipc_link_too_silent(struct tipc_link *l); +struct net *tipc_link_net(struct tipc_link *l); #endif diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index a37190da5a50..1d90f39129ca 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -130,7 +130,7 @@ static void map_set(u64 *up_map, int i, unsigned int v) static int map_get(u64 up_map, int i) { - return (up_map & (1 << i)) >> i; + return (up_map & (1ULL << i)) >> i; } static struct tipc_peer *peer_prev(struct tipc_peer *peer) diff --git a/net/tipc/net.c b/net/tipc/net.c index 0bb2323201da..671cb4f9d563 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -41,6 +41,7 @@ #include "socket.h" #include "node.h" #include "bcast.h" +#include "link.h" #include "netlink.h" #include "monitor.h" @@ -138,19 +139,9 @@ static void tipc_net_finalize(struct net *net, u32 addr) void tipc_net_finalize_work(struct work_struct *work) { - struct tipc_net_work *fwork; + struct tipc_net *tn = container_of(work, struct tipc_net, work); - fwork = container_of(work, struct tipc_net_work, work); - tipc_net_finalize(fwork->net, fwork->addr); -} - -void tipc_sched_net_finalize(struct net *net, u32 addr) -{ - struct tipc_net *tn = tipc_net(net); - - tn->final_work.net = net; - tn->final_work.addr = addr; - schedule_work(&tn->final_work.work); + tipc_net_finalize(tipc_link_net(tn->bcl), tn->trial_addr); } void tipc_net_stop(struct net *net) diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 49e893313652..2d62932b5987 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -877,7 +877,7 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg) }; ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); - if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query)) + if (TLV_GET_DATA_LEN(msg->req) < (int)sizeof(struct tipc_name_table_query)) return -EINVAL; depth = ntohl(ntq->depth); diff --git a/net/tipc/node.c b/net/tipc/node.c index e4452d55851f..7589f2ac6fd0 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -456,8 +456,8 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id, bool preliminary) { struct tipc_net *tn = net_generic(net, tipc_net_id); + struct tipc_link *l, *snd_l = tipc_bc_sndlink(net); struct tipc_node *n, *temp_node; - struct tipc_link *l; unsigned long intv; int bearer_id; int i; @@ -472,6 +472,16 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id, goto exit; /* A preliminary node becomes "real" now, refresh its data */ tipc_node_write_lock(n); + if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX, + tipc_link_min_win(snd_l), tipc_link_max_win(snd_l), + n->capabilities, &n->bc_entry.inputq1, + &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) { + pr_warn("Broadcast rcv link refresh failed, no memory\n"); + tipc_node_write_unlock_fast(n); + tipc_node_put(n); + 
n = NULL; + goto exit; + } n->preliminary = false; n->addr = addr; hlist_del_rcu(&n->hash); @@ -551,7 +561,16 @@ update: n->signature = INVALID_NODE_SIG; n->active_links[0] = INVALID_BEARER_ID; n->active_links[1] = INVALID_BEARER_ID; - n->bc_entry.link = NULL; + if (!preliminary && + !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX, + tipc_link_min_win(snd_l), tipc_link_max_win(snd_l), + n->capabilities, &n->bc_entry.inputq1, + &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) { + pr_warn("Broadcast rcv link creation failed, no memory\n"); + kfree(n); + n = NULL; + goto exit; + } tipc_node_get(n); timer_setup(&n->timer, tipc_node_timeout, 0); /* Start a slow timer anyway, crypto needs it */ @@ -1128,7 +1147,7 @@ void tipc_node_check_dest(struct net *net, u32 addr, bool *respond, bool *dupl_addr) { struct tipc_node *n; - struct tipc_link *l, *snd_l; + struct tipc_link *l; struct tipc_link_entry *le; bool addr_match = false; bool sign_match = false; @@ -1148,22 +1167,6 @@ void tipc_node_check_dest(struct net *net, u32 addr, return; tipc_node_write_lock(n); - if (unlikely(!n->bc_entry.link)) { - snd_l = tipc_bc_sndlink(net); - if (!tipc_link_bc_create(net, tipc_own_addr(net), - addr, peer_id, U16_MAX, - tipc_link_min_win(snd_l), - tipc_link_max_win(snd_l), - n->capabilities, - &n->bc_entry.inputq1, - &n->bc_entry.namedq, snd_l, - &n->bc_entry.link)) { - pr_warn("Broadcast rcv link creation failed, no mem\n"); - tipc_node_write_unlock_fast(n); - tipc_node_put(n); - return; - } - } le = &n->links[b->identity]; @@ -1657,6 +1660,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list, struct tipc_node *n; struct sk_buff_head xmitq; bool node_up = false; + struct net *peer_net; int bearer_id; int rc; @@ -1673,18 +1677,23 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list, return -EHOSTUNREACH; } + rcu_read_lock(); tipc_node_read_lock(n); node_up = node_is_up(n); - if (node_up && n->peer_net && check_net(n->peer_net)) { + peer_net = n->peer_net; + tipc_node_read_unlock(n); + if (node_up && peer_net && check_net(peer_net)) { /* xmit inner linux container */ - tipc_lxc_xmit(n->peer_net, list); + tipc_lxc_xmit(peer_net, list); if (likely(skb_queue_empty(list))) { - tipc_node_read_unlock(n); + rcu_read_unlock(); tipc_node_put(n); return 0; } } + rcu_read_unlock(); + tipc_node_read_lock(n); bearer_id = n->active_links[selector & 1]; if (unlikely(bearer_id == INVALID_BEARER_ID)) { tipc_node_read_unlock(n); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 42283dc6c5b7..8f3c9fbb9916 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -489,6 +489,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, sock_init_data(sock, sk); tipc_set_sk_state(sk, TIPC_OPEN); if (tipc_sk_insert(tsk)) { + sk_free(sk); pr_warn("Socket create failed; port number exhausted\n"); return -EINVAL; } @@ -503,7 +504,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, timer_setup(&sk->sk_timer, tipc_sk_timeout, 0); sk->sk_shutdown = 0; sk->sk_backlog_rcv = tipc_sk_backlog_rcv; - sk->sk_rcvbuf = sysctl_tipc_rmem[1]; + sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]); sk->sk_data_ready = tipc_data_ready; sk->sk_write_space = tipc_write_space; sk->sk_destruct = tipc_sock_destruct; diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 13f3143609f9..89d8a2bd30cd 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -176,7 +176,7 @@ static void tipc_conn_close(struct tipc_conn *con) conn_put(con); } -static struct tipc_conn *tipc_conn_alloc(struct 
tipc_topsrv *s) +static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s, struct socket *sock) { struct tipc_conn *con; int ret; @@ -202,10 +202,12 @@ static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s) } con->conid = ret; s->idr_in_use++; - spin_unlock_bh(&s->idr_lock); set_bit(CF_CONNECTED, &con->flags); con->server = s; + con->sock = sock; + conn_get(con); + spin_unlock_bh(&s->idr_lock); return con; } @@ -450,17 +452,24 @@ static void tipc_conn_data_ready(struct sock *sk) static void tipc_topsrv_accept(struct work_struct *work) { struct tipc_topsrv *srv = container_of(work, struct tipc_topsrv, awork); - struct socket *lsock = srv->listener; - struct socket *newsock; + struct socket *newsock, *lsock; struct tipc_conn *con; struct sock *newsk; int ret; + spin_lock_bh(&srv->idr_lock); + if (!srv->listener) { + spin_unlock_bh(&srv->idr_lock); + return; + } + lsock = srv->listener; + spin_unlock_bh(&srv->idr_lock); + while (1) { ret = kernel_accept(lsock, &newsock, O_NONBLOCK); if (ret < 0) return; - con = tipc_conn_alloc(srv); + con = tipc_conn_alloc(srv, newsock); if (IS_ERR(con)) { ret = PTR_ERR(con); sock_release(newsock); @@ -472,11 +481,11 @@ static void tipc_topsrv_accept(struct work_struct *work) newsk->sk_data_ready = tipc_conn_data_ready; newsk->sk_write_space = tipc_conn_write_space; newsk->sk_user_data = con; - con->sock = newsock; write_unlock_bh(&newsk->sk_callback_lock); /* Wake up receive process in case of 'SYN+' message */ newsk->sk_data_ready(newsk); + conn_put(con); } } @@ -489,7 +498,7 @@ static void tipc_topsrv_listener_data_ready(struct sock *sk) read_lock_bh(&sk->sk_callback_lock); srv = sk->sk_user_data; - if (srv->listener) + if (srv) queue_work(srv->rcv_wq, &srv->awork); read_unlock_bh(&sk->sk_callback_lock); } @@ -568,19 +577,19 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower, sub.seq.upper = upper; sub.timeout = TIPC_WAIT_FOREVER; sub.filter = filter; - *(u32 *)&sub.usr_handle = port; + *(u64 *)&sub.usr_handle = (u64)port; - con = tipc_conn_alloc(tipc_topsrv(net)); + con = tipc_conn_alloc(tipc_topsrv(net), NULL); if (IS_ERR(con)) return false; *conid = con->conid; - con->sock = NULL; rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub); - if (rc >= 0) - return true; + if (rc) + conn_put(con); + conn_put(con); - return false; + return !rc; } void tipc_topsrv_kern_unsubscr(struct net *net, int conid) @@ -699,8 +708,9 @@ static void tipc_topsrv_stop(struct net *net) __module_get(lsock->sk->sk_prot_creator->owner); srv->listener = NULL; spin_unlock_bh(&srv->idr_lock); - sock_release(lsock); + tipc_topsrv_work_stop(srv); + sock_release(lsock); idr_destroy(&srv->conn_idr); kfree(srv); } diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index f718c7346088..5cb6846544cc 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -97,13 +97,16 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx) unsigned long flags; spin_lock_irqsave(&tls_device_lock, flags); + if (unlikely(!refcount_dec_and_test(&ctx->refcount))) + goto unlock; + list_move_tail(&ctx->list, &tls_device_gc_list); /* schedule_work inside the spinlock * to make sure tls_device_down waits for that work. 
*/ schedule_work(&tls_device_gc_work); - +unlock: spin_unlock_irqrestore(&tls_device_lock, flags); } @@ -194,8 +197,7 @@ void tls_device_sk_destruct(struct sock *sk) clean_acked_data_disable(inet_csk(sk)); } - if (refcount_dec_and_test(&tls_ctx->refcount)) - tls_device_queue_ctx_destruction(tls_ctx); + tls_device_queue_ctx_destruction(tls_ctx); } EXPORT_SYMBOL_GPL(tls_device_sk_destruct); @@ -483,11 +485,13 @@ handle_error: copy = min_t(size_t, size, (pfrag->size - pfrag->offset)); copy = min_t(size_t, copy, (max_open_record_len - record->len)); - rc = tls_device_copy_data(page_address(pfrag->page) + - pfrag->offset, copy, msg_iter); - if (rc) - goto handle_error; - tls_append_frag(record, pfrag, copy); + if (copy) { + rc = tls_device_copy_data(page_address(pfrag->page) + + pfrag->offset, copy, msg_iter); + if (rc) + goto handle_error; + tls_append_frag(record, pfrag, copy); + } size -= copy; if (!size) { @@ -1343,7 +1347,15 @@ static int tls_device_down(struct net_device *netdev) /* Device contexts for RX and TX will be freed in on sk_destruct * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW. + * Now release the ref taken above. */ + if (refcount_dec_and_test(&ctx->refcount)) { + /* sk_destruct ran after tls_device_down took a ref, and + * it returned early. Complete the destruction here. + */ + list_del(&ctx->list); + tls_device_free_ctx(ctx); + } } up_write(&device_offload_lock); @@ -1385,9 +1397,9 @@ static struct notifier_block tls_dev_notifier = { .notifier_call = tls_dev_event, }; -void __init tls_device_init(void) +int __init tls_device_init(void) { - register_netdevice_notifier(&tls_dev_notifier); + return register_netdevice_notifier(&tls_dev_notifier); } void __exit tls_device_cleanup(void) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 58d22d6b86ae..e537085b184f 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -905,7 +905,12 @@ static int __init tls_register(void) if (err) return err; - tls_device_init(); + err = tls_device_init(); + if (err) { + unregister_pernet_subsys(&tls_proc_ops); + return err; + } + tcp_register_ulp(&tcp_tls_ulp_ops); return 0; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 8cd011ea9fbb..21f20c3cda97 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1483,7 +1483,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, } if (prot->version == TLS_1_3_VERSION) memcpy(iv + iv_offset, tls_ctx->rx.iv, - crypto_aead_ivsize(ctx->aead_recv)); + prot->iv_size + prot->salt_size); else memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index fa99fe5bcf6c..2aa01afe1b03 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -438,7 +438,7 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) * -ECONNREFUSED. Otherwise, if we haven't queued any skbs * to other and its full, we will hang waiting for POLLOUT. 
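/*
 * tls_register() above gains proper unwinding: when tls_device_init()
 * fails, the already-registered pernet subsystem is torn down before the
 * error is returned. Generic sketch of that reverse-order rollback with
 * hypothetical step names.
 */
#include <stdio.h>

static int step_a_init(void) { puts("a up"); return 0; }
static void step_a_exit(void) { puts("a down"); }
static int step_b_init(int fail) { return fail ? -1 : 0; }

static int subsystem_register(int fail_b)
{
	int err = step_a_init();

	if (err)
		return err;

	err = step_b_init(fail_b);
	if (err) {
		step_a_exit();	/* undo step a before reporting failure */
		return err;
	}
	return 0;
}

int main(void)
{
	subsystem_register(1);	/* prints "a up" then "a down" */
	return 0;
}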
*/ - if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD)) + if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD)) return 1; if (connected) diff --git a/net/unix/diag.c b/net/unix/diag.c index 9ff64f9df1f3..951b33fa8f5c 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -113,14 +113,16 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb) return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql); } -static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb) +static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb, + struct user_namespace *user_ns) { - uid_t uid = from_kuid_munged(sk_user_ns(nlskb->sk), sock_i_uid(sk)); + uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk)); return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid); } static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, - u32 portid, u32 seq, u32 flags, int sk_ino) + struct user_namespace *user_ns, + u32 portid, u32 seq, u32 flags, int sk_ino) { struct nlmsghdr *nlh; struct unix_diag_msg *rep; @@ -166,7 +168,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r goto out_nlmsg_trim; if ((req->udiag_show & UDIAG_SHOW_UID) && - sk_diag_dump_uid(sk, skb)) + sk_diag_dump_uid(sk, skb, user_ns)) goto out_nlmsg_trim; nlmsg_end(skb, nlh); @@ -178,7 +180,8 @@ out_nlmsg_trim: } static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, - u32 portid, u32 seq, u32 flags) + struct user_namespace *user_ns, + u32 portid, u32 seq, u32 flags) { int sk_ino; @@ -189,7 +192,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_r if (!sk_ino) return 0; - return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino); + return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino); } static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) @@ -217,7 +220,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) goto next; if (!(req->udiag_states & (1 << sk->sk_state))) goto next; - if (sk_diag_dump(sk, skb, req, + if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk), NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0) @@ -285,7 +288,8 @@ again: if (!rep) goto out; - err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid, + err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk), + NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 0, req->udiag_ino); if (err < 0) { nlmsg_free(rep); diff --git a/net/unix/garbage.c b/net/unix/garbage.c index d45d5366115a..dc2763540393 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -204,6 +204,7 @@ void wait_for_unix_gc(void) /* The external entry point: unix_gc() */ void unix_gc(void) { + struct sk_buff *next_skb, *skb; struct unix_sock *u; struct unix_sock *next; struct sk_buff_head hitlist; @@ -297,11 +298,30 @@ void unix_gc(void) spin_unlock(&unix_gc_lock); + /* We need io_uring to clean its registered files, ignore all io_uring + * originated skbs. It's fine as io_uring doesn't keep references to + * other io_uring instances and so killing all other files in the cycle + * will put all io_uring references forcing it to go through normal + * release.path eventually putting registered files. + */ + skb_queue_walk_safe(&hitlist, skb, next_skb) { + if (skb->scm_io_uring) { + __skb_unlink(skb, &hitlist); + skb_queue_tail(&skb->sk->sk_receive_queue, skb); + } + } + /* Here we are. Hitlist is filled. Die. 
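/*
 * The vsock connect fix above relies on mod_delayed_work()'s return value:
 * true means a timer was already pending, so the reference pinned for it is
 * still held and the freshly taken hold must be dropped to stay balanced.
 * Simplified single-threaded counter model of that accounting.
 */
#include <stdbool.h>
#include <stdio.h>

static int refs;
static bool timer_pending;

static void sock_hold(void) { refs++; }
static void sock_put(void) { refs--; }

/* returns true if it rescheduled an already-pending timer */
static bool mod_delayed_work(void)
{
	bool was_pending = timer_pending;

	timer_pending = true;
	return was_pending;
}

static void arm_connect_timeout(void)
{
	sock_hold();			/* pin for the timeout handler */
	if (mod_delayed_work())
		sock_put();		/* old pin survives: drop the new one */
}

int main(void)
{
	arm_connect_timeout();
	arm_connect_timeout();		/* re-arming keeps refs balanced */
	printf("refs=%d\n", refs);	/* 1 */
	return 0;
}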
*/ __skb_queue_purge(&hitlist); spin_lock(&unix_gc_lock); + /* There could be io_uring registered files, just push them back to + * the inflight list + */ + list_for_each_entry_safe(u, next, &gc_candidates, link) + list_move_tail(&u->link, &gc_inflight_list); + /* All candidates should have been detached by now. */ BUG_ON(!list_empty(&gc_candidates)); diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 005aa701f4d5..ab9fe75fd7aa 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1237,6 +1237,7 @@ static void vsock_connect_timeout(struct work_struct *work) if (sk->sk_state == TCP_SYN_SENT && (sk->sk_shutdown != SHUTDOWN_MASK)) { sk->sk_state = TCP_CLOSE; + sk->sk_socket->state = SS_UNCONNECTED; sk->sk_err = ETIMEDOUT; sk->sk_error_report(sk); vsock_transport_cancel_pkt(vsk); @@ -1342,7 +1343,14 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, * timeout fires. */ sock_hold(sk); - schedule_delayed_work(&vsk->connect_work, timeout); + + /* If the timeout function is already scheduled, + * reschedule it, then ungrab the socket refcount to + * keep it balanced. + */ + if (mod_delayed_work(system_wq, &vsk->connect_work, + timeout)) + sock_put(sk); /* Skip ahead to preserve error code set above. */ goto out_wait; diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 5c0439c3bcfa..3a5bf3f42274 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -1200,7 +1200,7 @@ EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt); void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt) { - kfree(pkt->buf); + kvfree(pkt->buf); kfree(pkt); } EXPORT_SYMBOL_GPL(virtio_transport_free_pkt); diff --git a/net/wireless/core.c b/net/wireless/core.c index 3f4554723761..3b25b78896a2 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -5,7 +5,7 @@ * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -918,9 +918,6 @@ int wiphy_register(struct wiphy *wiphy) return res; } - /* set up regulatory info */ - wiphy_regulatory_register(wiphy); - list_add_rcu(&rdev->list, &cfg80211_rdev_list); cfg80211_rdev_list_generation++; @@ -931,6 +928,9 @@ int wiphy_register(struct wiphy *wiphy) cfg80211_debugfs_rdev_add(rdev); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); + /* set up regulatory info */ + wiphy_regulatory_register(wiphy); + if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) { struct regulatory_request request; diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index 76b845f68ac8..d80b06d66959 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c @@ -65,9 +65,10 @@ static ssize_t ht40allow_map_read(struct file *file, { struct wiphy *wiphy = file->private_data; char *buf; - unsigned int offset = 0, buf_size = PAGE_SIZE, i, r; + unsigned int offset = 0, buf_size = PAGE_SIZE, i; enum nl80211_band band; struct ieee80211_supported_band *sband; + ssize_t r; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5d8959a5e760..2bd8196ed719 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2958,6 +2958,15 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, } else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) { chandef->width = 
nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]); + if (chandef->chan->band == NL80211_BAND_S1GHZ) { + /* User input error for channel width doesn't match channel */ + if (chandef->width != ieee80211_s1g_channel_width(chandef->chan)) { + NL_SET_ERR_MSG_ATTR(extack, + attrs[NL80211_ATTR_CHANNEL_WIDTH], + "bad channel width"); + return -EINVAL; + } + } if (attrs[NL80211_ATTR_CENTER_FREQ1]) { chandef->center_freq1 = nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]); @@ -3479,6 +3488,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag wdev_lock(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: if (wdev->ssid_len && nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid)) goto nla_put_failure_locked; @@ -11095,18 +11105,23 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb, struct cfg80211_bitrate_mask mask; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; int err; if (!rdev->ops->set_bitrate_mask) return -EOPNOTSUPP; + wdev_lock(wdev); err = nl80211_parse_tx_bitrate_mask(info, info->attrs, NL80211_ATTR_TX_RATES, &mask, dev); if (err) - return err; + goto out; - return rdev_set_bitrate_mask(rdev, dev, NULL, &mask); + err = rdev_set_bitrate_mask(rdev, dev, NULL, &mask); +out: + wdev_unlock(wdev); + return err; } static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info) diff --git a/net/wireless/reg.c b/net/wireless/reg.c index a04fdfb35f07..a1e64d967bd3 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -787,6 +787,8 @@ static int __init load_builtin_regdb_keys(void) return 0; } +MODULE_FIRMWARE("regulatory.db.p7s"); + static bool regdb_has_valid_signature(const u8 *data, unsigned int size) { const struct firmware *sig; @@ -1058,8 +1060,12 @@ static void regdb_fw_cb(const struct firmware *fw, void *context) release_firmware(fw); } +MODULE_FIRMWARE("regulatory.db"); + static int query_regdb_file(const char *alpha2) { + int err; + ASSERT_RTNL(); if (regdb) @@ -1069,9 +1075,13 @@ static int query_regdb_file(const char *alpha2) if (!alpha2) return -ENOMEM; - return request_firmware_nowait(THIS_MODULE, true, "regulatory.db", - ®_pdev->dev, GFP_KERNEL, - (void *)alpha2, regdb_fw_cb); + err = request_firmware_nowait(THIS_MODULE, true, "regulatory.db", + ®_pdev->dev, GFP_KERNEL, + (void *)alpha2, regdb_fw_cb); + if (err) + kfree(alpha2); + + return err; } int reg_reload_regdb(void) @@ -4001,6 +4011,7 @@ void wiphy_regulatory_register(struct wiphy *wiphy) wiphy_update_regulatory(wiphy, lr->initiator); wiphy_all_share_dfs_chan_state(wiphy); + reg_process_self_managed_hints(); } void wiphy_regulatory_deregister(struct wiphy *wiphy) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index fd614a5a00b4..d09dabae5627 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -143,18 +143,12 @@ static inline void bss_ref_get(struct cfg80211_registered_device *rdev, lockdep_assert_held(&rdev->bss_lock); bss->refcount++; - if (bss->pub.hidden_beacon_bss) { - bss = container_of(bss->pub.hidden_beacon_bss, - struct cfg80211_internal_bss, - pub); - bss->refcount++; - } - if (bss->pub.transmitted_bss) { - bss = container_of(bss->pub.transmitted_bss, - struct cfg80211_internal_bss, - pub); - bss->refcount++; - } + + if (bss->pub.hidden_beacon_bss) + bss_from_pub(bss->pub.hidden_beacon_bss)->refcount++; + + if (bss->pub.transmitted_bss) + bss_from_pub(bss->pub.transmitted_bss)->refcount++; } static 
inline void bss_ref_put(struct cfg80211_registered_device *rdev, @@ -304,7 +298,8 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen); tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie; - while (tmp_old + tmp_old[1] + 2 - ie <= ielen) { + while (tmp_old + 2 - ie <= ielen && + tmp_old + tmp_old[1] + 2 - ie <= ielen) { if (tmp_old[0] == 0) { tmp_old++; continue; @@ -335,7 +330,8 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, * determine if they are the same ie. */ if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) { - if (!memcmp(tmp_old + 2, tmp + 2, 5)) { + if (tmp_old[1] >= 5 && tmp[1] >= 5 && + !memcmp(tmp_old + 2, tmp + 2, 5)) { /* same vendor ie, copy from * subelement */ @@ -364,7 +360,8 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, * copied to new ie, skip ssid, capability, bssid-index ie */ tmp_new = sub_copy; - while (tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) { + while (tmp_new + 2 - sub_copy <= subie_len && + tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) { if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP || tmp_new[0] == WLAN_EID_SSID)) { memcpy(pos, tmp_new, tmp_new[1] + 2); @@ -429,6 +426,15 @@ cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss, rcu_read_unlock(); + /* + * This is a bit weird - it's not on the list, but already on another + * one! The only way that could happen is if there's some BSSID/SSID + * shared by multiple APs in their multi-BSSID profiles, potentially + * with hidden SSID mixed in ... ignore it. + */ + if (!list_empty(&nontrans_bss->nontrans_list)) + return -EINVAL; + /* add to the list */ list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list); return 0; @@ -702,8 +708,12 @@ static bool cfg80211_find_ssid_match(struct cfg80211_colocated_ap *ap, for (i = 0; i < request->n_ssids; i++) { /* wildcard ssid in the scan request */ - if (!request->ssids[i].ssid_len) + if (!request->ssids[i].ssid_len) { + if (ap->multi_bss && !ap->transmitted_bssid) + continue; + return true; + } if (ap->ssid_len && ap->ssid_len == request->ssids[i].ssid_len) { @@ -830,6 +840,9 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev) !cfg80211_find_ssid_match(ap, request)) continue; + if (!request->n_ssids && ap->multi_bss && !ap->transmitted_bssid) + continue; + cfg80211_scan_req_add_chan(request, chan, true); memcpy(scan_6ghz_params->bssid, ap->bssid, ETH_ALEN); scan_6ghz_params->short_ssid = ap->short_ssid; @@ -1590,6 +1603,23 @@ struct cfg80211_non_tx_bss { u8 bssid_index; }; +static void cfg80211_update_hidden_bsses(struct cfg80211_internal_bss *known, + const struct cfg80211_bss_ies *new_ies, + const struct cfg80211_bss_ies *old_ies) +{ + struct cfg80211_internal_bss *bss; + + /* Assign beacon IEs to all sub entries */ + list_for_each_entry(bss, &known->hidden_list, hidden_list) { + const struct cfg80211_bss_ies *ies; + + ies = rcu_access_pointer(bss->pub.beacon_ies); + WARN_ON(ies != old_ies); + + rcu_assign_pointer(bss->pub.beacon_ies, new_ies); + } +} + static bool cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *known, @@ -1613,7 +1643,6 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); } else if (rcu_access_pointer(new->pub.beacon_ies)) { const struct cfg80211_bss_ies *old; - struct cfg80211_internal_bss *bss; if (known->pub.hidden_beacon_bss && !list_empty(&known->hidden_list)) { @@ -1641,16 +1670,9 @@ 
cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, if (old == rcu_access_pointer(known->pub.ies)) rcu_assign_pointer(known->pub.ies, new->pub.beacon_ies); - /* Assign beacon IEs to all sub entries */ - list_for_each_entry(bss, &known->hidden_list, hidden_list) { - const struct cfg80211_bss_ies *ies; - - ies = rcu_access_pointer(bss->pub.beacon_ies); - WARN_ON(ies != old); - - rcu_assign_pointer(bss->pub.beacon_ies, - new->pub.beacon_ies); - } + cfg80211_update_hidden_bsses(known, + rcu_access_pointer(new->pub.beacon_ies), + old); if (old) kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); @@ -1727,6 +1749,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, new->refcount = 1; INIT_LIST_HEAD(&new->hidden_list); INIT_LIST_HEAD(&new->pub.nontrans_list); + /* we'll set this later if it was non-NULL */ + new->pub.transmitted_bss = NULL; if (rcu_access_pointer(tmp->pub.proberesp_ies)) { hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN); @@ -1961,11 +1985,18 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, /* this is a nontransmitting bss, we need to add it to * transmitting bss' list if it is not there */ + spin_lock_bh(&rdev->bss_lock); if (cfg80211_add_nontrans_list(non_tx_data->tx_bss, &res->pub)) { - if (__cfg80211_unlink_bss(rdev, res)) + if (__cfg80211_unlink_bss(rdev, res)) { rdev->bss_generation++; + res = NULL; + } } + spin_unlock_bh(&rdev->bss_lock); + + if (!res) + return NULL; } trace_cfg80211_return_bss(&res->pub); @@ -2084,6 +2115,8 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy, for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, ie, ielen) { if (elem->datalen < 4) continue; + if (elem->data[0] < 1 || (int)elem->data[0] > 8) + continue; for_each_element(sub, elem->data + 1, elem->datalen - 1) { u8 profile_len; @@ -2219,7 +2252,7 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, size_t new_ie_len; struct cfg80211_bss_ies *new_ies; const struct cfg80211_bss_ies *old; - u8 cpy_len; + size_t cpy_len; lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock); @@ -2286,6 +2319,8 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, } else { old = rcu_access_pointer(nontrans_bss->beacon_ies); rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies); + cfg80211_update_hidden_bsses(bss_from_pub(nontrans_bss), + new_ies, old); rcu_assign_pointer(nontrans_bss->ies, new_ies); if (old) kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); @@ -2432,10 +2467,15 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, const struct cfg80211_bss_ies *ies1, *ies2; size_t ielen = len - offsetof(struct ieee80211_mgmt, u.probe_resp.variable); - struct cfg80211_non_tx_bss non_tx_data; + struct cfg80211_non_tx_bss non_tx_data = {}; res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt, len, gfp); + + /* don't do any further MBSSID handling for S1G */ + if (ieee80211_is_s1g_beacon(mgmt->frame_control)) + return res; + if (!res || !wiphy->support_mbssid || !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) return res; diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 25bf72ee6cad..226397add422 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -117,7 +117,7 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, if (!pskb_may_pull(skb, 1)) { x25_neigh_put(nb); - return 0; + goto drop; } switch (skb->data[0]) { diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index ca4716b92774..691841dc6d33 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -742,8 +742,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr 
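/*
 * The cfg80211 scan walkers above now validate that the 2-byte element
 * header fits before trusting its length byte, and only then check the
 * payload. Standalone sketch of that two-step TLV bounds check over a
 * hypothetical buffer.
 */
#include <stdio.h>
#include <stddef.h>

static void walk_ies(const unsigned char *ie, size_t ielen)
{
	const unsigned char *p = ie;

	while ((size_t)(p + 2 - ie) <= ielen &&		/* header in bounds */
	       (size_t)(p + 2 + p[1] - ie) <= ielen) {	/* payload in bounds */
		printf("id=%u len=%u\n", p[0], p[1]);
		p += 2 + p[1];
	}
}

int main(void)
{
	unsigned char buf[] = { 0x00, 0x03, 'f', 'o', 'o', 0xdd, 0x01, 0x42 };

	walk_ies(buf, sizeof(buf));
	return 0;
}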
*addr, int addr_len) goto out_unlock; } - err = xp_assign_dev_shared(xs->pool, umem_xs->umem, - dev, qid); + err = xp_assign_dev_shared(xs->pool, umem_xs, dev, + qid); if (err) { xp_destroy(xs->pool); xs->pool = NULL; diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index 2ef6f926610e..c347e52f58df 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -198,17 +198,18 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev, return __xp_assign_dev(pool, dev, queue_id, flags); } -int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem, +int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs, struct net_device *dev, u16 queue_id) { u16 flags; + struct xdp_umem *umem = umem_xs->umem; /* One fill and completion ring required for each queue id. */ if (!pool->fq || !pool->cq) return -EINVAL; flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY; - if (pool->uses_need_wakeup) + if (umem_xs->pool->uses_need_wakeup) flags |= XDP_USE_NEED_WAKEUP; return __xp_assign_dev(pool, dev, queue_id, flags); @@ -318,6 +319,7 @@ static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs) for (i = 0; i < dma_map->dma_pages_cnt; i++) { dma = &dma_map->dma_pages[i]; if (*dma) { + *dma &= ~XSK_NEXT_PG_CONTIG_MASK; dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE, DMA_BIDIRECTIONAL, attrs); *dma = 0; diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c index 1f08ebf7d80c..24ca49ecebea 100644 --- a/net/xfrm/espintcp.c +++ b/net/xfrm/espintcp.c @@ -170,7 +170,7 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb) { struct espintcp_ctx *ctx = espintcp_getctx(sk); - if (skb_queue_len(&ctx->out_queue) >= netdev_max_backlog) + if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog)) return -ENOBUFS; __skb_queue_tail(&ctx->out_queue, skb); diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index c255aac6b816..8b8e957a69c3 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -97,6 +97,18 @@ static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb) } } +static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb) +{ + struct xfrm_offload *xo = xfrm_offload(skb); + __u32 seq = xo->seq.low; + + seq += skb_shinfo(skb)->gso_segs; + if (unlikely(seq < xo->seq.low)) + return true; + + return false; +} + struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again) { int err; @@ -134,7 +146,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur return skb; } - if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) { + if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) || + unlikely(xmit_xfrm_check_overflow(skb)))) { struct sk_buff *segs; /* Packet got rerouted, fixup features and segment it. 
*/ diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 61e6220ddd5a..77e82033ad70 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -782,7 +782,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb, trans = this_cpu_ptr(&xfrm_trans_tasklet); - if (skb_queue_len(&trans->queue) >= netdev_max_backlog) + if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog)) return -ENOBUFS; BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb)); diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index 0814320472f1..24ac6805275e 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c @@ -212,6 +212,7 @@ static void ipcomp_free_scratches(void) vfree(*per_cpu_ptr(scratches, i)); free_percpu(scratches); + ipcomp_scratches = NULL; } static void * __percpu *ipcomp_alloc_scratches(void) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 9c5811e4ebc4..7b9d21d85d48 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2685,8 +2685,10 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family, *num_xfrms = 0; return 0; } - if (IS_ERR(pols[0])) + if (IS_ERR(pols[0])) { + *num_pols = 0; return PTR_ERR(pols[0]); + } *num_xfrms = pols[0]->xfrm_nr; @@ -2701,6 +2703,7 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family, if (pols[1]) { if (IS_ERR(pols[1])) { xfrm_pols_put(pols, *num_pols); + *num_pols = 0; return PTR_ERR(pols[1]); } (*num_pols)++; @@ -3633,6 +3636,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, if (pols[1]) { if (IS_ERR(pols[1])) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); + xfrm_pol_put(pols[0]); return 0; } pols[1]->curlft.use_time = ktime_get_real_seconds(); diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index c6a4338a0d08..65d009e3b6bb 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -657,7 +657,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff oseq += skb_shinfo(skb)->gso_segs; } - if (unlikely(oseq < replay_esn->oseq)) { + if (unlikely(xo->seq.low < replay_esn->oseq)) { XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi; xo->seq.hi = oseq_hi; replay_esn->oseq_hi = oseq_hi; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 119c34717791..5f4b51e10b59 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1020,7 +1020,8 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, if ((x->sel.family && (x->sel.family != family || !xfrm_selector_match(&x->sel, fl, family))) || - !security_xfrm_state_pol_flow_match(x, pol, fl)) + !security_xfrm_state_pol_flow_match(x, pol, + &fl->u.__fl_common)) return; if (!*best || @@ -1035,7 +1036,8 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, if ((!x->sel.family || (x->sel.family == family && xfrm_selector_match(&x->sel, fl, family))) && - security_xfrm_state_pol_flow_match(x, pol, fl)) + security_xfrm_state_pol_flow_match(x, pol, + &fl->u.__fl_common)) *error = -ESRCH; } } @@ -1554,6 +1556,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, x->km.seq = orig->km.seq; x->replay = orig->replay; x->preplay = orig->preplay; + x->lastused = orig->lastused; return x; @@ -2567,7 +2570,7 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload) int err; if (family == AF_INET && - xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc) + READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc)) x->props.flags |= XFRM_STATE_NOPMTUDISC; err = 
-EPROTONOSUPPORT; diff --git a/samples/crypto/fips140_lab_util.c b/samples/crypto/fips140_lab_util.c index 996839dbd2e3..5f8e9018013a 100644 --- a/samples/crypto/fips140_lab_util.c +++ b/samples/crypto/fips140_lab_util.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -45,6 +46,8 @@ * ---------------------------------------------------------------------------*/ #define ARRAY_SIZE(A) (sizeof(A) / sizeof((A)[0])) +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? (a) : (b)) static void __attribute__((noreturn)) do_die(const char *format, va_list va, int err) @@ -109,6 +112,23 @@ static const char *bytes_to_hex(const uint8_t *bytes, size_t count) return hex; } +static void full_write(int fd, const void *buf, size_t count) +{ + while (count) { + ssize_t ret = write(fd, buf, count); + + if (ret < 0) + die_errno("write failed"); + buf += ret; + count -= ret; + } +} + +enum { + OPT_AMOUNT, + OPT_ITERATIONS, +}; + static void usage(void); /* --------------------------------------------------------------------------- @@ -226,6 +246,68 @@ static int get_req_fd(int alg_fd, const char *alg_name) return req_fd; } +/* --------------------------------------------------------------------------- + * dump_jitterentropy command + * ---------------------------------------------------------------------------*/ + +static void dump_from_jent_fd(int fd, size_t count) +{ + uint8_t buf[AF_ALG_MAX_RNG_REQUEST_SIZE]; + + while (count) { + ssize_t ret; + + memset(buf, 0, sizeof(buf)); + ret = read(fd, buf, MIN(count, sizeof(buf))); + if (ret < 0) + die_errno("error reading from jitterentropy_rng"); + full_write(STDOUT_FILENO, buf, ret); + count -= ret; + } +} + +static int cmd_dump_jitterentropy(int argc, char *argv[]) +{ + static const struct option longopts[] = { + { "amount", required_argument, NULL, OPT_AMOUNT }, + { "iterations", required_argument, NULL, OPT_ITERATIONS }, + { NULL, 0, NULL, 0 }, + }; + size_t amount = 128; + size_t iterations = 1; + size_t i; + int c; + + while ((c = getopt_long(argc, argv, "", longopts, NULL)) != -1) { + switch (c) { + case OPT_AMOUNT: + amount = strtoul(optarg, NULL, 0); + if (amount <= 0 || amount >= ULONG_MAX) + die("invalid argument to --amount"); + break; + case OPT_ITERATIONS: + iterations = strtoul(optarg, NULL, 0); + if (iterations <= 0 || iterations >= ULONG_MAX) + die("invalid argument to --iterations"); + break; + default: + usage(); + return 1; + } + } + + for (i = 0; i < iterations; i++) { + int alg_fd = get_alg_fd("rng", "jitterentropy_rng"); + int req_fd = get_req_fd(alg_fd, "jitterentropy_rng"); + + dump_from_jent_fd(req_fd, amount); + + close(req_fd); + close(alg_fd); + } + return 0; +} + /* --------------------------------------------------------------------------- * show_invalid_inputs command * ---------------------------------------------------------------------------*/ @@ -510,6 +592,7 @@ static const struct command { const char *name; int (*func)(int argc, char *argv[]); } commands[] = { + { "dump_jitterentropy", cmd_dump_jitterentropy }, { "show_invalid_inputs", cmd_show_invalid_inputs }, { "show_module_version", cmd_show_module_version }, { "show_service_indicators", cmd_show_service_indicators }, @@ -519,9 +602,14 @@ static void usage(void) { fprintf(stderr, "Usage:\n" +" fips140_lab_util dump_jitterentropy [OPTION]...\n" " fips140_lab_util show_invalid_inputs\n" " fips140_lab_util show_module_version\n" " fips140_lab_util show_service_indicators [SERVICE]...\n" +"\n" +"Options for 
dump_jitterentropy:\n" +" --amount=AMOUNT Amount to dump in bytes per iteration (default 128)\n" +" --iterations=COUNT Number of start-up iterations (default 1)\n" ); } diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c index 89e6bf27cd9f..d620f3da086f 100644 --- a/samples/ftrace/ftrace-direct-modify.c +++ b/samples/ftrace/ftrace-direct-modify.c @@ -31,7 +31,7 @@ asm ( " call my_direct_func1\n" " leave\n" " .size my_tramp1, .-my_tramp1\n" -" ret\n" + ASM_RET " .type my_tramp2, @function\n" " .globl my_tramp2\n" " my_tramp2:" @@ -39,7 +39,7 @@ asm ( " movq %rsp, %rbp\n" " call my_direct_func2\n" " leave\n" -" ret\n" + ASM_RET " .size my_tramp2, .-my_tramp2\n" " .popsection\n" ); diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c index 11b99325f3db..3927cb880d1a 100644 --- a/samples/ftrace/ftrace-direct-too.c +++ b/samples/ftrace/ftrace-direct-too.c @@ -31,7 +31,7 @@ asm ( " popq %rsi\n" " popq %rdi\n" " leave\n" -" ret\n" + ASM_RET " .size my_tramp, .-my_tramp\n" " .popsection\n" ); diff --git a/samples/ftrace/ftrace-direct.c b/samples/ftrace/ftrace-direct.c index 642c50b5f716..1e901bb8d729 100644 --- a/samples/ftrace/ftrace-direct.c +++ b/samples/ftrace/ftrace-direct.c @@ -24,7 +24,7 @@ asm ( " call my_direct_func\n" " popq %rdi\n" " leave\n" -" ret\n" + ASM_RET " .size my_tramp, .-my_tramp\n" " .popsection\n" ); diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 0d6e11820791..25696de8114a 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -179,8 +179,29 @@ echo-cmd = $(if $($(quiet)cmd_$(1)),\ quiet_redirect := silent_redirect := exec >/dev/null; +# Delete the target on interruption +# +# GNU Make automatically deletes the target if it has already been changed by +# the interrupted recipe. So, you can safely stop the build by Ctrl-C (Make +# will delete incomplete targets), and resume it later. +# +# However, this does not work when the stderr is piped to another program, like +# $ make >&2 | tee log +# Make dies with SIGPIPE before cleaning the targets. +# +# To address it, we clean the target in signal traps. +# +# Make deletes the target when it catches SIGHUP, SIGINT, SIGQUIT, SIGTERM. +# So, we cover them, and also SIGPIPE just in case. +# +# Of course, this is unneeded for phony targets. 
+delete-on-interrupt = \ + $(if $(filter-out $(PHONY), $@), \ + $(foreach sig, HUP INT QUIT TERM PIPE, \ + trap 'rm -f $@; trap - $(sig); kill -s $(sig) $$$$' $(sig);)) + # printing commands -cmd = @set -e; $(echo-cmd) $($(quiet)redirect) $(cmd_$(1)) +cmd = @set -e; $(echo-cmd) $($(quiet)redirect) $(delete-on-interrupt) $(cmd_$(1)) ### # if_changed - execute command if any prerequisite is newer than diff --git a/scripts/Makefile.build b/scripts/Makefile.build index edd7f055e0b5..f90c39a208d4 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -388,7 +388,7 @@ ifeq ($(CONFIG_LTO_CLANG) $(CONFIG_MODVERSIONS),y y) cmd_update_lto_symversions = \ rm -f $@.symversions \ $(foreach n, $(filter-out FORCE,$^), \ - $(if $(wildcard $(n).symversions), \ + $(if $(shell test -s $(n).symversions && echo y), \ ; cat $(n).symversions >> $@.symversions)) else cmd_update_lto_symversions = echo >/dev/null diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 23d3967786b9..fe327a4532dd 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -52,6 +52,7 @@ KBUILD_CFLAGS += -Wno-format-zero-length KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast) KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access) +KBUILD_CFLAGS += $(call cc-disable-warning, cast-function-type-strict) endif endif diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins index 4aad28480035..36814be80264 100644 --- a/scripts/Makefile.gcc-plugins +++ b/scripts/Makefile.gcc-plugins @@ -6,7 +6,7 @@ gcc-plugin-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) += latent_entropy_plugin.so gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) \ += -DLATENT_ENTROPY_PLUGIN ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY - DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable + DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable -ULATENT_ENTROPY_PLUGIN endif export DISABLE_LATENT_ENTROPY_PLUGIN diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 9287cdaf7b8d..39e47362df36 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -243,7 +243,9 @@ objtool_args = \ $(if $(or $(CONFIG_GCOV_KERNEL),$(CONFIG_LTO_CLANG)), \ --no-unreachable,) \ $(if $(CONFIG_RETPOLINE), --retpoline,) \ + $(if $(CONFIG_RETHUNK), --rethunk,) \ $(if $(CONFIG_X86_SMAP), --uaccess,) \ + $(if $(CONFIG_SLS), --sls,) \ $(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount,) # Useful for describing the dependency of composite objects diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index 772e73ee455d..9d7c5e48134c 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -93,8 +93,7 @@ obj := $(KBUILD_EXTMOD) src := $(obj) # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS -include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \ - $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile) +include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile) # modpost option for external modules MODPOST += -e diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan index 9e2092fd5206..7099c603ff0a 100644 --- a/scripts/Makefile.ubsan +++ b/scripts/Makefile.ubsan @@ -8,7 +8,6 @@ ubsan-cflags-$(CONFIG_UBSAN_LOCAL_BOUNDS) += -fsanitize=local-bounds ubsan-cflags-$(CONFIG_UBSAN_SHIFT) += -fsanitize=shift ubsan-cflags-$(CONFIG_UBSAN_DIV_ZERO) += -fsanitize=integer-divide-by-zero ubsan-cflags-$(CONFIG_UBSAN_UNREACHABLE) += -fsanitize=unreachable 
-ubsan-cflags-$(CONFIG_UBSAN_OBJECT_SIZE) += -fsanitize=object-size ubsan-cflags-$(CONFIG_UBSAN_BOOL) += -fsanitize=bool ubsan-cflags-$(CONFIG_UBSAN_ENUM) += -fsanitize=enum ubsan-cflags-$(CONFIG_UBSAN_TRAP) += -fsanitize-undefined-trap-on-error diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh index 2b366d945ccb..d8f6f9c63043 100755 --- a/scripts/adjust_autoksyms.sh +++ b/scripts/adjust_autoksyms.sh @@ -34,9 +34,6 @@ case "$KBUILD_VERBOSE" in ;; esac -# We need access to CONFIG_ symbols -. include/config/auto.conf - # Generate a new symbol list file $CONFIG_SHELL $srctree/scripts/gen_autoksyms.sh "$new_ksyms_file" diff --git a/scripts/dummy-tools/dummy-plugin-dir/include/plugin-version.h b/scripts/dummy-tools/dummy-plugin-dir/include/plugin-version.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/scripts/dummy-tools/gcc b/scripts/dummy-tools/gcc index 0d0589cf8184..346757a87dbc 100755 --- a/scripts/dummy-tools/gcc +++ b/scripts/dummy-tools/gcc @@ -77,12 +77,8 @@ fi # To set GCC_PLUGINS if arg_contain -print-file-name=plugin "$@"; then - plugin_dir=$(mktemp -d) - - mkdir -p $plugin_dir/include - touch $plugin_dir/include/plugin-version.h - - echo $plugin_dir + # Use $0 to find the in-tree dummy directory + echo "$(dirname "$(readlink -f "$0")")/dummy-plugin-dir" exit 0 fi diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c index 3034d991ba55..71b298023022 100644 --- a/scripts/extract-cert.c +++ b/scripts/extract-cert.c @@ -23,6 +23,13 @@ #include #include +/* + * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API. + * + * Remove this if/when that API is no longer used + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + #define PKEY_ID_PKCS7 2 static __attribute__((noreturn)) diff --git a/scripts/faddr2line b/scripts/faddr2line index 6c6439f69a72..9e730b805e87 100755 --- a/scripts/faddr2line +++ b/scripts/faddr2line @@ -44,17 +44,6 @@ set -o errexit set -o nounset -READELF="${CROSS_COMPILE:-}readelf" -ADDR2LINE="${CROSS_COMPILE:-}addr2line" -SIZE="${CROSS_COMPILE:-}size" -NM="${CROSS_COMPILE:-}nm" - -command -v awk >/dev/null 2>&1 || die "awk isn't installed" -command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed" -command -v ${ADDR2LINE} >/dev/null 2>&1 || die "addr2line isn't installed" -command -v ${SIZE} >/dev/null 2>&1 || die "size isn't installed" -command -v ${NM} >/dev/null 2>&1 || die "nm isn't installed" - usage() { echo "usage: faddr2line [--list] ..." >&2 exit 1 @@ -69,6 +58,14 @@ die() { exit 1 } +READELF="${CROSS_COMPILE:-}readelf" +ADDR2LINE="${CROSS_COMPILE:-}addr2line" +AWK="awk" + +command -v ${AWK} >/dev/null 2>&1 || die "${AWK} isn't installed" +command -v ${READELF} >/dev/null 2>&1 || die "${READELF} isn't installed" +command -v ${ADDR2LINE} >/dev/null 2>&1 || die "${ADDR2LINE} isn't installed" + # Try to figure out the source directory prefix so we can remove it from the # addr2line output. HACK ALERT: This assumes that start_kernel() is in # init/main.c! This only works for vmlinux. 
Otherwise it falls back to @@ -76,7 +73,8 @@ die() { find_dir_prefix() { local objfile=$1 - local start_kernel_addr=$(${READELF} -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}') + local start_kernel_addr=$(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' | + ${AWK} '$8 == "start_kernel" {printf "0x%s", $2}') [[ -z $start_kernel_addr ]] && return local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr) @@ -97,86 +95,158 @@ __faddr2line() { local dir_prefix=$3 local print_warnings=$4 - local func=${func_addr%+*} - local offset=${func_addr#*+} - offset=${offset%/*} - local size= - [[ $func_addr =~ "/" ]] && size=${func_addr#*/} + local sym_name=${func_addr%+*} + local func_offset=${func_addr#*+} + func_offset=${func_offset%/*} + local user_size= + local file_type + local is_vmlinux=0 + [[ $func_addr =~ "/" ]] && user_size=${func_addr#*/} - if [[ -z $func ]] || [[ -z $offset ]] || [[ $func = $func_addr ]]; then + if [[ -z $sym_name ]] || [[ -z $func_offset ]] || [[ $sym_name = $func_addr ]]; then warn "bad func+offset $func_addr" DONE=1 return fi - # Go through each of the object's symbols which match the func name. - # In rare cases there might be duplicates. - file_end=$(${SIZE} -Ax $objfile | awk '$1 == ".text" {print $2}') - while read symbol; do - local fields=($symbol) - local sym_base=0x${fields[0]} - local sym_type=${fields[1]} - local sym_end=${fields[3]} + # vmlinux uses absolute addresses in the section table rather than + # section offsets. + local file_type=$(${READELF} --file-header $objfile | + ${AWK} '$1 == "Type:" { print $2; exit }') + if [[ $file_type = "EXEC" ]] || [[ $file_type == "DYN" ]]; then + is_vmlinux=1 + fi - # calculate the size - local sym_size=$(($sym_end - $sym_base)) - if [[ -z $sym_size ]] || [[ $sym_size -le 0 ]]; then - warn "bad symbol size: base: $sym_base end: $sym_end" + # Go through each of the object's symbols which match the func name. + # In rare cases there might be duplicates, in which case we print all + # matches. + while read line; do + local fields=($line) + local sym_addr=0x${fields[1]} + local sym_elf_size=${fields[2]} + local sym_sec=${fields[6]} + local sec_size + local sec_name + + # Get the section size: + sec_size=$(${READELF} --section-headers --wide $objfile | + sed 's/\[ /\[/' | + ${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print "0x" $6; exit }') + + if [[ -z $sec_size ]]; then + warn "bad section size: section: $sym_sec" DONE=1 return fi + + # Get the section name: + sec_name=$(${READELF} --section-headers --wide $objfile | + sed 's/\[ /\[/' | + ${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print $2; exit }') + + if [[ -z $sec_name ]]; then + warn "bad section name: section: $sym_sec" + DONE=1 + return + fi + + # Calculate the symbol size. + # + # Unfortunately we can't use the ELF size, because kallsyms + # also includes the padding bytes in its size calculation. For + # kallsyms, the size calculation is the distance between the + # symbol and the next symbol in a sorted list. 
+ local sym_size + local cur_sym_addr + local found=0 + while read line; do + local fields=($line) + cur_sym_addr=0x${fields[1]} + local cur_sym_elf_size=${fields[2]} + local cur_sym_name=${fields[7]:-} + + if [[ $cur_sym_addr = $sym_addr ]] && + [[ $cur_sym_elf_size = $sym_elf_size ]] && + [[ $cur_sym_name = $sym_name ]]; then + found=1 + continue + fi + + if [[ $found = 1 ]]; then + sym_size=$(($cur_sym_addr - $sym_addr)) + [[ $sym_size -lt $sym_elf_size ]] && continue; + found=2 + break + fi + done < <(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' | ${AWK} -v sec=$sym_sec '$7 == sec' | sort --key=2) + + if [[ $found = 0 ]]; then + warn "can't find symbol: sym_name: $sym_name sym_sec: $sym_sec sym_addr: $sym_addr sym_elf_size: $sym_elf_size" + DONE=1 + return + fi + + # If nothing was found after the symbol, assume it's the last + # symbol in the section. + [[ $found = 1 ]] && sym_size=$(($sec_size - $sym_addr)) + + if [[ -z $sym_size ]] || [[ $sym_size -le 0 ]]; then + warn "bad symbol size: sym_addr: $sym_addr cur_sym_addr: $cur_sym_addr" + DONE=1 + return + fi + sym_size=0x$(printf %x $sym_size) - # calculate the address - local addr=$(($sym_base + $offset)) + # Calculate the address from user-supplied offset: + local addr=$(($sym_addr + $func_offset)) if [[ -z $addr ]] || [[ $addr = 0 ]]; then - warn "bad address: $sym_base + $offset" + warn "bad address: $sym_addr + $func_offset" DONE=1 return fi addr=0x$(printf %x $addr) - # weed out non-function symbols - if [[ $sym_type != t ]] && [[ $sym_type != T ]]; then + # If the user provided a size, make sure it matches the symbol's size: + if [[ -n $user_size ]] && [[ $user_size -ne $sym_size ]]; then [[ $print_warnings = 1 ]] && - echo "skipping $func address at $addr due to non-function symbol of type '$sym_type'" - continue - fi - - # if the user provided a size, make sure it matches the symbol's size - if [[ -n $size ]] && [[ $size -ne $sym_size ]]; then - [[ $print_warnings = 1 ]] && - echo "skipping $func address at $addr due to size mismatch ($size != $sym_size)" + echo "skipping $sym_name address at $addr due to size mismatch ($user_size != $sym_size)" continue; fi - # make sure the provided offset is within the symbol's range - if [[ $offset -gt $sym_size ]]; then + # Make sure the provided offset is within the symbol's range: + if [[ $func_offset -gt $sym_size ]]; then [[ $print_warnings = 1 ]] && - echo "skipping $func address at $addr due to size mismatch ($offset > $sym_size)" + echo "skipping $sym_name address at $addr due to size mismatch ($func_offset > $sym_size)" continue fi - # separate multiple entries with a blank line + # In case of duplicates or multiple addresses specified on the + # cmdline, separate multiple entries with a blank line: [[ $FIRST = 0 ]] && echo FIRST=0 - # pass real address to addr2line - echo "$func+$offset/$sym_size:" - local file_lines=$(${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;") - [[ -z $file_lines ]] && return + echo "$sym_name+$func_offset/$sym_size:" + # Pass section address to addr2line and strip absolute paths + # from the output: + local args="--functions --pretty-print --inlines --exe=$objfile" + [[ $is_vmlinux = 0 ]] && args="$args --section=$sec_name" + local output=$(${ADDR2LINE} $args $addr | sed "s; $dir_prefix\(\./\)*; ;") + [[ -z $output ]] && continue + + # Default output (non --list): if [[ $LIST = 0 ]]; then - echo "$file_lines" | while read -r line + echo "$output" | while read -r line do echo $line done DONE=1; - return + continue fi - # 
show each line with context - echo "$file_lines" | while read -r line + # For --list, show each line with its corresponding source code: + echo "$output" | while read -r line do echo echo $line @@ -184,12 +254,12 @@ __faddr2line() { n1=$[$n-5] n2=$[$n+5] f=$(echo $line | sed 's/.*at \(.\+\):.*/\1/g') - awk 'NR>=strtonum("'$n1'") && NR<=strtonum("'$n2'") { if (NR=='$n') printf(">%d<", NR); else printf(" %d ", NR); printf("\t%s\n", $0)}' $f + ${AWK} 'NR>=strtonum("'$n1'") && NR<=strtonum("'$n2'") { if (NR=='$n') printf(">%d<", NR); else printf(" %d ", NR); printf("\t%s\n", $0)}' $f done DONE=1 - done < <(${NM} -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }') + done < <(${READELF} --symbols --wide $objfile | sed 's/\[.*\]//' | ${AWK} -v fn=$sym_name '$4 == "FUNC" && $8 == fn') } [[ $# -lt 2 ]] && usage diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c index cbe1d6c4b1a5..c84bef1d2895 100644 --- a/scripts/gcc-plugins/latent_entropy_plugin.c +++ b/scripts/gcc-plugins/latent_entropy_plugin.c @@ -86,25 +86,31 @@ static struct plugin_info latent_entropy_plugin_info = { .help = "disable\tturn off latent entropy instrumentation\n", }; -static unsigned HOST_WIDE_INT seed; -/* - * get_random_seed() (this is a GCC function) generates the seed. - * This is a simple random generator without any cryptographic security because - * the entropy doesn't come from here. - */ +static unsigned HOST_WIDE_INT deterministic_seed; +static unsigned HOST_WIDE_INT rnd_buf[32]; +static size_t rnd_idx = ARRAY_SIZE(rnd_buf); +static int urandom_fd = -1; + static unsigned HOST_WIDE_INT get_random_const(void) { - unsigned int i; - unsigned HOST_WIDE_INT ret = 0; - - for (i = 0; i < 8 * sizeof(ret); i++) { - ret = (ret << 1) | (seed & 1); - seed >>= 1; - if (ret & 1) - seed ^= 0xD800000000000000ULL; + if (deterministic_seed) { + unsigned HOST_WIDE_INT w = deterministic_seed; + w ^= w << 13; + w ^= w >> 7; + w ^= w << 17; + deterministic_seed = w; + return deterministic_seed; } - return ret; + if (urandom_fd < 0) { + urandom_fd = open("/dev/urandom", O_RDONLY); + gcc_assert(urandom_fd >= 0); + } + if (rnd_idx >= ARRAY_SIZE(rnd_buf)) { + gcc_assert(read(urandom_fd, rnd_buf, sizeof(rnd_buf)) == sizeof(rnd_buf)); + rnd_idx = 0; + } + return rnd_buf[rnd_idx++]; } static tree tree_get_random_const(tree type) @@ -549,8 +555,6 @@ static void latent_entropy_start_unit(void *gcc_data __unused, tree type, id; int quals; - seed = get_random_seed(false); - if (in_lto_p) return; @@ -585,6 +589,12 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, const struct plugin_argument * const argv = plugin_info->argv; int i; + /* + * Call get_random_seed() with noinit=true, so that this returns + * 0 in the case where no seed has been passed via -frandom-seed. 
+ */ + deterministic_seed = get_random_seed(true); + static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = { { .base = &latent_entropy_decl, diff --git a/scripts/gdb/linux/config.py b/scripts/gdb/linux/config.py index 90e1565b1967..8843ab3cbadd 100644 --- a/scripts/gdb/linux/config.py +++ b/scripts/gdb/linux/config.py @@ -24,9 +24,9 @@ class LxConfigDump(gdb.Command): filename = arg try: - py_config_ptr = gdb.parse_and_eval("kernel_config_data + 8") - py_config_size = gdb.parse_and_eval( - "sizeof(kernel_config_data) - 1 - 8 * 2") + py_config_ptr = gdb.parse_and_eval("&kernel_config_data") + py_config_ptr_end = gdb.parse_and_eval("&kernel_config_data_end") + py_config_size = py_config_ptr_end - py_config_ptr except gdb.error as e: raise gdb.GdbError("Can't find config, enable CONFIG_IKCONFIG?") diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 6325bec3f66f..19af6dd160e6 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -1215,6 +1215,13 @@ sub dump_struct($$) { $members =~ s/\s*CRYPTO_MINALIGN_ATTR/ /gos; $members =~ s/\s*____cacheline_aligned_in_smp/ /gos; $members =~ s/\s*____cacheline_aligned/ /gos; + # unwrap struct_group(): + # - first eat non-declaration parameters and rewrite for final match + # - then remove macro, outer parens, and trailing semicolon + $members =~ s/\bstruct_group\s*\(([^,]*,)/STRUCT_GROUP(/gos; + $members =~ s/\bstruct_group_(attr|tagged)\s*\(([^,]*,){2}/STRUCT_GROUP(/gos; + $members =~ s/\b__struct_group\s*\(([^,]*,){3}/STRUCT_GROUP(/gos; + $members =~ s/\bSTRUCT_GROUP(\(((?:(?>[^)(]+)|(?1))*)\))[^;]*;/$2/gos; # replace DECLARE_BITMAP $members =~ s/__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)/DECLARE_BITMAP($1, __ETHTOOL_LINK_MODE_MASK_NBITS)/gos; diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 6ae0d70089b4..2631ab4ce9fd 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -129,6 +129,9 @@ objtool_link() objtoolcmd="check" fi objtoolopt="${objtoolopt} --vmlinux" + if [ -n "${CONFIG_CPU_UNRET_ENTRY}" ]; then + objtoolopt="${objtoolopt} --unret" + fi if [ -z "${CONFIG_FRAME_POINTER}" ]; then objtoolopt="${objtoolopt} --no-fp" fi @@ -141,6 +144,9 @@ objtool_link() if [ -n "${CONFIG_X86_SMAP}" ]; then objtoolopt="${objtoolopt} --uaccess" fi + if [ -n "${CONFIG_SLS}" ]; then + objtoolopt="${objtoolopt} --sls" + fi info OBJTOOL ${1} tools/objtool/objtool ${objtoolcmd} ${objtoolopt} ${1} fi @@ -228,7 +234,7 @@ gen_btf() vmlinux_link ${1} info "BTF" ${2} - LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1} + LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${PAHOLE_FLAGS} ${1} # Create ${2} which contains just .BTF section but no symbols. Add # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all diff --git a/scripts/mksysmap b/scripts/mksysmap index 9aa23d15862a..ad8bbc52267d 100755 --- a/scripts/mksysmap +++ b/scripts/mksysmap @@ -41,4 +41,4 @@ # so we just ignore them to let readprofile continue to work. # (At least sparc64 has __crc_ in the middle). 
-$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2 +$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)\|\( L0\)' > $2 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 9c8c7511d10d..0cb8de405183 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -1115,7 +1115,7 @@ static const struct sectioncheck sectioncheck[] = { }, /* Do not export init/exit functions or data */ { - .fromsec = { "__ksymtab*", NULL }, + .fromsec = { "___ksymtab*", NULL }, .bad_tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL }, .mismatch = EXPORT_TO_INIT_EXIT, .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL }, @@ -1267,7 +1267,8 @@ static int secref_whitelist(const struct sectioncheck *mismatch, static inline int is_arm_mapping_symbol(const char *str) { - return str[0] == '$' && strchr("axtd", str[1]) + return str[0] == '$' && + (str[1] == 'a' || str[1] == 'd' || str[1] == 't' || str[1] == 'x') && (str[2] == '\0' || str[2] == '.'); } @@ -1978,7 +1979,7 @@ static char *remove_dot(char *s) if (n && s[n]) { size_t m = strspn(s + n + 1, "0123456789"); - if (m && (s[n + m] == '.' || s[n + m] == 0)) + if (m && (s[n + m + 1] == '.' || s[n + m + 1] == 0)) s[n] = 0; /* strip trailing .lto */ diff --git a/scripts/module.lds.S b/scripts/module.lds.S index 6cca5d88744e..6b7cfbb0192a 100644 --- a/scripts/module.lds.S +++ b/scripts/module.lds.S @@ -29,6 +29,8 @@ SECTIONS { .init_array 0 : ALIGN(8) { *(SORT(.init_array.*)) *(.init_array) } + .altinstructions 0 : ALIGN(8) { KEEP(*(.altinstructions)) } + __bug_table 0 : ALIGN(8) { KEEP(*(__bug_table)) } __jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) } __patchable_function_entries : { *(__patchable_function_entries) } diff --git a/scripts/package/mkspec b/scripts/package/mkspec index 7c477ca7dc98..951cc60e5a90 100755 --- a/scripts/package/mkspec +++ b/scripts/package/mkspec @@ -85,10 +85,10 @@ $S mkdir -p %{buildroot}/boot %ifarch ia64 mkdir -p %{buildroot}/boot/efi - cp \$($MAKE image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE + cp \$($MAKE -s image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE ln -s efi/vmlinuz-$KERNELRELEASE %{buildroot}/boot/ %else - cp \$($MAKE image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE + cp \$($MAKE -s image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE %endif $M $MAKE %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} modules_install $MAKE %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install diff --git a/scripts/pahole-flags.sh b/scripts/pahole-flags.sh new file mode 100755 index 000000000000..8c82173e42e5 --- /dev/null +++ b/scripts/pahole-flags.sh @@ -0,0 +1,21 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +extra_paholeopt= + +if ! 
[ -x "$(command -v ${PAHOLE})" ]; then + exit 0 +fi + +pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/') + +if [ "${pahole_ver}" -ge "118" ] && [ "${pahole_ver}" -le "121" ]; then + # pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars + extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_vars" +fi + +if [ "${pahole_ver}" -ge "124" ]; then + extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_enum64" +fi + +echo ${extra_paholeopt} diff --git a/scripts/selinux/install_policy.sh b/scripts/selinux/install_policy.sh index 2dccf141241d..20af56ce245c 100755 --- a/scripts/selinux/install_policy.sh +++ b/scripts/selinux/install_policy.sh @@ -78,7 +78,7 @@ cd /etc/selinux/dummy/contexts/files $SF -F file_contexts / mounts=`cat /proc/$$/mounts | \ - egrep "ext[234]|jfs|xfs|reiserfs|jffs2|gfs2|btrfs|f2fs|ocfs2" | \ + grep -E "ext[234]|jfs|xfs|reiserfs|jffs2|gfs2|btrfs|f2fs|ocfs2" | \ awk '{ print $2 '}` $SF -F file_contexts $mounts diff --git a/scripts/sign-file.c b/scripts/sign-file.c index fbd34b8e8f57..7434e9ea926e 100644 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c @@ -29,6 +29,13 @@ #include #include +/* + * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API. + * + * Remove this if/when that API is no longer used + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + /* * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to * assume that it's not available and its header file is missing and that we diff --git a/security/Kconfig b/security/Kconfig index 0548db16c49d..9893c316da89 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -54,17 +54,6 @@ config SECURITY_NETWORK implement socket and networking access controls. If you are unsure how to answer this question, answer N. -config PAGE_TABLE_ISOLATION - bool "Remove the kernel mapping in user mode" - default y - depends on (X86_64 || X86_PAE) && !UML - help - This feature reduces the number of hardware side channels by - ensuring that the majority of kernel addresses are not mapped - into userspace. - - See Documentation/x86/pti.rst for more details. - config SECURITY_INFINIBAND bool "Infiniband Security Hooks" depends on SECURITY && INFINIBAND diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening index a56c36470cb1..a69055b32cac 100644 --- a/security/Kconfig.hardening +++ b/security/Kconfig.hardening @@ -22,13 +22,23 @@ menu "Memory initialization" config CC_HAS_AUTO_VAR_INIT_PATTERN def_bool $(cc-option,-ftrivial-auto-var-init=pattern) -config CC_HAS_AUTO_VAR_INIT_ZERO +config CC_HAS_AUTO_VAR_INIT_ZERO_BARE + def_bool $(cc-option,-ftrivial-auto-var-init=zero) + +config CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER + # Clang 16 and later warn about using the -enable flag, but it + # is required before then. def_bool $(cc-option,-ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang) + depends on !CC_HAS_AUTO_VAR_INIT_ZERO_BARE + +config CC_HAS_AUTO_VAR_INIT_ZERO + def_bool CC_HAS_AUTO_VAR_INIT_ZERO_BARE || CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER choice prompt "Initialize kernel stack variables at function entry" default GCC_PLUGIN_STRUCTLEAK_BYREF_ALL if COMPILE_TEST && GCC_PLUGINS default INIT_STACK_ALL_PATTERN if COMPILE_TEST && CC_HAS_AUTO_VAR_INIT_PATTERN + default INIT_STACK_ALL_ZERO if CC_HAS_AUTO_VAR_INIT_ZERO default INIT_STACK_NONE help This option enables initialization of stack variables at @@ -39,11 +49,11 @@ choice syscalls. 
This chooses the level of coverage over classes of potentially - uninitialized variables. The selected class will be + uninitialized variables. The selected class of variable will be initialized before use in a function. config INIT_STACK_NONE - bool "no automatic initialization (weakest)" + bool "no automatic stack variable initialization (weakest)" help Disable automatic stack variable initialization. This leaves the kernel vulnerable to the standard @@ -80,7 +90,7 @@ choice and is disallowed. config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL - bool "zero-init anything passed by reference (very strong)" + bool "zero-init everything passed by reference (very strong)" depends on GCC_PLUGINS depends on !(KASAN && KASAN_STACK) select GCC_PLUGIN_STRUCTLEAK @@ -91,33 +101,44 @@ choice of uninitialized stack variable exploits and information exposures. + As a side-effect, this keeps a lot of variables on the + stack that can otherwise be optimized out, so combining + this with CONFIG_KASAN_STACK can lead to a stack overflow + and is disallowed. + config INIT_STACK_ALL_PATTERN - bool "0xAA-init everything on the stack (strongest)" + bool "pattern-init everything (strongest)" depends on CC_HAS_AUTO_VAR_INIT_PATTERN help - Initializes everything on the stack with a 0xAA - pattern. This is intended to eliminate all classes - of uninitialized stack variable exploits and information - exposures, even variables that were warned to have been - left uninitialized. + Initializes everything on the stack (including padding) + with a specific debug value. This is intended to eliminate + all classes of uninitialized stack variable exploits and + information exposures, even variables that were warned about + having been left uninitialized. Pattern initialization is known to provoke many existing bugs related to uninitialized locals, e.g. pointers receive - non-NULL values, buffer sizes and indices are very big. + non-NULL values, buffer sizes and indices are very big. The + pattern is situation-specific; Clang on 64-bit uses 0xAA + repeating for all types and padding except float and double + which use 0xFF repeating (-NaN). Clang on 32-bit uses 0xFF + repeating for all types and padding. config INIT_STACK_ALL_ZERO - bool "zero-init everything on the stack (strongest and safest)" + bool "zero-init everything (strongest and safest)" depends on CC_HAS_AUTO_VAR_INIT_ZERO help - Initializes everything on the stack with a zero - value. This is intended to eliminate all classes - of uninitialized stack variable exploits and information - exposures, even variables that were warned to have been - left uninitialized. + Initializes everything on the stack (including padding) + with a zero value. This is intended to eliminate all + classes of uninitialized stack variable exploits and + information exposures, even variables that were warned + about having been left uninitialized. - Zero initialization provides safe defaults for strings, - pointers, indices and sizes, and is therefore - more suitable as a security mitigation measure. + Zero initialization provides safe defaults for strings + (immediately NUL-terminated), pointers (NULL), indices + (index 0), and sizes (0 length), so it is therefore more + suitable as a production security mitigation than pattern + initialization. 
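
As an aside, a rough sketch of what CONFIG_INIT_STACK_ALL_ZERO buys at the C level. This is illustrative only and not part of the patch: the zeroing is performed by the compiler flag (-ftrivial-auto-var-init=zero), not by source changes, and the function below is made up.

/* Sketch only: with -ftrivial-auto-var-init=zero the compiler emits
 * stores that zero these locals on function entry, so the reads below
 * see 0/NULL instead of stale stack contents (which such reads would
 * otherwise leak). */
static int example_zero_init(void)
{
	int index;      /* becomes 0 */
	char *name;     /* becomes NULL */
	char buf[16];   /* fully zeroed, hence NUL-terminated */

	return index + (name ? 1 : 0) + buf[0];
}
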
endchoice diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index 5fd4a64e431f..c173f6fd7aee 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -401,7 +401,7 @@ static struct aa_loaddata *aa_simple_write_to_buffer(const char __user *userbuf, data->size = copy_size; if (copy_from_user(data->data, userbuf, copy_size)) { - kvfree(data); + aa_put_loaddata(data); return ERR_PTR(-EFAULT); } diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c index f7e97c7e80f3..704b0c895605 100644 --- a/security/apparmor/audit.c +++ b/security/apparmor/audit.c @@ -137,7 +137,7 @@ int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa, } if (AUDIT_MODE(profile) == AUDIT_QUIET || (type == AUDIT_APPARMOR_DENIED && - AUDIT_MODE(profile) == AUDIT_QUIET)) + AUDIT_MODE(profile) == AUDIT_QUIET_DENIED)) return aad(sa)->error; if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED) diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index f919ebd042fd..87a9e6fd7908 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -465,7 +465,7 @@ restart: * xattrs, or a longer match */ candidate = profile; - candidate_len = profile->xmatch_len; + candidate_len = max(count, profile->xmatch_len); candidate_xattrs = ret; conflict = false; } diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h index 7d27db740bc2..ac5054899f6f 100644 --- a/security/apparmor/include/lib.h +++ b/security/apparmor/include/lib.h @@ -22,6 +22,11 @@ */ #define DEBUG_ON (aa_g_debug) +/* + * split individual debug cases out in preparation for finer grained + * debug controls in the future. + */ +#define AA_DEBUG_LABEL DEBUG_ON #define dbg_printk(__fmt, __args...) pr_debug(__fmt, ##__args) #define AA_DEBUG(fmt, args...) 
\ do { \ diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h index b5b4b8190e65..b5aa4231af68 100644 --- a/security/apparmor/include/policy.h +++ b/security/apparmor/include/policy.h @@ -135,7 +135,7 @@ struct aa_profile { const char *attach; struct aa_dfa *xmatch; - int xmatch_len; + unsigned int xmatch_len; enum audit_mode audit; long mode; u32 path_flags; diff --git a/security/apparmor/label.c b/security/apparmor/label.c index 6222fdfebe4e..66bc4704f804 100644 --- a/security/apparmor/label.c +++ b/security/apparmor/label.c @@ -1632,9 +1632,9 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns, AA_BUG(!str && size != 0); AA_BUG(!label); - if (flags & FLAG_ABS_ROOT) { + if (AA_DEBUG_LABEL && (flags & FLAG_ABS_ROOT)) { ns = root_ns; - len = snprintf(str, size, "="); + len = snprintf(str, size, "_"); update_for_len(total, len, size, str); } else if (!ns) { ns = labels_ns(label); @@ -1745,7 +1745,7 @@ void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns, if (!use_label_hname(ns, label, flags) || display_mode(ns, label, flags)) { len = aa_label_asxprint(&name, ns, label, flags, gfp); - if (len == -1) { + if (len < 0) { AA_DEBUG("label print error"); return; } @@ -1773,7 +1773,7 @@ void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns, int len; len = aa_label_asxprint(&str, ns, label, flags, gfp); - if (len == -1) { + if (len < 0) { AA_DEBUG("label print error"); return; } @@ -1796,7 +1796,7 @@ void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags, int len; len = aa_label_asxprint(&str, ns, label, flags, gfp); - if (len == -1) { + if (len < 0) { AA_DEBUG("label print error"); return; } @@ -1896,7 +1896,8 @@ struct aa_label *aa_label_strn_parse(struct aa_label *base, const char *str, AA_BUG(!str); str = skipn_spaces(str, n); - if (str == NULL || (*str == '=' && base != &root_ns->unconfined->label)) + if (str == NULL || (AA_DEBUG_LABEL && *str == '_' && + base != &root_ns->unconfined->label)) return ERR_PTR(-EINVAL); len = label_count_strn_entries(str, end - str); diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c index e0828ee7a345..e64f76d347d6 100644 --- a/security/apparmor/mount.c +++ b/security/apparmor/mount.c @@ -229,7 +229,8 @@ static const char * const mnt_info_table[] = { "failed srcname match", "failed type match", "failed flags match", - "failed data match" + "failed data match", + "failed perms check" }; /* @@ -284,8 +285,8 @@ static int do_match_mnt(struct aa_dfa *dfa, unsigned int start, return 0; } - /* failed at end of flags match */ - return 4; + /* failed at perms check, don't confuse with flags match */ + return 6; } @@ -718,6 +719,7 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path, aa_put_label(target); goto out; } + aa_put_label(target); } else /* already audited error */ error = PTR_ERR(target); diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index dc345ac93205..556ef65ab6ee 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -746,16 +746,18 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) profile->label.flags |= FLAG_HAT; if (!unpack_u32(e, &tmp, NULL)) goto fail; - if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) + if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) { profile->mode = APPARMOR_COMPLAIN; - else if (tmp == PACKED_MODE_ENFORCE) + } else if (tmp == PACKED_MODE_ENFORCE) { profile->mode = 
APPARMOR_ENFORCE; - else if (tmp == PACKED_MODE_KILL) + } else if (tmp == PACKED_MODE_KILL) { profile->mode = APPARMOR_KILL; - else if (tmp == PACKED_MODE_UNCONFINED) + } else if (tmp == PACKED_MODE_UNCONFINED) { profile->mode = APPARMOR_UNCONFINED; - else + profile->label.flags |= FLAG_UNCONFINED; + } else { goto fail; + } if (!unpack_u32(e, &tmp, NULL)) goto fail; if (tmp) diff --git a/security/commoncap.c b/security/commoncap.c index 4bdb44c9e698..09b402c88f46 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -392,8 +392,10 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, &tmpbuf, size, GFP_NOFS); dput(dentry); - if (ret < 0 || !tmpbuf) - return ret; + if (ret < 0 || !tmpbuf) { + size = ret; + goto out_free; + } fs_ns = inode->i_sb->s_user_ns; cap = (struct vfs_cap_data *) tmpbuf; diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index a6dd47eb086d..168c3b78ac47 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -73,7 +73,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo) { long rc; const char *algo; - struct crypto_shash **tfm, *tmp_tfm = NULL; + struct crypto_shash **tfm, *tmp_tfm; struct shash_desc *desc; if (type == EVM_XATTR_HMAC) { @@ -118,16 +118,13 @@ unlock: alloc: desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm), GFP_KERNEL); - if (!desc) { - crypto_free_shash(tmp_tfm); + if (!desc) return ERR_PTR(-ENOMEM); - } desc->tfm = *tfm; rc = crypto_shash_init(desc); if (rc) { - crypto_free_shash(tmp_tfm); kfree(desc); return ERR_PTR(rc); } diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index 9e72edb8d31a..755af0b29e75 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -69,10 +69,9 @@ choice hash, defined as 20 bytes, and a null terminated pathname, limited to 255 characters. The 'ima-ng' measurement list template permits both larger hash digests and longer - pathnames. + pathnames. The configured default template can be replaced + by specifying "ima_template=" on the boot command line. - config IMA_TEMPLATE - bool "ima" config IMA_NG_TEMPLATE bool "ima-ng (default)" config IMA_SIG_TEMPLATE @@ -82,7 +81,6 @@ endchoice config IMA_DEFAULT_TEMPLATE string depends on IMA - default "ima" if IMA_TEMPLATE default "ima-ng" if IMA_NG_TEMPLATE default "ima-sig" if IMA_SIG_TEMPLATE @@ -102,19 +100,19 @@ choice config IMA_DEFAULT_HASH_SHA256 bool "SHA256" - depends on CRYPTO_SHA256=y && !IMA_TEMPLATE + depends on CRYPTO_SHA256=y config IMA_DEFAULT_HASH_SHA512 bool "SHA512" - depends on CRYPTO_SHA512=y && !IMA_TEMPLATE + depends on CRYPTO_SHA512=y config IMA_DEFAULT_HASH_WP512 bool "WP512" - depends on CRYPTO_WP512=y && !IMA_TEMPLATE + depends on CRYPTO_WP512=y config IMA_DEFAULT_HASH_SM3 bool "SM3" - depends on CRYPTO_SM3=y && !IMA_TEMPLATE + depends on CRYPTO_SM3=y endchoice config IMA_DEFAULT_HASH diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 3dd8c2e4314e..7122a359a268 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -396,7 +396,8 @@ int ima_appraise_measurement(enum ima_hooks func, goto out; } - status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint); + status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, + rc < 0 ? 
0 : rc, iint); switch (status) { case INTEGRITY_PASS: case INTEGRITY_PASS_IMMUTABLE: diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index f6a7e9643b54..b1e5e7749e41 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -205,6 +205,7 @@ out_array: crypto_free_shash(ima_algo_array[i].tfm); } + kfree(ima_algo_array); out: crypto_free_shash(ima_shash_tfm); return rc; diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index e737c216efc4..18569adcb4fe 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -1805,6 +1805,10 @@ bool ima_appraise_signature(enum kernel_read_file_id id) if (id >= READING_MAX_ID) return false; + if (id == READING_KEXEC_IMAGE && !(ima_appraise & IMA_APPRAISE_ENFORCE) + && security_locked_down(LOCKDOWN_KEXEC)) + return false; + func = read_idmap[id] ?: FILE_CHECK; rcu_read_lock(); diff --git a/security/integrity/platform_certs/keyring_handler.h b/security/integrity/platform_certs/keyring_handler.h index 2462bfa08fe3..cd06bd6072be 100644 --- a/security/integrity/platform_certs/keyring_handler.h +++ b/security/integrity/platform_certs/keyring_handler.h @@ -30,3 +30,11 @@ efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type); efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type); #endif + +#ifndef UEFI_QUIRK_SKIP_CERT +#define UEFI_QUIRK_SKIP_CERT(vendor, product) \ + .matches = { \ + DMI_MATCH(DMI_BOARD_VENDOR, vendor), \ + DMI_MATCH(DMI_PRODUCT_NAME, product), \ + }, +#endif diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c index f290f78c3f30..185c609c6e38 100644 --- a/security/integrity/platform_certs/load_uefi.c +++ b/security/integrity/platform_certs/load_uefi.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -11,6 +12,31 @@ #include "../integrity.h" #include "keyring_handler.h" +/* + * On T2 Macs reading the db and dbx efi variables to load UEFI Secure Boot + * certificates causes occurrence of a page fault in Apple's firmware and + * a crash disabling EFI runtime services. The following quirk skips reading + * these variables. + */ +static const struct dmi_system_id uefi_skip_cert[] = { + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,1") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,2") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,3") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,4") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,1") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,2") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,3") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,4") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir8,1") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir8,2") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir9,1") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "Macmini8,1") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacPro7,1") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,1") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,2") }, + { } +}; + /* * Look to see if a UEFI variable called MokIgnoreDB exists and return true if * it does. 
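
Before the call-site hunk below, a minimal sketch of the dmi_first_match() pattern that a quirk table like uefi_skip_cert relies on. The table and function names here are illustrative stand-ins, but DMI_MATCH() and dmi_first_match() are the standard <linux/dmi.h> API.

#include <linux/dmi.h>

static const struct dmi_system_id example_quirks[] = {
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro15,1"),
		},
	},
	{ }	/* the empty entry terminates the table */
};

static bool example_platform_quirked(void)
{
	/* returns the first matching entry, or NULL if none matches */
	return dmi_first_match(example_quirks) != NULL;
}
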
@@ -137,6 +163,13 @@ static int __init load_uefi_certs(void) unsigned long dbsize = 0, dbxsize = 0, mokxsize = 0; efi_status_t status; int rc = 0; + const struct dmi_system_id *dmi_id; + + dmi_id = dmi_first_match(uefi_skip_cert); + if (dmi_id) { + pr_err("Reading UEFI Secure Boot Certs is not supported on T2 Macs.\n"); + return false; + } if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) return false; diff --git a/security/security.c b/security/security.c index baa63edb53e2..615c42becb3e 100644 --- a/security/security.c +++ b/security/security.c @@ -2195,15 +2195,16 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk) } EXPORT_SYMBOL(security_sk_clone); -void security_sk_classify_flow(struct sock *sk, struct flowi *fl) +void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic) { - call_void_hook(sk_getsecid, sk, &fl->flowi_secid); + call_void_hook(sk_getsecid, sk, &flic->flowic_secid); } EXPORT_SYMBOL(security_sk_classify_flow); -void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) +void security_req_classify_flow(const struct request_sock *req, + struct flowi_common *flic) { - call_void_hook(req_classify_flow, req, fl); + call_void_hook(req_classify_flow, req, flic); } EXPORT_SYMBOL(security_req_classify_flow); @@ -2395,7 +2396,7 @@ int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) int security_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, - const struct flowi *fl) + const struct flowi_common *flic) { struct security_hook_list *hp; int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match); @@ -2411,7 +2412,7 @@ int security_xfrm_state_pol_flow_match(struct xfrm_state *x, */ hlist_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match, list) { - rc = hp->hook.xfrm_state_pol_flow_match(x, xp, fl); + rc = hp->hook.xfrm_state_pol_flow_match(x, xp, flic); break; } return rc; @@ -2422,9 +2423,9 @@ int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) return call_int_hook(xfrm_decode_session, 0, skb, secid, 1); } -void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) +void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic) { - int rc = call_int_hook(xfrm_decode_session, 0, skb, &fl->flowi_secid, + int rc = call_int_hook(xfrm_decode_session, 0, skb, &flic->flowic_secid, 0); BUG_ON(rc); diff --git a/security/selinux/Makefile b/security/selinux/Makefile index 4d8e0e8adf0b..8e8102b558b4 100644 --- a/security/selinux/Makefile +++ b/security/selinux/Makefile @@ -10,6 +10,8 @@ selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \ ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \ ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/context.o +selinux-$(CONFIG_ANDROID_VENDOR_HOOKS) += vendor_hooks.o + selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o selinux-$(CONFIG_NETLABEL) += netlabel.o diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 3024b84e6871..1235854233c9 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -5507,9 +5507,9 @@ static void selinux_secmark_refcount_dec(void) } static void selinux_req_classify_flow(const struct request_sock *req, - struct flowi *fl) + struct flowi_common *flic) { - fl->flowi_secid = req->secid; + flic->flowic_secid = req->secid; } static int selinux_tun_dev_alloc_security(void **security) diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h index a0b465316292..0a6f34a7a971 100644 
--- a/security/selinux/include/xfrm.h +++ b/security/selinux/include/xfrm.h @@ -26,7 +26,7 @@ int selinux_xfrm_state_delete(struct xfrm_state *x); int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, - const struct flowi *fl); + const struct flowi_common *flic); #ifdef CONFIG_SECURITY_NETWORK_XFRM extern atomic_t selinux_xfrm_refcount; diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c index 7335f67ce54e..e8960a59586c 100644 --- a/security/selinux/ss/hashtab.c +++ b/security/selinux/ss/hashtab.c @@ -178,7 +178,8 @@ int hashtab_duplicate(struct hashtab *new, struct hashtab *orig, kmem_cache_free(hashtab_node_cachep, cur); } } - kmem_cache_free(hashtab_node_cachep, new); + kfree(new->htable); + memset(new, 0, sizeof(*new)); return -ENOMEM; } diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h index be9988ebfa1b..1d758118e224 100644 --- a/security/selinux/ss/policydb.h +++ b/security/selinux/ss/policydb.h @@ -374,6 +374,8 @@ static inline int put_entry(const void *buf, size_t bytes, int num, struct polic { size_t len = bytes * num; + if (len > fp->len) + return -EINVAL; memcpy(fp->data, buf, len); fp->data += len; fp->len -= len; diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 5df46e238244..3d63372e6dac 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -2013,7 +2013,8 @@ static inline int convert_context_handle_invalid_context( * in `newc'. Verify that the context is valid * under the new policy. */ -static int convert_context(struct context *oldc, struct context *newc, void *p) +static int convert_context(struct context *oldc, struct context *newc, void *p, + gfp_t gfp_flags) { struct convert_context_args *args; struct ocontext *oc; @@ -2027,7 +2028,7 @@ static int convert_context(struct context *oldc, struct context *newc, void *p) args = p; if (oldc->str) { - s = kstrdup(oldc->str, GFP_KERNEL); + s = kstrdup(oldc->str, gfp_flags); if (!s) return -ENOMEM; diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c index 656d50b09f76..1981c5af13e0 100644 --- a/security/selinux/ss/sidtab.c +++ b/security/selinux/ss/sidtab.c @@ -325,7 +325,7 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context, } rc = convert->func(context, &dst_convert->context, - convert->args); + convert->args, GFP_ATOMIC); if (rc) { context_destroy(&dst->context); goto out_unlock; @@ -404,7 +404,7 @@ static int sidtab_convert_tree(union sidtab_entry_inner *edst, while (i < SIDTAB_LEAF_ENTRIES && *pos < count) { rc = convert->func(&esrc->ptr_leaf->entries[i].context, &edst->ptr_leaf->entries[i].context, - convert->args); + convert->args, GFP_KERNEL); if (rc) return rc; (*pos)++; diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h index 4eff0e49dcb2..9fce0d553fe2 100644 --- a/security/selinux/ss/sidtab.h +++ b/security/selinux/ss/sidtab.h @@ -65,7 +65,7 @@ struct sidtab_isid_entry { }; struct sidtab_convert_params { - int (*func)(struct context *oldc, struct context *newc, void *args); + int (*func)(struct context *oldc, struct context *newc, void *args, gfp_t gfp_flags); void *args; struct sidtab *target; }; diff --git a/security/selinux/vendor_hooks.c b/security/selinux/vendor_hooks.c new file mode 100644 index 000000000000..3802e8233289 --- /dev/null +++ b/security/selinux/vendor_hooks.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only 
+/* vendor_hook.c + * + * Copyright 2022 Google LLC + */ + +#ifndef __GENKSYMS__ +#include "security.h" +#endif + +#define CREATE_TRACE_POINTS +#include +#include +#include + +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_selinux_is_initialized); + +/* + * For type visibility + */ +struct selinux_state *GKI_struct_selinux_state; +EXPORT_SYMBOL_GPL(GKI_struct_selinux_state); diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index 00e95f8bd7c7..114245b6f7c7 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c @@ -175,9 +175,10 @@ int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) */ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, - const struct flowi *fl) + const struct flowi_common *flic) { u32 state_sid; + u32 flic_sid; if (!xp->security) if (x->security) @@ -196,17 +197,17 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, return 0; state_sid = x->security->ctx_sid; + flic_sid = flic->flowic_secid; - if (fl->flowi_secid != state_sid) + if (flic_sid != state_sid) return 0; /* We don't need a separate SA Vs. policy polmatch check since the SA * is now of the same label as the flow and a flow Vs. policy polmatch * check had already happened in selinux_xfrm_policy_lookup() above. */ - return (avc_has_perm(&selinux_state, - fl->flowi_secid, state_sid, - SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, - NULL) ? 0 : 1); + return (avc_has_perm(&selinux_state, flic_sid, state_sid, + SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, + NULL) ? 0 : 1); } static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb) diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c index faf6b03131ee..51ed2f34b276 100644 --- a/sound/aoa/soundbus/i2sbus/core.c +++ b/sound/aoa/soundbus/i2sbus/core.c @@ -147,6 +147,7 @@ static int i2sbus_get_and_fixup_rsrc(struct device_node *np, int index, return rc; } +/* Returns 1 if added, 0 for otherwise; don't return a negative value! */ /* FIXME: look at device node refcounting */ static int i2sbus_add_dev(struct macio_dev *macio, struct i2sbus_control *control, @@ -213,7 +214,7 @@ static int i2sbus_add_dev(struct macio_dev *macio, * either as the second one in that case is just a modem. 
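 * Note the contract documented above i2sbus_add_dev() ("Returns 1 if
 * added, 0 for otherwise; don't return a negative value!"): failing
 * this check is not an error, so report "nothing added" with 0 rather
 * than a negative errno that a caller honoring the 0/1 contract would
 * misinterpret.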
*/ if (!ok) { kfree(dev); - return -ENODEV; + return 0; } mutex_init(&dev->lock); @@ -302,6 +303,10 @@ static int i2sbus_add_dev(struct macio_dev *macio, if (soundbus_add_one(&dev->sound)) { printk(KERN_DEBUG "i2sbus: device registration error!\n"); + if (dev->sound.ofdev.dev.kobj.state_initialized) { + soundbus_dev_put(&dev->sound); + return 0; + } goto err; } diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c index 97467f6a32a1..980ab3580f1b 100644 --- a/sound/core/control_compat.c +++ b/sound/core/control_compat.c @@ -304,7 +304,9 @@ static int ctl_elem_read_user(struct snd_card *card, err = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (err < 0) goto error; + down_read(&card->controls_rwsem); err = snd_ctl_elem_read(card, data); + up_read(&card->controls_rwsem); if (err < 0) goto error; err = copy_ctl_value_to_user(userdata, valuep, data, type, count); @@ -332,7 +334,9 @@ static int ctl_elem_write_user(struct snd_ctl_file *file, err = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (err < 0) goto error; + down_write(&card->controls_rwsem); err = snd_ctl_elem_write(card, file, data); + up_write(&card->controls_rwsem); if (err < 0) goto error; err = copy_ctl_value_to_user(userdata, valuep, data, type, count); diff --git a/sound/core/info.c b/sound/core/info.c index 9fec3070f8ba..d6fb11c3250c 100644 --- a/sound/core/info.c +++ b/sound/core/info.c @@ -112,9 +112,9 @@ static loff_t snd_info_entry_llseek(struct file *file, loff_t offset, int orig) entry = data->entry; mutex_lock(&entry->access); if (entry->c.ops->llseek) { - offset = entry->c.ops->llseek(entry, - data->file_private_data, - file, offset, orig); + ret = entry->c.ops->llseek(entry, + data->file_private_data, + file, offset, orig); goto out; } diff --git a/sound/core/misc.c b/sound/core/misc.c index 3579dd7a161f..c3f3d94b5197 100644 --- a/sound/core/misc.c +++ b/sound/core/misc.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #ifdef CONFIG_SND_DEBUG @@ -145,3 +146,96 @@ snd_pci_quirk_lookup(struct pci_dev *pci, const struct snd_pci_quirk *list) } EXPORT_SYMBOL(snd_pci_quirk_lookup); #endif + +/* + * Deferred async signal helpers + * + * Below are a few helper functions to wrap the async signal handling + * in the deferred work. The main purpose is to avoid the messy deadlock + * around tasklist_lock and co at the kill_fasync() invocation. + * fasync_helper() and kill_fasync() are replaced with snd_fasync_helper() + * and snd_kill_fasync(), respectively. In addition, snd_fasync_free() has + * to be called at releasing the relevant file object. 
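+ *
+ * A minimal usage sketch (with a hypothetical "my_dev" owner struct,
+ * for illustration only), mirroring how snd_timer_user is converted
+ * later in this patch:
+ *
+ *	struct my_dev { struct snd_fasync *fasync; };
+ *
+ *	in the .fasync file operation:
+ *		return snd_fasync_helper(fd, file, on, &mydev->fasync);
+ *	from event context (possibly atomic):
+ *		snd_kill_fasync(mydev->fasync, SIGIO, POLL_IN);
+ *	in the .release file operation, after the last user is gone:
+ *		snd_fasync_free(mydev->fasync);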
+ */ +struct snd_fasync { + struct fasync_struct *fasync; + int signal; + int poll; + int on; + struct list_head list; +}; + +static DEFINE_SPINLOCK(snd_fasync_lock); +static LIST_HEAD(snd_fasync_list); + +static void snd_fasync_work_fn(struct work_struct *work) +{ + struct snd_fasync *fasync; + + spin_lock_irq(&snd_fasync_lock); + while (!list_empty(&snd_fasync_list)) { + fasync = list_first_entry(&snd_fasync_list, struct snd_fasync, list); + list_del_init(&fasync->list); + spin_unlock_irq(&snd_fasync_lock); + if (fasync->on) + kill_fasync(&fasync->fasync, fasync->signal, fasync->poll); + spin_lock_irq(&snd_fasync_lock); + } + spin_unlock_irq(&snd_fasync_lock); +} + +static DECLARE_WORK(snd_fasync_work, snd_fasync_work_fn); + +int snd_fasync_helper(int fd, struct file *file, int on, + struct snd_fasync **fasyncp) +{ + struct snd_fasync *fasync = NULL; + + if (on) { + fasync = kzalloc(sizeof(*fasync), GFP_KERNEL); + if (!fasync) + return -ENOMEM; + INIT_LIST_HEAD(&fasync->list); + } + + spin_lock_irq(&snd_fasync_lock); + if (*fasyncp) { + kfree(fasync); + fasync = *fasyncp; + } else { + if (!fasync) { + spin_unlock_irq(&snd_fasync_lock); + return 0; + } + *fasyncp = fasync; + } + fasync->on = on; + spin_unlock_irq(&snd_fasync_lock); + return fasync_helper(fd, file, on, &fasync->fasync); +} +EXPORT_SYMBOL_GPL(snd_fasync_helper); + +void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll) +{ + unsigned long flags; + + if (!fasync || !fasync->on) + return; + spin_lock_irqsave(&snd_fasync_lock, flags); + fasync->signal = signal; + fasync->poll = poll; + list_move(&fasync->list, &snd_fasync_list); + schedule_work(&snd_fasync_work); + spin_unlock_irqrestore(&snd_fasync_lock, flags); +} +EXPORT_SYMBOL_GPL(snd_kill_fasync); + +void snd_fasync_free(struct snd_fasync *fasync) +{ + if (!fasync) + return; + fasync->on = 0; + flush_work(&snd_fasync_work); + kfree(fasync); +} +EXPORT_SYMBOL_GPL(snd_fasync_free); diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index f88de74da1eb..de6f94bee50b 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1662,13 +1662,14 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) runtime = substream->runtime; if (atomic_read(&substream->mmap_count)) goto __direct; - if ((err = snd_pcm_oss_make_ready(substream)) < 0) - return err; atomic_inc(&runtime->oss.rw_ref); if (mutex_lock_interruptible(&runtime->oss.params_lock)) { atomic_dec(&runtime->oss.rw_ref); return -ERESTARTSYS; } + err = snd_pcm_oss_make_ready_locked(substream); + if (err < 0) + goto unlock; format = snd_pcm_oss_format_from(runtime->oss.format); width = snd_pcm_format_physical_width(format); if (runtime->oss.buffer_used > 0) { diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c index c54dce1e6aef..c293775ba234 100644 --- a/sound/core/pcm_dmaengine.c +++ b/sound/core/pcm_dmaengine.c @@ -133,6 +133,7 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_set_config_from_dai_data); static void dmaengine_pcm_dma_complete(void *arg) { + unsigned int new_pos; struct snd_pcm_substream *substream = arg; struct dmaengine_pcm_runtime_data *prtd; @@ -144,9 +145,10 @@ static void dmaengine_pcm_dma_complete(void *arg) prtd = substream_to_prtd(substream); - prtd->pos += snd_pcm_lib_period_bytes(substream); - if (prtd->pos >= snd_pcm_lib_buffer_bytes(substream)) - prtd->pos = 0; + new_pos = prtd->pos + snd_pcm_lib_period_bytes(substream); + if (new_pos >= snd_pcm_lib_buffer_bytes(substream)) + new_pos = 0; + prtd->pos = new_pos; 
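+	/* prtd->pos is published with a single store so that a concurrent
+	 * pointer callback never observes the transient, not-yet-wrapped
+	 * value (hence new_pos is computed locally above)
+	 */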
snd_pcm_stream_unlock_irq(substream); snd_pcm_period_elapsed(substream); diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c index a9a0d74f3165..191883842a35 100644 --- a/sound/core/pcm_memory.c +++ b/sound/core/pcm_memory.c @@ -434,7 +434,6 @@ EXPORT_SYMBOL(snd_pcm_lib_malloc_pages); */ int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream) { - struct snd_card *card = substream->pcm->card; struct snd_pcm_runtime *runtime; if (PCM_RUNTIME_CHECK(substream)) @@ -443,6 +442,8 @@ int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream) if (runtime->dma_area == NULL) return 0; if (runtime->dma_buffer_p != &substream->dma_buffer) { + struct snd_card *card = substream->pcm->card; + /* it's a newly allocated buffer. release it now. */ do_free_pages(card, runtime->dma_buffer_p); kfree(runtime->dma_buffer_p); diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c index 257d412eac5d..30f0f96e0000 100644 --- a/sound/core/pcm_misc.c +++ b/sound/core/pcm_misc.c @@ -429,7 +429,7 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int return 0; width = pcm_formats[(INT)format].phys; /* physical width */ pat = pcm_formats[(INT)format].silence; - if (! width) + if (!width || !pat) return -EINVAL; /* signed or 1 byte data */ if (pcm_formats[(INT)format].signd == 1 || width <= 8) { diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 257ad5206240..0d91143eb464 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -1736,10 +1736,8 @@ static int snd_rawmidi_free(struct snd_rawmidi *rmidi) snd_info_free_entry(rmidi->proc_entry); rmidi->proc_entry = NULL; - mutex_lock(®ister_mutex); if (rmidi->ops && rmidi->ops->dev_unregister) rmidi->ops->dev_unregister(rmidi); - mutex_unlock(®ister_mutex); snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT]); snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT]); diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c index 2ddfe2226651..f73ee0798aea 100644 --- a/sound/core/seq/oss/seq_oss_midi.c +++ b/sound/core/seq/oss/seq_oss_midi.c @@ -267,7 +267,9 @@ snd_seq_oss_midi_clear_all(void) void snd_seq_oss_midi_setup(struct seq_oss_devinfo *dp) { + spin_lock_irq(®ister_lock); dp->max_mididev = max_midi_devs; + spin_unlock_irq(®ister_lock); } /* diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index cc93157fa950..0363670a56e7 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -121,13 +121,13 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid) spin_unlock_irqrestore(&clients_lock, flags); #ifdef CONFIG_MODULES if (!in_interrupt()) { - static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS]; - static char card_requested[SNDRV_CARDS]; + static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS); + static DECLARE_BITMAP(card_requested, SNDRV_CARDS); + if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) { int idx; - if (!client_requested[clientid]) { - client_requested[clientid] = 1; + if (!test_and_set_bit(clientid, client_requested)) { for (idx = 0; idx < 15; idx++) { if (seq_client_load[idx] < 0) break; @@ -142,10 +142,8 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid) int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) / SNDRV_SEQ_CLIENTS_PER_CARD; if (card < snd_ecards_limit) { - if (! 
card_requested[card]) { - card_requested[card] = 1; + if (!test_and_set_bit(card, card_requested)) snd_request_card(card); - } snd_seq_device_load_drivers(); } } diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index 65db1a7c77b7..bb76a2dd0a2f 100644 --- a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c @@ -112,15 +112,19 @@ EXPORT_SYMBOL(snd_seq_dump_var_event); * expand the variable length event to linear buffer space. */ -static int seq_copy_in_kernel(char **bufptr, const void *src, int size) +static int seq_copy_in_kernel(void *ptr, void *src, int size) { + char **bufptr = ptr; + memcpy(*bufptr, src, size); *bufptr += size; return 0; } -static int seq_copy_in_user(char __user **bufptr, const void *src, int size) +static int seq_copy_in_user(void *ptr, void *src, int size) { + char __user **bufptr = ptr; + if (copy_to_user(*bufptr, src, size)) return -EFAULT; *bufptr += size; @@ -149,8 +153,7 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char return newlen; } err = snd_seq_dump_var_event(event, - in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel : - (snd_seq_dump_func_t)seq_copy_in_user, + in_kernel ? seq_copy_in_kernel : seq_copy_in_user, &buf); return err < 0 ? err : newlen; } diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c index 610f317bea9d..99874e80b682 100644 --- a/sound/core/sound_oss.c +++ b/sound/core/sound_oss.c @@ -162,7 +162,6 @@ int snd_unregister_oss_device(int type, struct snd_card *card, int dev) mutex_unlock(&sound_oss_mutex); return -ENOENT; } - unregister_sound_special(minor); switch (SNDRV_MINOR_OSS_DEVICE(minor)) { case SNDRV_MINOR_OSS_PCM: track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_AUDIO); @@ -174,12 +173,18 @@ int snd_unregister_oss_device(int type, struct snd_card *card, int dev) track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_DMMIDI1); break; } - if (track2 >= 0) { - unregister_sound_special(track2); + if (track2 >= 0) snd_oss_minors[track2] = NULL; - } snd_oss_minors[minor] = NULL; mutex_unlock(&sound_oss_mutex); + + /* call unregister_sound_special() outside sound_oss_mutex; + * otherwise may deadlock, as it can trigger the release of a card + */ + unregister_sound_special(minor); + if (track2 >= 0) + unregister_sound_special(track2); + kfree(mptr); return 0; } diff --git a/sound/core/timer.c b/sound/core/timer.c index 04cd8953605a..764d2b19344e 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -83,7 +83,7 @@ struct snd_timer_user { unsigned int filter; struct timespec64 tstamp; /* trigger tstamp */ wait_queue_head_t qchange_sleep; - struct fasync_struct *fasync; + struct snd_fasync *fasync; struct mutex ioctl_lock; }; @@ -1345,7 +1345,7 @@ static void snd_timer_user_interrupt(struct snd_timer_instance *timeri, } __wake: spin_unlock(&tu->qlock); - kill_fasync(&tu->fasync, SIGIO, POLL_IN); + snd_kill_fasync(tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } @@ -1383,7 +1383,7 @@ static void snd_timer_user_ccallback(struct snd_timer_instance *timeri, spin_lock_irqsave(&tu->qlock, flags); snd_timer_user_append_to_tqueue(tu, &r1); spin_unlock_irqrestore(&tu->qlock, flags); - kill_fasync(&tu->fasync, SIGIO, POLL_IN); + snd_kill_fasync(tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } @@ -1453,7 +1453,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri, spin_unlock(&tu->qlock); if (append == 0) return; - kill_fasync(&tu->fasync, SIGIO, POLL_IN); + snd_kill_fasync(tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } @@ 
-1521,6 +1521,7 @@ static int snd_timer_user_release(struct inode *inode, struct file *file) snd_timer_instance_free(tu->timeri); } mutex_unlock(&tu->ioctl_lock); + snd_fasync_free(tu->fasync); kfree(tu->queue); kfree(tu->tqueue); kfree(tu); @@ -2135,7 +2136,7 @@ static int snd_timer_user_fasync(int fd, struct file * file, int on) struct snd_timer_user *tu; tu = file->private_data; - return fasync_helper(fd, file, on, &tu->fasync); + return snd_fasync_helper(fd, file, on, &tu->fasync); } static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 51821334fcee..72d554593cff 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -625,10 +625,10 @@ static unsigned int loopback_jiffies_timer_pos_update cable->streams[SNDRV_PCM_STREAM_PLAYBACK]; struct loopback_pcm *dpcm_capt = cable->streams[SNDRV_PCM_STREAM_CAPTURE]; - unsigned long delta_play = 0, delta_capt = 0; + unsigned long delta_play = 0, delta_capt = 0, cur_jiffies; unsigned int running, count1, count2; - unsigned long cur_jiffies = cycles_to_jiffies(); + cur_jiffies = cycles_to_jiffies(); running = cable->running ^ cable->pause; if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) { delta_play = cur_jiffies - dpcm_play->last_jiffies; diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c index e93eb4616c5f..c739173c668f 100644 --- a/sound/firewire/fireworks/fireworks_hwdep.c +++ b/sound/firewire/fireworks/fireworks_hwdep.c @@ -34,6 +34,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained, type = SNDRV_FIREWIRE_EVENT_EFW_RESPONSE; if (copy_to_user(buf, &type, sizeof(type))) return -EFAULT; + count += sizeof(type); remained -= sizeof(type); buf += sizeof(type); diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c index 3e9e9ac804f6..b7e5032b61c9 100644 --- a/sound/hda/hdac_device.c +++ b/sound/hda/hdac_device.c @@ -660,6 +660,7 @@ static const struct hda_vendor_id hda_vendor_ids[] = { { 0x14f1, "Conexant" }, { 0x17e8, "Chrontel" }, { 0x1854, "LG" }, + { 0x19e5, "Huawei" }, { 0x1aec, "Wolfson Microelectronics" }, { 0x1af4, "QEMU" }, { 0x434d, "C-Media" }, diff --git a/sound/hda/hdac_sysfs.c b/sound/hda/hdac_sysfs.c index e56e83325903..bcf302f5115a 100644 --- a/sound/hda/hdac_sysfs.c +++ b/sound/hda/hdac_sysfs.c @@ -346,8 +346,10 @@ static int add_widget_node(struct kobject *parent, hda_nid_t nid, return -ENOMEM; kobject_init(kobj, &widget_ktype); err = kobject_add(kobj, parent, "%02x", nid); - if (err < 0) + if (err < 0) { + kobject_put(kobj); return err; + } err = sysfs_create_group(kobj, group); if (err < 0) { kobject_put(kobj); diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c index e2237239d922..8714891f50b0 100644 --- a/sound/hda/intel-nhlt.c +++ b/sound/hda/intel-nhlt.c @@ -55,20 +55,26 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt) /* find max number of channels based on format_configuration */ if (fmt_configs->fmt_count) { - dev_dbg(dev, "%s: found %d format definitions\n", - __func__, fmt_configs->fmt_count); + struct nhlt_fmt_cfg *fmt_cfg = fmt_configs->fmt_config; + + dev_dbg(dev, "found %d format definitions\n", + fmt_configs->fmt_count); for (i = 0; i < fmt_configs->fmt_count; i++) { struct wav_fmt_ext *fmt_ext; - fmt_ext = &fmt_configs->fmt_config[i].fmt_ext; + fmt_ext = &fmt_cfg->fmt_ext; if (fmt_ext->fmt.channels > max_ch) max_ch = fmt_ext->fmt.channels; + + /* Move to the next nhlt_fmt_cfg */ + fmt_cfg = (struct 
nhlt_fmt_cfg *)(fmt_cfg->config.caps + + fmt_cfg->config.size); } - dev_dbg(dev, "%s: max channels found %d\n", __func__, max_ch); + dev_dbg(dev, "max channels found %d\n", max_ch); } else { - dev_dbg(dev, "%s: No format information found\n", __func__); + dev_dbg(dev, "No format information found\n"); } if (cfg->device_config.config_type != NHLT_CONFIG_TYPE_MIC_ARRAY) { @@ -95,17 +101,16 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt) } if (dmic_geo > 0) { - dev_dbg(dev, "%s: Array with %d dmics\n", __func__, dmic_geo); + dev_dbg(dev, "Array with %d dmics\n", dmic_geo); } if (max_ch > dmic_geo) { - dev_dbg(dev, "%s: max channels %d exceed dmic number %d\n", - __func__, max_ch, dmic_geo); + dev_dbg(dev, "max channels %d exceed dmic number %d\n", + max_ch, dmic_geo); } } } - dev_dbg(dev, "%s: dmic number %d max_ch %d\n", - __func__, dmic_geo, max_ch); + dev_dbg(dev, "dmic number %d max_ch %d\n", dmic_geo, max_ch); return dmic_geo; } diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c index d6420d224d09..09b368761cc0 100644 --- a/sound/isa/wavefront/wavefront_synth.c +++ b/sound/isa/wavefront/wavefront_synth.c @@ -1088,7 +1088,8 @@ wavefront_send_sample (snd_wavefront_t *dev, if (dataptr < data_end) { - __get_user (sample_short, dataptr); + if (get_user(sample_short, dataptr)) + return -EFAULT; dataptr += skip; if (data_is_unsigned) { /* GUS ? */ diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c index 963731cf0d8c..cd66632bf1c3 100644 --- a/sound/pci/ac97/ac97_codec.c +++ b/sound/pci/ac97/ac97_codec.c @@ -1946,6 +1946,7 @@ static int snd_ac97_dev_register(struct snd_device *device) snd_ac97_get_short_name(ac97)); if ((err = device_register(&ac97->dev)) < 0) { ac97_err(ac97, "Can't register ac97 bus\n"); + put_device(&ac97->dev); ac97->dev.bus = NULL; return err; } diff --git a/sound/pci/au88x0/au88x0.h b/sound/pci/au88x0/au88x0.h index 0aa7af049b1b..6cbb2bc4a048 100644 --- a/sound/pci/au88x0/au88x0.h +++ b/sound/pci/au88x0/au88x0.h @@ -141,7 +141,7 @@ struct snd_vortex { #ifndef CHIP_AU8810 stream_t dma_wt[NR_WT]; wt_voice_t wt_voice[NR_WT]; /* WT register cache. */ - char mixwt[(NR_WT / NR_WTPB) * 6]; /* WT mixin objects */ + s8 mixwt[(NR_WT / NR_WTPB) * 6]; /* WT mixin objects */ #endif /* Global resources */ @@ -235,8 +235,8 @@ static int vortex_alsafmt_aspfmt(snd_pcm_format_t alsafmt, vortex_t *v); static void vortex_connect_default(vortex_t * vortex, int en); static int vortex_adb_allocroute(vortex_t * vortex, int dma, int nr_ch, int dir, int type, int subdev); -static char vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, - int restype); +static int vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, + int restype); #ifndef CHIP_AU8810 static int vortex_wt_allocroute(vortex_t * vortex, int dma, int nr_ch); static void vortex_wt_connect(vortex_t * vortex, int en); diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c index 5180f1bd1326..0b04436ac017 100644 --- a/sound/pci/au88x0/au88x0_core.c +++ b/sound/pci/au88x0/au88x0_core.c @@ -1998,7 +1998,7 @@ static const int resnum[VORTEX_RESOURCE_LAST] = out: Mean checkout if != 0. Else mean Checkin resource. restype: Indicates type of resource to be checked in or out. 
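 Returns the checked-out resource index, or a negative error code on
 failure, so the return type must be a plain int: "char" may be
 unsigned on some architectures and would mangle a negative value.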
*/ -static char +static int vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, int restype) { int i, qty = resnum[restype], resinuse = 0; diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c index f8ac96cf38a4..06775519dab0 100644 --- a/sound/pci/ctxfi/ctatc.c +++ b/sound/pci/ctxfi/ctatc.c @@ -36,6 +36,7 @@ | ((IEC958_AES3_CON_FS_48000) << 24)) static const struct snd_pci_quirk subsys_20k1_list[] = { + SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0021, "SB046x", CTSB046X), SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0022, "SB055x", CTSB055X), SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x002f, "SB055x", CTSB055X), SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0029, "SB073x", CTSB073X), @@ -64,6 +65,7 @@ static const struct snd_pci_quirk subsys_20k2_list[] = { static const char *ct_subsys_name[NUM_CTCARDS] = { /* 20k1 models */ + [CTSB046X] = "SB046x", [CTSB055X] = "SB055x", [CTSB073X] = "SB073x", [CTUAA] = "UAA", diff --git a/sound/pci/ctxfi/cthardware.h b/sound/pci/ctxfi/cthardware.h index 9e6b83bd432d..b50d61a08e28 100644 --- a/sound/pci/ctxfi/cthardware.h +++ b/sound/pci/ctxfi/cthardware.h @@ -26,8 +26,9 @@ enum CHIPTYP { enum CTCARDS { /* 20k1 models */ + CTSB046X, + CT20K1_MODEL_FIRST = CTSB046X, CTSB055X, - CT20K1_MODEL_FIRST = CTSB055X, CTSB073X, CTUAA, CT20K1_UNKNOWN, diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c index b2ddabb99438..8d2c101d66a2 100644 --- a/sound/pci/emu10k1/emupcm.c +++ b/sound/pci/emu10k1/emupcm.c @@ -123,7 +123,7 @@ static int snd_emu10k1_pcm_channel_alloc(struct snd_emu10k1_pcm * epcm, int voic epcm->voices[0]->epcm = epcm; if (voices > 1) { for (i = 1; i < voices; i++) { - epcm->voices[i] = &epcm->emu->voices[epcm->voices[0]->number + i]; + epcm->voices[i] = &epcm->emu->voices[(epcm->voices[0]->number + i) % NUM_G]; epcm->voices[i]->epcm = epcm; } } diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c index 4dc01647753c..ec821a263036 100644 --- a/sound/pci/hda/hda_auto_parser.c +++ b/sound/pci/hda/hda_auto_parser.c @@ -823,7 +823,7 @@ static void set_pin_targets(struct hda_codec *codec, snd_hda_set_pin_ctl_cache(codec, cfg->nid, cfg->val); } -static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) +void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth) { const char *modelname = codec->fixup_name; @@ -833,7 +833,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) if (++depth > 10) break; if (fix->chained_before) - apply_fixup(codec, fix->chain_id, action, depth + 1); + __snd_hda_apply_fixup(codec, fix->chain_id, action, depth + 1); switch (fix->type) { case HDA_FIXUP_PINS: @@ -874,6 +874,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) id = fix->chain_id; } } +EXPORT_SYMBOL_GPL(__snd_hda_apply_fixup); /** * snd_hda_apply_fixup - Apply the fixup chain with the given action @@ -883,7 +884,7 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth) void snd_hda_apply_fixup(struct hda_codec *codec, int action) { if (codec->fixup_list) - apply_fixup(codec, codec->fixup_id, action, 0); + __snd_hda_apply_fixup(codec, codec->fixup_id, action, 0); } EXPORT_SYMBOL_GPL(snd_hda_apply_fixup); diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c index 53a2b89f8983..e63621bcb214 100644 --- a/sound/pci/hda/hda_beep.c +++ b/sound/pci/hda/hda_beep.c @@ -118,6 +118,12 @@ static int snd_hda_beep_event(struct input_dev *dev, unsigned int type, return 0; } +static void 
turn_on_beep(struct hda_beep *beep) +{ + if (beep->keep_power_at_enable) + snd_hda_power_up_pm(beep->codec); +} + static void turn_off_beep(struct hda_beep *beep) { cancel_work_sync(&beep->beep_work); @@ -125,6 +131,8 @@ static void turn_off_beep(struct hda_beep *beep) /* turn off beep */ generate_tone(beep, 0); } + if (beep->keep_power_at_enable) + snd_hda_power_down_pm(beep->codec); } /** @@ -140,7 +148,9 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable) enable = !!enable; if (beep->enabled != enable) { beep->enabled = enable; - if (!enable) + if (enable) + turn_on_beep(beep); + else turn_off_beep(beep); return 1; } @@ -167,7 +177,8 @@ static int beep_dev_disconnect(struct snd_device *device) input_unregister_device(beep->dev); else input_free_device(beep->dev); - turn_off_beep(beep); + if (beep->enabled) + turn_off_beep(beep); return 0; } diff --git a/sound/pci/hda/hda_beep.h b/sound/pci/hda/hda_beep.h index a25358a4807a..db76e3ddba65 100644 --- a/sound/pci/hda/hda_beep.h +++ b/sound/pci/hda/hda_beep.h @@ -25,6 +25,7 @@ struct hda_beep { unsigned int enabled:1; unsigned int linear_tone:1; /* linear tone for IDT/STAC codec */ unsigned int playing:1; + unsigned int keep_power_at_enable:1; /* set by driver */ struct work_struct beep_work; /* scheduled task for beep event */ struct mutex mutex; void (*power_hook)(struct hda_beep *beep, bool on); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 600ea241ead7..494bfd2135a9 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2584,9 +2584,12 @@ static const struct pci_device_id azx_ids[] = { /* 5 Series/3400 */ { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, + { PCI_DEVICE(0x8086, 0x3b57), + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, /* Poulsbo */ { PCI_DEVICE(0x8086, 0x811b), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE }, + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE | + AZX_DCAPS_POSFIX_LPIB }, /* Oaktrail */ { PCI_DEVICE(0x8086, 0x080a), .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE }, @@ -2746,6 +2749,9 @@ static const struct pci_device_id azx_ids[] = { { PCI_DEVICE(0x1002, 0xab28), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS | AZX_DCAPS_PM_RUNTIME }, + { PCI_DEVICE(0x1002, 0xab30), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS | + AZX_DCAPS_PM_RUNTIME }, { PCI_DEVICE(0x1002, 0xab38), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS | AZX_DCAPS_PM_RUNTIME }, diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h index 5beb8aa44ecd..efc0c68a5442 100644 --- a/sound/pci/hda/hda_local.h +++ b/sound/pci/hda/hda_local.h @@ -357,6 +357,7 @@ void snd_hda_apply_verbs(struct hda_codec *codec); void snd_hda_apply_pincfgs(struct hda_codec *codec, const struct hda_pintbl *cfg); void snd_hda_apply_fixup(struct hda_codec *codec, int action); +void __snd_hda_apply_fixup(struct hda_codec *codec, int id, int action, int depth); void snd_hda_pick_fixup(struct hda_codec *codec, const struct hda_model_fixup *models, const struct snd_pci_quirk *quirk, diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c index 07787698b973..17b06f7b69ee 100644 --- a/sound/pci/hda/hda_tegra.c +++ b/sound/pci/hda/hda_tegra.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -70,9 +71,9 @@ struct hda_tegra { struct azx chip; struct device *dev; - struct clk *hda_clk; - struct clk 
*hda2codec_2x_clk; - struct clk *hda2hdmi_clk; + struct reset_control *reset; + struct clk_bulk_data clocks[3]; + unsigned int nclocks; void __iomem *regs; struct work_struct probe_work; }; @@ -113,36 +114,6 @@ static void hda_tegra_init(struct hda_tegra *hda) writel(v, hda->regs + HDA_IPFS_INTR_MASK); } -static int hda_tegra_enable_clocks(struct hda_tegra *data) -{ - int rc; - - rc = clk_prepare_enable(data->hda_clk); - if (rc) - return rc; - rc = clk_prepare_enable(data->hda2codec_2x_clk); - if (rc) - goto disable_hda; - rc = clk_prepare_enable(data->hda2hdmi_clk); - if (rc) - goto disable_codec_2x; - - return 0; - -disable_codec_2x: - clk_disable_unprepare(data->hda2codec_2x_clk); -disable_hda: - clk_disable_unprepare(data->hda_clk); - return rc; -} - -static void hda_tegra_disable_clocks(struct hda_tegra *data) -{ - clk_disable_unprepare(data->hda2hdmi_clk); - clk_disable_unprepare(data->hda2codec_2x_clk); - clk_disable_unprepare(data->hda_clk); -} - /* * power management */ @@ -186,7 +157,7 @@ static int __maybe_unused hda_tegra_runtime_suspend(struct device *dev) azx_stop_chip(chip); azx_enter_link_reset(chip); } - hda_tegra_disable_clocks(hda); + clk_bulk_disable_unprepare(hda->nclocks, hda->clocks); return 0; } @@ -198,7 +169,13 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev) struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip); int rc; - rc = hda_tegra_enable_clocks(hda); + if (!chip->running) { + rc = reset_control_assert(hda->reset); + if (rc) + return rc; + } + + rc = clk_bulk_prepare_enable(hda->nclocks, hda->clocks); if (rc != 0) return rc; if (chip && chip->running) { @@ -207,6 +184,12 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev) /* disable controller wake up event*/ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & ~STATESTS_INT_MASK); + } else { + usleep_range(10, 100); + + rc = reset_control_deassert(hda->reset); + if (rc) + return rc; } return 0; @@ -268,29 +251,6 @@ static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev) return 0; } -static int hda_tegra_init_clk(struct hda_tegra *hda) -{ - struct device *dev = hda->dev; - - hda->hda_clk = devm_clk_get(dev, "hda"); - if (IS_ERR(hda->hda_clk)) { - dev_err(dev, "failed to get hda clock\n"); - return PTR_ERR(hda->hda_clk); - } - hda->hda2codec_2x_clk = devm_clk_get(dev, "hda2codec_2x"); - if (IS_ERR(hda->hda2codec_2x_clk)) { - dev_err(dev, "failed to get hda2codec_2x clock\n"); - return PTR_ERR(hda->hda2codec_2x_clk); - } - hda->hda2hdmi_clk = devm_clk_get(dev, "hda2hdmi"); - if (IS_ERR(hda->hda2hdmi_clk)) { - dev_err(dev, "failed to get hda2hdmi clock\n"); - return PTR_ERR(hda->hda2hdmi_clk); - } - - return 0; -} - static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev) { struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip); @@ -479,7 +439,8 @@ MODULE_DEVICE_TABLE(of, hda_tegra_match); static int hda_tegra_probe(struct platform_device *pdev) { const unsigned int driver_flags = AZX_DCAPS_CORBRP_SELF_CLEAR | - AZX_DCAPS_PM_RUNTIME; + AZX_DCAPS_PM_RUNTIME | + AZX_DCAPS_4K_BDLE_BOUNDARY; struct snd_card *card; struct azx *chip; struct hda_tegra *hda; @@ -498,7 +459,17 @@ static int hda_tegra_probe(struct platform_device *pdev) return err; } - err = hda_tegra_init_clk(hda); + hda->reset = devm_reset_control_array_get_exclusive(&pdev->dev); + if (IS_ERR(hda->reset)) { + err = PTR_ERR(hda->reset); + goto out_free; + } + + hda->clocks[hda->nclocks++].id = "hda"; + hda->clocks[hda->nclocks++].id = 
"hda2hdmi"; + hda->clocks[hda->nclocks++].id = "hda2codec_2x"; + + err = devm_clk_bulk_get(&pdev->dev, hda->nclocks, hda->clocks); if (err < 0) goto out_free; diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index f774b2ac9720..82f14c3f642b 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -1272,6 +1272,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = { SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI), + SND_PCI_QUIRK(0x3842, 0x1055, "EVGA Z390 DARK", QUIRK_R3DI), SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D), SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D), SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5), diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index f46204ab0b90..c10a264e9567 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -396,6 +396,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = { /* codec SSID */ SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122), + SND_PCI_QUIRK(0x106b, 0x0900, "iMac 12,1", CS420X_IMAC27_122), SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 8098088b0056..2bd0a5839e80 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -215,6 +215,7 @@ enum { CXT_PINCFG_LEMOTE_A1205, CXT_PINCFG_COMPAQ_CQ60, CXT_FIXUP_STEREO_DMIC, + CXT_PINCFG_LENOVO_NOTEBOOK, CXT_FIXUP_INC_MIC_BOOST, CXT_FIXUP_HEADPHONE_MIC_PIN, CXT_FIXUP_HEADPHONE_MIC, @@ -765,6 +766,14 @@ static const struct hda_fixup cxt_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = cxt_fixup_stereo_dmic, }, + [CXT_PINCFG_LENOVO_NOTEBOOK] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1a, 0x05d71030 }, + { } + }, + .chain_id = CXT_FIXUP_STEREO_DMIC, + }, [CXT_FIXUP_INC_MIC_BOOST] = { .type = HDA_FIXUP_FUNC, .v.func = cxt5066_increase_mic_boost, @@ -937,6 +946,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x82b4, "HP ProDesk 600 G3", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK), @@ -963,7 +973,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), - SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_PINCFG_LENOVO_NOTEBOOK), SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI), @@ -1045,6 +1055,13 @@ static 
int patch_conexant_auto(struct hda_codec *codec) snd_hda_pick_fixup(codec, cxt5051_fixup_models, cxt5051_fixups, cxt_fixups); break; + case 0x14f15098: + codec->pin_amp_workaround = 1; + spec->gen.mixer_nid = 0x22; + spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO; + snd_hda_pick_fixup(codec, cxt5066_fixup_models, + cxt5066_fixups, cxt_fixups); + break; case 0x14f150f2: codec->power_save_node = 1; fallthrough; @@ -1065,11 +1082,11 @@ static int patch_conexant_auto(struct hda_codec *codec) if (err < 0) goto error; - err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); + err = cx_auto_parse_beep(codec); if (err < 0) goto error; - err = cx_auto_parse_beep(codec); + err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); if (err < 0) goto error; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 71e11481ba41..b1c57c65f6cd 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -157,6 +157,9 @@ struct hdmi_spec { bool dyn_pin_out; bool dyn_pcm_assign; + bool dyn_pcm_no_legacy; + bool nv_dp_workaround; /* workaround DP audio infoframe for Nvidia */ + bool intel_hsw_fixup; /* apply Intel platform-specific fixups */ /* * Non-generic VIA/NVIDIA specific @@ -666,15 +669,24 @@ static void hdmi_pin_setup_infoframe(struct hda_codec *codec, int ca, int active_channels, int conn_type) { + struct hdmi_spec *spec = codec->spec; union audio_infoframe ai; memset(&ai, 0, sizeof(ai)); - if (conn_type == 0) { /* HDMI */ + if ((conn_type == 0) || /* HDMI */ + /* Nvidia DisplayPort: Nvidia HW expects same layout as HDMI */ + (conn_type == 1 && spec->nv_dp_workaround)) { struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi; - hdmi_ai->type = 0x84; - hdmi_ai->ver = 0x01; - hdmi_ai->len = 0x0a; + if (conn_type == 0) { /* HDMI */ + hdmi_ai->type = 0x84; + hdmi_ai->ver = 0x01; + hdmi_ai->len = 0x0a; + } else {/* Nvidia DP */ + hdmi_ai->type = 0x84; + hdmi_ai->ver = 0x1b; + hdmi_ai->len = 0x11 << 2; + } hdmi_ai->CC02_CT47 = active_channels - 1; hdmi_ai->CA = ca; hdmi_checksum_audio_infoframe(hdmi_ai); @@ -1257,6 +1269,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, set_bit(pcm_idx, &spec->pcm_in_use); per_pin = get_pin(spec, pin_idx); per_pin->cvt_nid = per_cvt->cvt_nid; + per_pin->silent_stream = false; hinfo->nid = per_cvt->cvt_nid; /* flip stripe flag for the assigned stream if supported */ @@ -1348,6 +1361,12 @@ static int hdmi_find_pcm_slot(struct hdmi_spec *spec, { int i; + /* on the new machines, try to assign the pcm slot dynamically, + * not use the preferred fixed map (legacy way) anymore. 
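+	 * In that case skip the fixed per-pin mapping entirely and fall
+	 * through to the last-try scan, which just returns the first
+	 * free slot in the pcm bitmap.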
+ */ + if (spec->dyn_pcm_no_legacy) + goto last_try; + /* * generic_hdmi_build_pcms() may allocate extra PCMs on some * platforms (with maximum of 'num_nids + dev_num - 1') @@ -1377,8 +1396,9 @@ static int hdmi_find_pcm_slot(struct hdmi_spec *spec, return i; } + last_try: /* the last try; check the empty slots in pins */ - for (i = 0; i < spec->num_nids; i++) { + for (i = 0; i < spec->pcm_used; i++) { if (!test_bit(i, &spec->pcm_bitmap)) return i; } @@ -2254,7 +2274,9 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec) * dev_num is the device entry number in a pin */ - if (codec->mst_no_extra_pcms) + if (spec->dyn_pcm_no_legacy && codec->mst_no_extra_pcms) + pcm_num = spec->num_cvts; + else if (codec->mst_no_extra_pcms) pcm_num = spec->num_nids; else pcm_num = spec->num_nids + spec->dev_num - 1; @@ -2662,9 +2684,6 @@ static void generic_acomp_pin_eld_notify(void *audio_ptr, int port, int dev_id) */ if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND) return; - /* ditto during suspend/resume process itself */ - if (snd_hdac_is_in_pm(&codec->core)) - return; check_presence_and_report(codec, pin_nid, dev_id); } @@ -2848,9 +2867,6 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe) */ if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND) return; - /* ditto during suspend/resume process itself */ - if (snd_hdac_is_in_pm(&codec->core)) - return; snd_hdac_i915_set_bclk(&codec->bus->core); check_presence_and_report(codec, pin_nid, dev_id); @@ -3010,8 +3026,16 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec) * the index indicate the port number. */ static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf}; + int ret; - return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map)); + ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map)); + if (!ret) { + struct hdmi_spec *spec = codec->spec; + + spec->dyn_pcm_no_legacy = true; + } + + return ret; } /* Intel Baytrail and Braswell; with eld notifier */ @@ -3510,6 +3534,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec) spec->pcm_playback.rates = SUPPORTED_RATES; spec->pcm_playback.maxbps = SUPPORTED_MAXBPS; spec->pcm_playback.formats = SUPPORTED_FORMATS; + spec->nv_dp_workaround = true; return 0; } @@ -3649,6 +3674,7 @@ static int patch_nvhdmi(struct hda_codec *codec) spec->chmap.ops.chmap_cea_alloc_validate_get_type = nvhdmi_chmap_cea_alloc_validate_get_type; spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; + spec->nv_dp_workaround = true; codec->link_down_at_suspend = 1; @@ -3672,6 +3698,7 @@ static int patch_nvhdmi_legacy(struct hda_codec *codec) spec->chmap.ops.chmap_cea_alloc_validate_get_type = nvhdmi_chmap_cea_alloc_validate_get_type; spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; + spec->nv_dp_workaround = true; codec->link_down_at_suspend = 1; @@ -3839,11 +3866,13 @@ static int patch_tegra_hdmi(struct hda_codec *codec) if (err) return err; + codec->depop_delay = 10; codec->patch_ops.build_pcms = tegra_hdmi_build_pcms; spec = codec->spec; spec->chmap.ops.chmap_cea_alloc_validate_get_type = nvhdmi_chmap_cea_alloc_validate_get_type; spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; + spec->nv_dp_workaround = true; return 0; } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b886326ce9b9..8011b451902a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -439,6 +439,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0245: case 0x10ec0255: case 
0x10ec0256: + case 0x19e58326: case 0x10ec0257: case 0x10ec0282: case 0x10ec0283: @@ -576,6 +577,7 @@ static void alc_shutup_pins(struct hda_codec *codec) switch (codec->core.vendor_id) { case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: case 0x10ec0283: case 0x10ec0286: case 0x10ec0288: @@ -1990,6 +1992,7 @@ enum { ALC1220_FIXUP_CLEVO_PB51ED_PINS, ALC887_FIXUP_ASUS_AUDIO, ALC887_FIXUP_ASUS_HMIC, + ALCS1200A_FIXUP_MIC_VREF, }; static void alc889_fixup_coef(struct hda_codec *codec, @@ -2535,6 +2538,14 @@ static const struct hda_fixup alc882_fixups[] = { .chained = true, .chain_id = ALC887_FIXUP_ASUS_AUDIO, }, + [ALCS1200A_FIXUP_MIC_VREF] = { + .type = HDA_FIXUP_PINCTLS, + .v.pins = (const struct hda_pintbl[]) { + { 0x18, PIN_VREF50 }, /* rear mic */ + { 0x19, PIN_VREF50 }, /* front mic */ + {} + } + }, }; static const struct snd_pci_quirk alc882_fixup_tbl[] = { @@ -2572,6 +2583,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS), SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), + SND_PCI_QUIRK(0x1043, 0x8797, "ASUS TUF B550M-PLUS", ALCS1200A_FIXUP_MIC_VREF), SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), @@ -2626,10 +2638,12 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67f1, "Clevo PC70H[PRS]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x67f5, "Clevo PD70PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170SM", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x7715, "Clevo X170KM-G", ALC1220_FIXUP_CLEVO_PB51ED), @@ -3241,6 +3255,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_write_coef_idx(codec, 0x48, 0x0); alc_update_coef_idx(codec, 0x49, 0x0045, 0x0); break; @@ -3269,6 +3284,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_write_coef_idx(codec, 0x48, 0xd011); alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); break; @@ -4612,6 +4628,48 @@ static void alc236_fixup_hp_mute_led_micmute_vref(struct hda_codec *codec, alc236_fixup_hp_micmute_led_vref(codec, fix, action); } +static inline void alc298_samsung_write_coef_pack(struct hda_codec *codec, + const unsigned short coefs[2]) +{ + alc_write_coef_idx(codec, 0x23, coefs[0]); + alc_write_coef_idx(codec, 0x25, coefs[1]); + alc_write_coef_idx(codec, 0x26, 0xb011); +} + +struct alc298_samsung_amp_desc { + unsigned char 
nid; + unsigned short init_seq[2][2]; +}; + +static void alc298_fixup_samsung_amp(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + int i, j; + static const unsigned short init_seq[][2] = { + { 0x19, 0x00 }, { 0x20, 0xc0 }, { 0x22, 0x44 }, { 0x23, 0x08 }, + { 0x24, 0x85 }, { 0x25, 0x41 }, { 0x35, 0x40 }, { 0x36, 0x01 }, + { 0x38, 0x81 }, { 0x3a, 0x03 }, { 0x3b, 0x81 }, { 0x40, 0x3e }, + { 0x41, 0x07 }, { 0x400, 0x1 } + }; + static const struct alc298_samsung_amp_desc amps[] = { + { 0x3a, { { 0x18, 0x1 }, { 0x26, 0x0 } } }, + { 0x39, { { 0x18, 0x2 }, { 0x26, 0x1 } } } + }; + + if (action != HDA_FIXUP_ACT_INIT) + return; + + for (i = 0; i < ARRAY_SIZE(amps); i++) { + alc_write_coef_idx(codec, 0x22, amps[i].nid); + + for (j = 0; j < ARRAY_SIZE(amps[i].init_seq); j++) + alc298_samsung_write_coef_pack(codec, amps[i].init_seq[j]); + + for (j = 0; j < ARRAY_SIZE(init_seq); j++) + alc298_samsung_write_coef_pack(codec, init_seq[j]); + } +} + #if IS_REACHABLE(CONFIG_INPUT) static void gpio2_mic_hotkey_event(struct hda_codec *codec, struct hda_jack_callback *event) @@ -4838,6 +4896,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_process_coef_fw(codec, coef0256); break; case 0x10ec0234: @@ -4953,6 +5012,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_write_coef_idx(codec, 0x45, 0xc489); snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); alc_process_coef_fw(codec, coef0256); @@ -5103,6 +5163,7 @@ static void alc_headset_mode_default(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_write_coef_idx(codec, 0x1b, 0x0e4b); alc_write_coef_idx(codec, 0x45, 0xc089); msleep(50); @@ -5202,6 +5263,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_process_coef_fw(codec, coef0256); break; case 0x10ec0234: @@ -5316,6 +5378,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_process_coef_fw(codec, coef0256); break; case 0x10ec0234: @@ -5417,6 +5480,7 @@ static void alc_determine_headset_type(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_write_coef_idx(codec, 0x1b, 0x0e4b); alc_write_coef_idx(codec, 0x06, 0x6104); alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3); @@ -5711,6 +5775,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_process_coef_fw(codec, alc256fw); break; } @@ -6314,6 +6379,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec) case 0x10ec0236: case 0x10ec0255: case 0x10ec0256: + case 0x19e58326: alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */ alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15); break; @@ -6642,6 +6708,7 @@ enum { ALC269_FIXUP_LIMIT_INT_MIC_BOOST, ALC269VB_FIXUP_ASUS_ZENBOOK, ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A, + ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE, ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED, ALC269VB_FIXUP_ORDISSIMO_EVE2, ALC283_FIXUP_CHROME_BOOK, @@ -6701,6 +6768,7 @@ enum { ALC298_FIXUP_LENOVO_SPK_VOLUME, ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER, ALC269_FIXUP_ATIV_BOOK_8, + ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE, ALC221_FIXUP_HP_MIC_NO_PRESENCE, 
ALC256_FIXUP_ASUS_HEADSET_MODE, ALC256_FIXUP_ASUS_MIC, @@ -6756,11 +6824,14 @@ enum { ALC294_FIXUP_ASUS_GU502_HP, ALC294_FIXUP_ASUS_GU502_PINS, ALC294_FIXUP_ASUS_GU502_VERBS, + ALC294_FIXUP_ASUS_G513_PINS, + ALC285_FIXUP_ASUS_G533Z_PINS, ALC285_FIXUP_HP_GPIO_LED, ALC285_FIXUP_HP_MUTE_LED, ALC236_FIXUP_HP_GPIO_LED, ALC236_FIXUP_HP_MUTE_LED, ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, + ALC298_FIXUP_SAMSUNG_AMP, ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, @@ -6804,6 +6875,7 @@ enum { ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS, ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE, ALC287_FIXUP_YOGA7_14ITL_SPEAKERS, + ALC298_FIXUP_LENOVO_C940_DUET7, ALC287_FIXUP_13S_GEN2_SPEAKERS, ALC256_FIXUP_SET_COEF_DEFAULTS, ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE, @@ -6811,8 +6883,26 @@ enum { ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME, ALC285_FIXUP_LEGION_Y9000X_SPEAKERS, ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE, + ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED, }; +/* A special fixup for Lenovo C940 and Yoga Duet 7; + * both have the very same PCI SSID, and we need to apply different fixups + * depending on the codec ID + */ +static void alc298_fixup_lenovo_c940_duet7(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + int id; + + if (codec->core.vendor_id == 0x10ec0298) + id = ALC298_FIXUP_LENOVO_SPK_VOLUME; /* C940 */ + else + id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* Duet 7 */ + __snd_hda_apply_fixup(codec, id, action, 0); +} + static const struct hda_fixup alc269_fixups[] = { [ALC269_FIXUP_GPIO2] = { .type = HDA_FIXUP_FUNC, @@ -7198,6 +7288,15 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269VB_FIXUP_ASUS_ZENBOOK, }, + [ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x18, 0x01a110f0 }, /* use as headset mic */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC + }, [ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_limit_int_mic_boost, @@ -7609,6 +7708,16 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_NO_SHUTUP }, + [ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ + { 0x1a, 0x01813030 }, /* use as headphone mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MODE + }, [ALC221_FIXUP_HP_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -8043,6 +8152,26 @@ static const struct hda_fixup alc269_fixups[] = { [ALC294_FIXUP_ASUS_GU502_HP] = { .type = HDA_FIXUP_FUNC, .v.func = alc294_fixup_gu502_hp, + }, + [ALC294_FIXUP_ASUS_G513_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a11050 }, /* front HP mic */ + { 0x1a, 0x03a11c30 }, /* rear external mic */ + { 0x21, 0x03211420 }, /* front HP out */ + { } + }, + }, + [ALC285_FIXUP_ASUS_G533Z_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x14, 0x90170152 }, /* Speaker Surround Playback Switch */ + { 0x19, 0x03a19020 }, /* Mic Boost Volume */ + { 0x1a, 0x03a11c30 }, /* Mic Boost Volume */ + { 0x1e, 0x90170151 }, /* Rear jack, IN OUT EAPD Detect */ + { 0x21, 0x03211420 }, + { } + }, }, [ALC294_FIXUP_ASUS_COEF_1B] = { .type = HDA_FIXUP_VERBS, @@ -8077,6 +8206,12 @@ static const struct hda_fixup 
alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc236_fixup_hp_mute_led_micmute_vref, }, + [ALC298_FIXUP_SAMSUNG_AMP] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc298_fixup_samsung_amp, + .chained = true, + .chain_id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET + }, [ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { @@ -8506,6 +8641,10 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE, }, + [ALC298_FIXUP_LENOVO_C940_DUET7] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc298_fixup_lenovo_c940_duet7, + }, [ALC287_FIXUP_13S_GEN2_SPEAKERS] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { @@ -8555,6 +8694,16 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, + [ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + { 0x20, AC_VERB_SET_COEF_INDEX, 0x19 }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x8e11 }, + { } + }, + .chained = true, + .chain_id = ALC285_FIXUP_HP_MUTE_LED, + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -8587,6 +8736,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x129c, "Acer SWIFT SF314-55", ALC256_FIXUP_ACER_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x129d, "Acer SWIFT SF313-51", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1300, "Acer SWIFT SF314-56", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC), @@ -8596,6 +8746,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), + SND_PCI_QUIRK(0x1028, 0x053c, "Dell Latitude E5430", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x05be, "Dell Latitude E6540", ALC292_FIXUP_DELL_E7X), @@ -8650,6 +8801,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK), + SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), @@ -8709,6 +8862,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + 
SND_PCI_QUIRK(0x103c, 0x2b5e, "HP 288 Pro G2 MT", ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC), @@ -8729,6 +8883,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO), + SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), + SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED), @@ -8744,6 +8900,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation", ALC285_FIXUP_HP_GPIO_AMP_INIT), + SND_PCI_QUIRK(0x103c, 0x8786, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), @@ -8768,7 +8926,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8895, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED), SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), @@ -8783,13 +8943,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), + SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), + SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS), SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS 
UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), - SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS), SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC), @@ -8804,14 +8966,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), + SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS), SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), - SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), - SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), @@ -8833,17 +8998,20 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE), SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), + SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), - SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), - SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), - SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), - SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP), + SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_AMP), + SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_AMP), + SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_AMP), + 
SND_PCI_QUIRK(0x144d, 0xc1a3, "Samsung Galaxy Book Pro (NP935XDB-KC1SE)", ALC298_FIXUP_SAMSUNG_AMP), + SND_PCI_QUIRK(0x144d, 0xc1a6, "Samsung Galaxy Book Pro 360 (NP930QBD)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), - SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), - SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP), + SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), @@ -8858,6 +9026,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x4018, "Clevo NV40M[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x4019, "Clevo NV40MZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x4020, "Clevo NV40MB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x4041, "Clevo NV4[15]PZ", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x40a1, "Clevo NL40GU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x40c1, "Clevo NL40[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x40d1, "Clevo NL41DU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), @@ -8884,6 +9053,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x7717, "Clevo NS70PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x7718, "Clevo L140PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), @@ -8896,6 +9068,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[5|7][0-9]RZ[Q]", ALC269_FIXUP_DMIC), SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x866d, "Clevo NP5[05]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x867c, "Clevo NP7[01]PNP", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x867d, "Clevo NP7[01]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME), @@ -8964,9 +9137,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre 
Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), + SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), - SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), + SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7), SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF), SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), @@ -8994,12 +9169,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x508b, "Thinkpad X12 Gen 1", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK), + SND_PCI_QUIRK(0x1849, 0xa233, "Positivo Master C6300", ALC269_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), + SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20), SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI), SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101), @@ -9007,9 +9185,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802), SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X), SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS), + SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP), + SND_PCI_QUIRK(0x1d05, 0x1100, "TongFang GKxNRxx", ALC269_FIXUP_NO_SHUTUP), + SND_PCI_QUIRK(0x1d05, 0x1111, "TongFang GMxZGxx", ALC269_FIXUP_NO_SHUTUP), + SND_PCI_QUIRK(0x1d05, 0x1119, "TongFang GMxZGxx", ALC269_FIXUP_NO_SHUTUP), + SND_PCI_QUIRK(0x1d05, 0x1129, "TongFang GMxZGxx", ALC269_FIXUP_NO_SHUTUP), + SND_PCI_QUIRK(0x1d05, 0x1147, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP), + SND_PCI_QUIRK(0x1d05, 0x115c, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP), + SND_PCI_QUIRK(0x1d05, 0x121b, "TongFang GMxAGxx", ALC269_FIXUP_NO_SHUTUP), SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC), SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED), @@ -9186,7 
+9373,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"}, {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"}, {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"}, - {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"}, + {.id = ALC298_FIXUP_SAMSUNG_AMP, .name = "alc298-samsung-amp"}, {.id = ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc256-samsung-headphone"}, {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"}, {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"}, @@ -9789,6 +9976,7 @@ static int patch_alc269(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: spec->codec_variant = ALC269_TYPE_ALC256; spec->shutup = alc256_shutup; spec->init_hook = alc256_init; @@ -10408,6 +10596,7 @@ enum { ALC668_FIXUP_MIC_DET_COEF, ALC897_FIXUP_LENOVO_HEADSET_MIC, ALC897_FIXUP_HEADSET_MIC_PIN, + ALC897_FIXUP_HP_HSMIC_VERB, }; static const struct hda_fixup alc662_fixups[] = { @@ -10827,6 +11016,13 @@ static const struct hda_fixup alc662_fixups[] = { .chained = true, .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC }, + [ALC897_FIXUP_HP_HSMIC_VERB] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ + { } + }, + }, }; static const struct snd_pci_quirk alc662_fixup_tbl[] = { @@ -10852,7 +11048,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), + SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB), SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2), + SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50), @@ -10872,6 +11070,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE), SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS), + SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN), @@ -11230,6 +11429,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882), HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882), HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882), + HDA_CODEC_ENTRY(0x19e58326, "HW8326", patch_alc269), {} /* terminator */ }; MODULE_DEVICE_TABLE(hdaudio, snd_hda_id_realtek); diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index c662431bf13a..6fc0c4e77cd1 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -209,6 +209,7 @@ struct sigmatel_spec { /* beep widgets */ hda_nid_t anabeep_nid; + bool beep_power_on; /* 
SPDIF-out mux */ const char * const *spdif_labels; @@ -4307,6 +4308,8 @@ static int stac_parse_auto_config(struct hda_codec *codec) if (codec->beep) { /* IDT/STAC codecs have linear beep tone parameter */ codec->beep->linear_tone = spec->linear_tone_beep; + /* keep power up while beep is enabled */ + codec->beep->keep_power_at_enable = 1; /* if no beep switch is available, make its own one */ caps = query_amp_caps(codec, nid, HDA_OUTPUT); if (!(caps & AC_AMPCAP_MUTE)) { diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 773a136161f1..a188901a83bb 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -520,11 +520,11 @@ static int via_parse_auto_config(struct hda_codec *codec) if (err < 0) return err; - err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); + err = auto_parse_beep(codec); if (err < 0) return err; - err = auto_parse_beep(codec); + err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg); if (err < 0) return err; diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c index 4aee30db034d..954347424500 100644 --- a/sound/pci/rme9652/hdsp.c +++ b/sound/pci/rme9652/hdsp.c @@ -436,7 +436,7 @@ struct hdsp_midi { struct snd_rawmidi *rmidi; struct snd_rawmidi_substream *input; struct snd_rawmidi_substream *output; - char istimer; /* timer in use */ + signed char istimer; /* timer in use */ struct timer_list timer; spinlock_t lock; int pending; @@ -479,7 +479,7 @@ struct hdsp { pid_t playback_pid; int running; int system_sample_rate; - const char *channel_map; + const signed char *channel_map; int dev; int irq; unsigned long port; @@ -501,7 +501,7 @@ struct hdsp { where the data for that channel can be read/written from/to. */ -static const char channel_map_df_ss[HDSP_MAX_CHANNELS] = { +static const signed char channel_map_df_ss[HDSP_MAX_CHANNELS] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 }; @@ -516,7 +516,7 @@ static const char channel_map_mf_ss[HDSP_MAX_CHANNELS] = { /* Multiface */ -1, -1, -1, -1, -1, -1, -1, -1 }; -static const char channel_map_ds[HDSP_MAX_CHANNELS] = { +static const signed char channel_map_ds[HDSP_MAX_CHANNELS] = { /* ADAT channels are remapped */ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, /* channels 12 and 13 are S/PDIF */ @@ -525,7 +525,7 @@ static const char channel_map_ds[HDSP_MAX_CHANNELS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; -static const char channel_map_H9632_ss[HDSP_MAX_CHANNELS] = { +static const signed char channel_map_H9632_ss[HDSP_MAX_CHANNELS] = { /* ADAT channels */ 0, 1, 2, 3, 4, 5, 6, 7, /* SPDIF */ @@ -539,7 +539,7 @@ static const char channel_map_H9632_ss[HDSP_MAX_CHANNELS] = { -1, -1 }; -static const char channel_map_H9632_ds[HDSP_MAX_CHANNELS] = { +static const signed char channel_map_H9632_ds[HDSP_MAX_CHANNELS] = { /* ADAT */ 1, 3, 5, 7, /* SPDIF */ @@ -553,7 +553,7 @@ static const char channel_map_H9632_ds[HDSP_MAX_CHANNELS] = { -1, -1, -1, -1, -1, -1 }; -static const char channel_map_H9632_qs[HDSP_MAX_CHANNELS] = { +static const signed char channel_map_H9632_qs[HDSP_MAX_CHANNELS] = { /* ADAT is disabled in this mode */ /* SPDIF */ 8, 9, @@ -3869,7 +3869,7 @@ static snd_pcm_uframes_t snd_hdsp_hw_pointer(struct snd_pcm_substream *substream return hdsp_hw_pointer(hdsp); } -static char *hdsp_channel_buffer_location(struct hdsp *hdsp, +static signed char *hdsp_channel_buffer_location(struct hdsp *hdsp, int stream, int channel) @@ -3893,7 +3893,7 @@ static int snd_hdsp_playback_copy(struct snd_pcm_substream 
*substream, void __user *src, unsigned long count) { struct hdsp *hdsp = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; if (snd_BUG_ON(pos + count > HDSP_CHANNEL_BUFFER_BYTES)) return -EINVAL; @@ -3911,7 +3911,7 @@ static int snd_hdsp_playback_copy_kernel(struct snd_pcm_substream *substream, void *src, unsigned long count) { struct hdsp *hdsp = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; channel_buf = hdsp_channel_buffer_location(hdsp, substream->pstr->stream, channel); if (snd_BUG_ON(!channel_buf)) @@ -3925,7 +3925,7 @@ static int snd_hdsp_capture_copy(struct snd_pcm_substream *substream, void __user *dst, unsigned long count) { struct hdsp *hdsp = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; if (snd_BUG_ON(pos + count > HDSP_CHANNEL_BUFFER_BYTES)) return -EINVAL; @@ -3943,7 +3943,7 @@ static int snd_hdsp_capture_copy_kernel(struct snd_pcm_substream *substream, void *dst, unsigned long count) { struct hdsp *hdsp = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; channel_buf = hdsp_channel_buffer_location(hdsp, substream->pstr->stream, channel); if (snd_BUG_ON(!channel_buf)) @@ -3957,7 +3957,7 @@ static int snd_hdsp_hw_silence(struct snd_pcm_substream *substream, unsigned long count) { struct hdsp *hdsp = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; channel_buf = hdsp_channel_buffer_location (hdsp, substream->pstr->stream, channel); if (snd_BUG_ON(!channel_buf)) diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c index 8def24673f35..459696844b8c 100644 --- a/sound/pci/rme9652/rme9652.c +++ b/sound/pci/rme9652/rme9652.c @@ -229,7 +229,7 @@ struct snd_rme9652 { int last_spdif_sample_rate; /* so that we can catch externally ... */ int last_adat_sample_rate; /* ... induced rate changes */ - const char *channel_map; + const signed char *channel_map; struct snd_card *card; struct snd_pcm *pcm; @@ -246,12 +246,12 @@ struct snd_rme9652 { where the data for that channel can be read/written from/to. 
*/ -static const char channel_map_9652_ss[26] = { +static const signed char channel_map_9652_ss[26] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 }; -static const char channel_map_9636_ss[26] = { +static const signed char channel_map_9636_ss[26] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* channels 16 and 17 are S/PDIF */ 24, 25, @@ -259,7 +259,7 @@ static const char channel_map_9636_ss[26] = { -1, -1, -1, -1, -1, -1, -1, -1 }; -static const char channel_map_9652_ds[26] = { +static const signed char channel_map_9652_ds[26] = { /* ADAT channels are remapped */ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, /* channels 12 and 13 are S/PDIF */ @@ -268,7 +268,7 @@ static const char channel_map_9652_ds[26] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; -static const char channel_map_9636_ds[26] = { +static const signed char channel_map_9636_ds[26] = { /* ADAT channels are remapped */ 1, 3, 5, 7, 9, 11, 13, 15, /* channels 8 and 9 are S/PDIF */ @@ -1841,7 +1841,7 @@ static snd_pcm_uframes_t snd_rme9652_hw_pointer(struct snd_pcm_substream *substr return rme9652_hw_pointer(rme9652); } -static char *rme9652_channel_buffer_location(struct snd_rme9652 *rme9652, +static signed char *rme9652_channel_buffer_location(struct snd_rme9652 *rme9652, int stream, int channel) @@ -1869,7 +1869,7 @@ static int snd_rme9652_playback_copy(struct snd_pcm_substream *substream, void __user *src, unsigned long count) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; if (snd_BUG_ON(pos + count > RME9652_CHANNEL_BUFFER_BYTES)) return -EINVAL; @@ -1889,7 +1889,7 @@ static int snd_rme9652_playback_copy_kernel(struct snd_pcm_substream *substream, void *src, unsigned long count) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; channel_buf = rme9652_channel_buffer_location(rme9652, substream->pstr->stream, @@ -1905,7 +1905,7 @@ static int snd_rme9652_capture_copy(struct snd_pcm_substream *substream, void __user *dst, unsigned long count) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; if (snd_BUG_ON(pos + count > RME9652_CHANNEL_BUFFER_BYTES)) return -EINVAL; @@ -1925,7 +1925,7 @@ static int snd_rme9652_capture_copy_kernel(struct snd_pcm_substream *substream, void *dst, unsigned long count) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; channel_buf = rme9652_channel_buffer_location(rme9652, substream->pstr->stream, @@ -1941,7 +1941,7 @@ static int snd_rme9652_hw_silence(struct snd_pcm_substream *substream, unsigned long count) { struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream); - char *channel_buf; + signed char *channel_buf; channel_buf = rme9652_channel_buffer_location (rme9652, substream->pstr->stream, diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c index b1a28a9382fb..f91a0e728ed5 100644 --- a/sound/soc/atmel/atmel-classd.c +++ b/sound/soc/atmel/atmel-classd.c @@ -458,7 +458,6 @@ static const struct snd_soc_component_driver atmel_classd_cpu_dai_component = { .num_controls = ARRAY_SIZE(atmel_classd_snd_controls), .idle_bias_on = 1, .use_pmdown_time = 1, - .endianness = 1, }; /* ASoC sound card */ diff --git a/sound/soc/atmel/atmel-pdmic.c b/sound/soc/atmel/atmel-pdmic.c index 8e1d8230b180..049383e5405e 100644 --- a/sound/soc/atmel/atmel-pdmic.c +++ 
b/sound/soc/atmel/atmel-pdmic.c @@ -481,7 +481,6 @@ static const struct snd_soc_component_driver atmel_pdmic_cpu_dai_component = { .num_controls = ARRAY_SIZE(atmel_pdmic_snd_controls), .idle_bias_on = 1, .use_pmdown_time = 1, - .endianness = 1, }; /* ASoC sound card */ diff --git a/sound/soc/atmel/mchp-spdifrx.c b/sound/soc/atmel/mchp-spdifrx.c index e6ded6f8453f..46f3407ed0e8 100644 --- a/sound/soc/atmel/mchp-spdifrx.c +++ b/sound/soc/atmel/mchp-spdifrx.c @@ -288,15 +288,17 @@ static void mchp_spdifrx_isr_blockend_en(struct mchp_spdifrx_dev *dev) spin_unlock_irqrestore(&dev->blockend_lock, flags); } -/* called from atomic context only */ +/* called from atomic/non-atomic context */ static void mchp_spdifrx_isr_blockend_dis(struct mchp_spdifrx_dev *dev) { - spin_lock(&dev->blockend_lock); + unsigned long flags; + + spin_lock_irqsave(&dev->blockend_lock, flags); dev->blockend_refcount--; /* don't enable BLOCKEND interrupt if it's already enabled */ if (dev->blockend_refcount == 0) regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND); - spin_unlock(&dev->blockend_lock); + spin_unlock_irqrestore(&dev->blockend_lock, flags); } static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id) @@ -575,6 +577,7 @@ static int mchp_spdifrx_subcode_ch_get(struct mchp_spdifrx_dev *dev, if (ret <= 0) { dev_dbg(dev->dev, "user data for channel %d timeout\n", channel); + mchp_spdifrx_isr_blockend_dis(dev); return ret; } diff --git a/sound/soc/atmel/mchp-spdiftx.c b/sound/soc/atmel/mchp-spdiftx.c index 3bd350afb743..0d2e3fa21519 100644 --- a/sound/soc/atmel/mchp-spdiftx.c +++ b/sound/soc/atmel/mchp-spdiftx.c @@ -196,8 +196,7 @@ struct mchp_spdiftx_dev { struct clk *pclk; struct clk *gclk; unsigned int fmt; - const struct mchp_i2s_caps *caps; - int gclk_enabled:1; + unsigned int gclk_enabled:1; }; static inline int mchp_spdiftx_is_running(struct mchp_spdiftx_dev *dev) @@ -766,8 +765,6 @@ static const struct of_device_id mchp_spdiftx_dt_ids[] = { MODULE_DEVICE_TABLE(of, mchp_spdiftx_dt_ids); static int mchp_spdiftx_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; - const struct of_device_id *match; struct mchp_spdiftx_dev *dev; struct resource *mem; struct regmap *regmap; @@ -781,11 +778,6 @@ static int mchp_spdiftx_probe(struct platform_device *pdev) if (!dev) return -ENOMEM; - /* Get hardware capabilities. */ - match = of_match_node(mchp_spdiftx_dt_ids, np); - if (match) - dev->caps = match->data; - /* Map I/O registers. 
*/ base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); if (IS_ERR(base)) diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c index 8a55d59a6c2a..d243de5f23dc 100644 --- a/sound/soc/atmel/sam9g20_wm8731.c +++ b/sound/soc/atmel/sam9g20_wm8731.c @@ -46,35 +46,6 @@ */ #undef ENABLE_MIC_INPUT -static struct clk *mclk; - -static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card, - struct snd_soc_dapm_context *dapm, - enum snd_soc_bias_level level) -{ - static int mclk_on; - int ret = 0; - - switch (level) { - case SND_SOC_BIAS_ON: - case SND_SOC_BIAS_PREPARE: - if (!mclk_on) - ret = clk_enable(mclk); - if (ret == 0) - mclk_on = 1; - break; - - case SND_SOC_BIAS_OFF: - case SND_SOC_BIAS_STANDBY: - if (mclk_on) - clk_disable(mclk); - mclk_on = 0; - break; - } - - return ret; -} - static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = { SND_SOC_DAPM_MIC("Int Mic", NULL), SND_SOC_DAPM_SPK("Ext Spk", NULL), @@ -135,7 +106,6 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = { .owner = THIS_MODULE, .dai_link = &at91sam9g20ek_dai, .num_links = 1, - .set_bias_level = at91sam9g20ek_set_bias_level, .dapm_widgets = at91sam9g20ek_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets), @@ -148,7 +118,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct device_node *codec_np, *cpu_np; - struct clk *pllb; struct snd_soc_card *card = &snd_soc_at91sam9g20ek; int ret; @@ -162,31 +131,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev) return -EINVAL; } - /* - * Codec MCLK is supplied by PCK0 - set it up. - */ - mclk = clk_get(NULL, "pck0"); - if (IS_ERR(mclk)) { - dev_err(&pdev->dev, "Failed to get MCLK\n"); - ret = PTR_ERR(mclk); - goto err; - } - - pllb = clk_get(NULL, "pllb"); - if (IS_ERR(pllb)) { - dev_err(&pdev->dev, "Failed to get PLLB\n"); - ret = PTR_ERR(pllb); - goto err_mclk; - } - ret = clk_set_parent(mclk, pllb); - clk_put(pllb); - if (ret != 0) { - dev_err(&pdev->dev, "Failed to set MCLK parent\n"); - goto err_mclk; - } - - clk_set_rate(mclk, MCLK_RATE); - card->dev = &pdev->dev; /* Parse device node info */ @@ -230,9 +174,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev) return ret; -err_mclk: - clk_put(mclk); - mclk = NULL; err: atmel_ssc_put_audio(0); return ret; @@ -242,8 +183,6 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); - clk_disable(mclk); - mclk = NULL; snd_soc_unregister_card(card); atmel_ssc_put_audio(0); diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 9a9173ff39c0..8121978a31a8 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -915,7 +915,6 @@ config SND_SOC_MAX98095 config SND_SOC_MAX98357A tristate "Maxim MAX98357A CODEC" - depends on GPIOLIB config SND_SOC_MAX98371 tristate @@ -1192,7 +1191,6 @@ config SND_SOC_RT1015 config SND_SOC_RT1015P tristate - depends on GPIOLIB config SND_SOC_RT1305 tristate diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c index 5c3b7e5e55d2..dedbaba83792 100644 --- a/sound/soc/codecs/cros_ec_codec.c +++ b/sound/soc/codecs/cros_ec_codec.c @@ -994,6 +994,7 @@ static int cros_ec_codec_platform_probe(struct platform_device *pdev) dev_dbg(dev, "ap_shm_phys_addr=%#llx len=%#x\n", priv->ap_shm_phys_addr, priv->ap_shm_len); } + of_node_put(node); } #endif diff --git a/sound/soc/codecs/cs35l36.c 
b/sound/soc/codecs/cs35l36.c index e9b5f76f27a8..aa32b8c26578 100644 --- a/sound/soc/codecs/cs35l36.c +++ b/sound/soc/codecs/cs35l36.c @@ -444,7 +444,8 @@ static bool cs35l36_volatile_reg(struct device *dev, unsigned int reg) } } -static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10200, 25, 0); +static const DECLARE_TLV_DB_RANGE(dig_vol_tlv, 0, 912, + TLV_DB_MINMAX_ITEM(-10200, 1200)); static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1); static const char * const cs35l36_pcm_sftramp_text[] = { diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c index f772628f233e..38223641bdf6 100644 --- a/sound/soc/codecs/cs42l52.c +++ b/sound/soc/codecs/cs42l52.c @@ -137,7 +137,9 @@ static DECLARE_TLV_DB_SCALE(mic_tlv, 1600, 100, 0); static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0); -static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0); +static DECLARE_TLV_DB_SCALE(pass_tlv, -6000, 50, 0); + +static DECLARE_TLV_DB_SCALE(mix_tlv, -5150, 50, 0); static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0); @@ -351,7 +353,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = { CS42L52_SPKB_VOL, 0, 0x40, 0xC0, hl_tlv), SOC_DOUBLE_R_SX_TLV("Bypass Volume", CS42L52_PASSTHRUA_VOL, - CS42L52_PASSTHRUB_VOL, 0, 0x88, 0x90, pga_tlv), + CS42L52_PASSTHRUB_VOL, 0, 0x88, 0x90, pass_tlv), SOC_DOUBLE("Bypass Mute", CS42L52_MISC_CTL, 4, 5, 1, 0), @@ -364,7 +366,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = { CS42L52_ADCB_VOL, 0, 0xA0, 0x78, ipd_tlv), SOC_DOUBLE_R_SX_TLV("ADC Mixer Volume", CS42L52_ADCA_MIXER_VOL, CS42L52_ADCB_MIXER_VOL, - 0, 0x19, 0x7F, ipd_tlv), + 0, 0x19, 0x7F, mix_tlv), SOC_DOUBLE("ADC Switch", CS42L52_ADC_MISC_CTL, 0, 1, 1, 0), diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c index 06dcfae9dfe7..d41e03193106 100644 --- a/sound/soc/codecs/cs42l56.c +++ b/sound/soc/codecs/cs42l56.c @@ -391,9 +391,9 @@ static const struct snd_kcontrol_new cs42l56_snd_controls[] = { SOC_DOUBLE("ADC Boost Switch", CS42L56_GAIN_BIAS_CTL, 3, 2, 1, 1), SOC_DOUBLE_R_SX_TLV("Headphone Volume", CS42L56_HPA_VOLUME, - CS42L56_HPB_VOLUME, 0, 0x84, 0x48, hl_tlv), + CS42L56_HPB_VOLUME, 0, 0x44, 0x48, hl_tlv), SOC_DOUBLE_R_SX_TLV("LineOut Volume", CS42L56_LOA_VOLUME, - CS42L56_LOB_VOLUME, 0, 0x84, 0x48, hl_tlv), + CS42L56_LOB_VOLUME, 0, 0x44, 0x48, hl_tlv), SOC_SINGLE_TLV("Bass Shelving Volume", CS42L56_TONE_CTL, 0, 0x00, 1, tone_tlv), diff --git a/sound/soc/codecs/cs47l15.c b/sound/soc/codecs/cs47l15.c index 254f9d96e766..7c20642f160a 100644 --- a/sound/soc/codecs/cs47l15.c +++ b/sound/soc/codecs/cs47l15.c @@ -122,6 +122,9 @@ static int cs47l15_in1_adc_put(struct snd_kcontrol *kcontrol, snd_soc_kcontrol_component(kcontrol); struct cs47l15 *cs47l15 = snd_soc_component_get_drvdata(component); + if (!!ucontrol->value.integer.value[0] == cs47l15->in1_lp_mode) + return 0; + switch (ucontrol->value.integer.value[0]) { case 0: /* Set IN1 to normal mode */ @@ -150,7 +153,7 @@ static int cs47l15_in1_adc_put(struct snd_kcontrol *kcontrol, break; } - return 0; + return 1; } static const struct snd_kcontrol_new cs47l15_snd_controls[] = { diff --git a/sound/soc/codecs/cs53l30.c b/sound/soc/codecs/cs53l30.c index ed22361b35c1..a5a383b92305 100644 --- a/sound/soc/codecs/cs53l30.c +++ b/sound/soc/codecs/cs53l30.c @@ -347,22 +347,22 @@ static const struct snd_kcontrol_new cs53l30_snd_controls[] = { SOC_ENUM("ADC2 NG Delay", adc2_ng_delay_enum), SOC_SINGLE_SX_TLV("ADC1A PGA Volume", - CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x18, pga_tlv), + CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x24, pga_tlv), 
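[Editor's note: the cs47l15 put() fix just above, together with the matching es8328, madera, max98090 and rt5514 changes further down, all enforce the same ALSA kcontrol convention: a put() handler returns a negative errno on failure, 0 when the write was a no-op, and 1 only when it actually changed the value, so change notifications reach user space only for real changes. A minimal compile-able sketch of that shape follows; cached_state, write_reg and example_put are hypothetical stand-ins, not driver code.]

#include <stdio.h>
#include <stdbool.h>

static bool cached_state;	/* stand-in for the codec's cached switch state */
static void write_reg(bool v) { cached_state = v; }

/* put() protocol these hunks adopt: <0 error, 0 unchanged, 1 changed. */
static int example_put(long user_value)
{
	if (user_value < 0 || user_value > 1)
		return -22;		/* -EINVAL: reject bad input before using it */
	if ((bool)user_value == cached_state)
		return 0;		/* no-op: suppress the notification */
	write_reg((bool)user_value);
	return 1;			/* changed: notify listeners */
}

int main(void)
{
	int first = example_put(1);
	int second = example_put(1);
	printf("%d %d\n", first, second);	/* prints "1 0" */
	return 0;
}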
SOC_SINGLE_SX_TLV("ADC1B PGA Volume", - CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x18, pga_tlv), + CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x24, pga_tlv), SOC_SINGLE_SX_TLV("ADC2A PGA Volume", - CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x18, pga_tlv), + CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x24, pga_tlv), SOC_SINGLE_SX_TLV("ADC2B PGA Volume", - CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x18, pga_tlv), + CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x24, pga_tlv), SOC_SINGLE_SX_TLV("ADC1A Digital Volume", - CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv), + CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv), SOC_SINGLE_SX_TLV("ADC1B Digital Volume", - CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv), + CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv), SOC_SINGLE_SX_TLV("ADC2A Digital Volume", - CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv), + CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv), SOC_SINGLE_SX_TLV("ADC2B Digital Volume", - CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv), + CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv), }; static const struct snd_soc_dapm_widget cs53l30_dapm_widgets[] = { diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c index 3d05c37f676e..4544ed8741b6 100644 --- a/sound/soc/codecs/da7210.c +++ b/sound/soc/codecs/da7210.c @@ -1336,6 +1336,8 @@ static int __init da7210_modinit(void) int ret = 0; #if IS_ENABLED(CONFIG_I2C) ret = i2c_add_driver(&da7210_i2c_driver); + if (ret) + return ret; #endif #if defined(CONFIG_SPI_MASTER) ret = spi_register_driver(&da7210_spi_driver); diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c index 0b3b7909efc9..f9e58d6509a8 100644 --- a/sound/soc/codecs/da7219.c +++ b/sound/soc/codecs/da7219.c @@ -446,7 +446,7 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol, struct soc_mixer_control *mixer_ctrl = (struct soc_mixer_control *) kcontrol->private_value; unsigned int reg = mixer_ctrl->reg; - __le16 val; + __le16 val_new, val_old; int ret; /* @@ -454,13 +454,19 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol, * Therefore we need to convert to little endian here to align with * HW registers. 
*/ - val = cpu_to_le16(ucontrol->value.integer.value[0]); + val_new = cpu_to_le16(ucontrol->value.integer.value[0]); mutex_lock(&da7219->ctrl_lock); - ret = regmap_raw_write(da7219->regmap, reg, &val, sizeof(val)); + ret = regmap_raw_read(da7219->regmap, reg, &val_old, sizeof(val_old)); + if (ret == 0 && (val_old != val_new)) + ret = regmap_raw_write(da7219->regmap, reg, + &val_new, sizeof(val_new)); mutex_unlock(&da7219->ctrl_lock); - return ret; + if (ret < 0) + return ret; + + return val_old != val_new; } @@ -2188,6 +2194,7 @@ static int da7219_register_dai_clks(struct snd_soc_component *component) dai_clk_lookup = clkdev_hw_create(dai_clk_hw, init.name, "%s", dev_name(dev)); if (!dai_clk_lookup) { + clk_hw_unregister(dai_clk_hw); ret = -ENOMEM; goto err; } else { @@ -2209,12 +2216,12 @@ static int da7219_register_dai_clks(struct snd_soc_component *component) return 0; err: - do { + while (--i >= 0) { if (da7219->dai_clks_lookup[i]) clkdev_drop(da7219->dai_clks_lookup[i]); clk_hw_unregister(&da7219->dai_clks_hw[i]); - } while (i-- > 0); + } if (np) kfree(da7219->clk_hw_data); diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index 1d27a26e0907..3e2de70340cc 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c @@ -171,13 +171,16 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol, if (deemph > 1) return -EINVAL; + if (es8328->deemph == deemph) + return 0; + ret = es8328_set_deemph(component); if (ret < 0) return ret; es8328->deemph = deemph; - return 0; + return 1; } diff --git a/sound/soc/codecs/hdac_hda.h b/sound/soc/codecs/hdac_hda.h index d0efc5e254ae..da0ed74758b0 100644 --- a/sound/soc/codecs/hdac_hda.h +++ b/sound/soc/codecs/hdac_hda.h @@ -14,7 +14,7 @@ enum { HDAC_HDMI_1_DAI_ID, HDAC_HDMI_2_DAI_ID, HDAC_HDMI_3_DAI_ID, - HDAC_LAST_DAI_ID = HDAC_HDMI_3_DAI_ID, + HDAC_DAI_ID_NUM }; struct hdac_hda_pcm { @@ -24,7 +24,7 @@ struct hdac_hda_pcm { struct hdac_hda_priv { struct hda_codec codec; - struct hdac_hda_pcm pcm[HDAC_LAST_DAI_ID]; + struct hdac_hda_pcm pcm[HDAC_DAI_ID_NUM]; bool need_display_power; }; diff --git a/sound/soc/codecs/jz4725b.c b/sound/soc/codecs/jz4725b.c index e49374c72e70..8a830d0ad950 100644 --- a/sound/soc/codecs/jz4725b.c +++ b/sound/soc/codecs/jz4725b.c @@ -136,14 +136,17 @@ enum { #define REG_CGR3_GO1L_OFFSET 0 #define REG_CGR3_GO1L_MASK (0x1f << REG_CGR3_GO1L_OFFSET) +#define REG_CGR10_GIL_OFFSET 0 +#define REG_CGR10_GIR_OFFSET 4 + struct jz_icdc { struct regmap *regmap; void __iomem *base; struct clk *clk; }; -static const SNDRV_CTL_TLVD_DECLARE_DB_LINEAR(jz4725b_dac_tlv, -2250, 0); -static const SNDRV_CTL_TLVD_DECLARE_DB_LINEAR(jz4725b_line_tlv, -1500, 600); +static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(jz4725b_adc_tlv, 0, 150, 0); +static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(jz4725b_dac_tlv, -2250, 150, 0); static const struct snd_kcontrol_new jz4725b_codec_controls[] = { SOC_DOUBLE_TLV("Master Playback Volume", @@ -151,11 +154,11 @@ static const struct snd_kcontrol_new jz4725b_codec_controls[] = { REG_CGR1_GODL_OFFSET, REG_CGR1_GODR_OFFSET, 0xf, 1, jz4725b_dac_tlv), - SOC_DOUBLE_R_TLV("Master Capture Volume", - JZ4725B_CODEC_REG_CGR3, - JZ4725B_CODEC_REG_CGR2, - REG_CGR2_GO1R_OFFSET, - 0x1f, 1, jz4725b_line_tlv), + SOC_DOUBLE_TLV("Master Capture Volume", + JZ4725B_CODEC_REG_CGR10, + REG_CGR10_GIL_OFFSET, + REG_CGR10_GIR_OFFSET, + 0xf, 0, jz4725b_adc_tlv), SOC_SINGLE("Master Playback Switch", JZ4725B_CODEC_REG_CR1, REG_CR1_DAC_MUTE_OFFSET, 1, 1), @@ -180,7 +183,7 @@ static 
SOC_VALUE_ENUM_SINGLE_DECL(jz4725b_codec_adc_src_enum, jz4725b_codec_adc_src_texts, jz4725b_codec_adc_src_values); static const struct snd_kcontrol_new jz4725b_codec_adc_src_ctrl = - SOC_DAPM_ENUM("Route", jz4725b_codec_adc_src_enum); + SOC_DAPM_ENUM("ADC Source Capture Route", jz4725b_codec_adc_src_enum); static const struct snd_kcontrol_new jz4725b_codec_mixer_controls[] = { SOC_DAPM_SINGLE("Line In Bypass", JZ4725B_CODEC_REG_CR1, @@ -225,7 +228,7 @@ static const struct snd_soc_dapm_widget jz4725b_codec_dapm_widgets[] = { SND_SOC_DAPM_ADC("ADC", "Capture", JZ4725B_CODEC_REG_PMR1, REG_PMR1_SB_ADC_OFFSET, 1), - SND_SOC_DAPM_MUX("ADC Source", SND_SOC_NOPM, 0, 0, + SND_SOC_DAPM_MUX("ADC Source Capture Route", SND_SOC_NOPM, 0, 0, &jz4725b_codec_adc_src_ctrl), /* Mixer */ @@ -236,7 +239,8 @@ static const struct snd_soc_dapm_widget jz4725b_codec_dapm_widgets[] = { SND_SOC_DAPM_MIXER("DAC to Mixer", JZ4725B_CODEC_REG_CR1, REG_CR1_DACSEL_OFFSET, 0, NULL, 0), - SND_SOC_DAPM_MIXER("Line In", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_MIXER("Line In", JZ4725B_CODEC_REG_PMR1, + REG_PMR1_SB_LIN_OFFSET, 1, NULL, 0), SND_SOC_DAPM_MIXER("HP Out", JZ4725B_CODEC_REG_CR1, REG_CR1_HP_DIS_OFFSET, 1, NULL, 0), @@ -283,11 +287,11 @@ static const struct snd_soc_dapm_route jz4725b_codec_dapm_routes[] = { {"Mixer", NULL, "DAC to Mixer"}, {"Mixer to ADC", NULL, "Mixer"}, - {"ADC Source", "Mixer", "Mixer to ADC"}, - {"ADC Source", "Line In", "Line In"}, - {"ADC Source", "Mic 1", "Mic 1"}, - {"ADC Source", "Mic 2", "Mic 2"}, - {"ADC", NULL, "ADC Source"}, + {"ADC Source Capture Route", "Mixer", "Mixer to ADC"}, + {"ADC Source Capture Route", "Line In", "Line In"}, + {"ADC Source Capture Route", "Mic 1", "Mic 1"}, + {"ADC Source Capture Route", "Mic 2", "Mic 2"}, + {"ADC", NULL, "ADC Source Capture Route"}, {"Out Stage", NULL, "Mixer"}, {"HP Out", NULL, "Out Stage"}, diff --git a/sound/soc/codecs/madera.c b/sound/soc/codecs/madera.c index 680f31a6493a..bbab4bc1f6b5 100644 --- a/sound/soc/codecs/madera.c +++ b/sound/soc/codecs/madera.c @@ -618,7 +618,13 @@ int madera_out1_demux_put(struct snd_kcontrol *kcontrol, end: snd_soc_dapm_mutex_unlock(dapm); - return snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); + ret = snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); + if (ret < 0) { + dev_err(madera->dev, "Failed to update demux power state: %d\n", ret); + return ret; + } + + return change; } EXPORT_SYMBOL_GPL(madera_out1_demux_put); @@ -893,7 +899,7 @@ static int madera_adsp_rate_put(struct snd_kcontrol *kcontrol, struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; const int adsp_num = e->shift_l; const unsigned int item = ucontrol->value.enumerated.item[0]; - int ret; + int ret = 0; if (item >= e->items) return -EINVAL; @@ -910,10 +916,10 @@ static int madera_adsp_rate_put(struct snd_kcontrol *kcontrol, "Cannot change '%s' while in use by active audio paths\n", kcontrol->id.name); ret = -EBUSY; - } else { + } else if (priv->adsp_rate_cache[adsp_num] != e->values[item]) { /* Volatile register so defer until the codec is powered up */ priv->adsp_rate_cache[adsp_num] = e->values[item]; - ret = 0; + ret = 1; } mutex_unlock(&priv->rate_lock); diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index 945a79e4f3eb..0c73979cad4a 100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c @@ -393,7 +393,8 @@ static int max98090_put_enab_tlv(struct snd_kcontrol *kcontrol, struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; 
unsigned int mask = (1 << fls(mc->max)) - 1; - unsigned int sel = ucontrol->value.integer.value[0]; + int sel_unchecked = ucontrol->value.integer.value[0]; + unsigned int sel; unsigned int val = snd_soc_component_read(component, mc->reg); unsigned int *select; @@ -413,6 +414,10 @@ static int max98090_put_enab_tlv(struct snd_kcontrol *kcontrol, val = (val >> mc->shift) & mask; + if (sel_unchecked < 0 || sel_unchecked > mc->max) + return -EINVAL; + sel = sel_unchecked; + *select = sel; /* Setting a volume is only valid if it is already On */ @@ -427,7 +432,7 @@ static int max98090_put_enab_tlv(struct snd_kcontrol *kcontrol, mask << mc->shift, sel << mc->shift); - return 0; + return *select != val; } static const char *max98090_perf_pwr_text[] = diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c index 9ad7fc0baf07..098a58990f07 100644 --- a/sound/soc/codecs/msm8916-wcd-digital.c +++ b/sound/soc/codecs/msm8916-wcd-digital.c @@ -328,8 +328,8 @@ static const struct snd_kcontrol_new rx1_mix2_inp1_mux = SOC_DAPM_ENUM( static const struct snd_kcontrol_new rx2_mix2_inp1_mux = SOC_DAPM_ENUM( "RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum); -/* Digital Gain control -38.4 dB to +38.4 dB in 0.3 dB steps */ -static const DECLARE_TLV_DB_SCALE(digital_gain, -3840, 30, 0); +/* Digital Gain control -84 dB to +40 dB in 1 dB steps */ +static const DECLARE_TLV_DB_SCALE(digital_gain, -8400, 100, -8400); /* Cutoff Freq for High Pass Filter at -3dB */ static const char * const hpf_cutoff_text[] = { @@ -510,15 +510,15 @@ static int wcd_iir_filter_info(struct snd_kcontrol *kcontrol, static const struct snd_kcontrol_new msm8916_wcd_digital_snd_controls[] = { SOC_SINGLE_S8_TLV("RX1 Digital Volume", LPASS_CDC_RX1_VOL_CTL_B2_CTL, - -128, 127, digital_gain), + -84, 40, digital_gain), SOC_SINGLE_S8_TLV("RX2 Digital Volume", LPASS_CDC_RX2_VOL_CTL_B2_CTL, - -128, 127, digital_gain), + -84, 40, digital_gain), SOC_SINGLE_S8_TLV("RX3 Digital Volume", LPASS_CDC_RX3_VOL_CTL_B2_CTL, - -128, 127, digital_gain), + -84, 40, digital_gain), SOC_SINGLE_S8_TLV("TX1 Digital Volume", LPASS_CDC_TX1_VOL_CTL_GAIN, - -128, 127, digital_gain), + -84, 40, digital_gain), SOC_SINGLE_S8_TLV("TX2 Digital Volume", LPASS_CDC_TX2_VOL_CTL_GAIN, - -128, 127, digital_gain), + -84, 40, digital_gain), SOC_ENUM("TX1 HPF Cutoff", tx1_hpf_cutoff_enum), SOC_ENUM("TX2 HPF Cutoff", tx2_hpf_cutoff_enum), SOC_SINGLE("TX1 HPF Switch", LPASS_CDC_TX1_MUX_CTL, 3, 1, 0), @@ -553,22 +553,22 @@ static const struct snd_kcontrol_new msm8916_wcd_digital_snd_controls[] = { WCD_IIR_FILTER_CTL("IIR2 Band3", IIR2, BAND3), WCD_IIR_FILTER_CTL("IIR2 Band4", IIR2, BAND4), WCD_IIR_FILTER_CTL("IIR2 Band5", IIR2, BAND5), - SOC_SINGLE_SX_TLV("IIR1 INP1 Volume", LPASS_CDC_IIR1_GAIN_B1_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR1 INP2 Volume", LPASS_CDC_IIR1_GAIN_B2_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR1 INP3 Volume", LPASS_CDC_IIR1_GAIN_B3_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR1 INP4 Volume", LPASS_CDC_IIR1_GAIN_B4_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR2 INP1 Volume", LPASS_CDC_IIR2_GAIN_B1_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR2 INP2 Volume", LPASS_CDC_IIR2_GAIN_B2_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR2 INP3 Volume", LPASS_CDC_IIR2_GAIN_B3_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR2 INP4 Volume", LPASS_CDC_IIR2_GAIN_B4_CTL, - 0, -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("IIR1 INP1 Volume", 
LPASS_CDC_IIR1_GAIN_B1_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("IIR1 INP2 Volume", LPASS_CDC_IIR1_GAIN_B2_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("IIR1 INP3 Volume", LPASS_CDC_IIR1_GAIN_B3_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("IIR1 INP4 Volume", LPASS_CDC_IIR1_GAIN_B4_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("IIR2 INP1 Volume", LPASS_CDC_IIR2_GAIN_B1_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("IIR2 INP2 Volume", LPASS_CDC_IIR2_GAIN_B2_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("IIR2 INP3 Volume", LPASS_CDC_IIR2_GAIN_B3_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("IIR2 INP4 Volume", LPASS_CDC_IIR2_GAIN_B4_CTL, + -84, 40, digital_gain), }; @@ -1206,9 +1206,16 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev) dev_set_drvdata(dev, priv); - return devm_snd_soc_register_component(dev, &msm8916_wcd_digital, + ret = devm_snd_soc_register_component(dev, &msm8916_wcd_digital, msm8916_wcd_digital_dai, ARRAY_SIZE(msm8916_wcd_digital_dai)); + if (ret) + goto err_mclk; + + return 0; + +err_mclk: + clk_disable_unprepare(priv->mclk); err_clk: clk_disable_unprepare(priv->ahbclk); return ret; diff --git a/sound/soc/codecs/mt6660.c b/sound/soc/codecs/mt6660.c index d1797003c83d..3cee2ea4b85d 100644 --- a/sound/soc/codecs/mt6660.c +++ b/sound/soc/codecs/mt6660.c @@ -510,7 +510,11 @@ static int mt6660_i2c_probe(struct i2c_client *client, ret = devm_snd_soc_register_component(chip->dev, &mt6660_component_driver, &mt6660_codec_dai, 1); + if (ret) + pm_runtime_disable(chip->dev); + return ret; + probe_fail: _mt6660_chip_power_on(chip, 0); mutex_destroy(&chip->io_lock); diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c index 609aeeb27818..d831959d8ff7 100644 --- a/sound/soc/codecs/nau8822.c +++ b/sound/soc/codecs/nau8822.c @@ -740,6 +740,8 @@ static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source, pll_param->pll_int, pll_param->pll_frac, pll_param->mclk_scaler, pll_param->pre_factor); + snd_soc_component_update_bits(component, + NAU8822_REG_POWER_MANAGEMENT_1, NAU8822_PLL_EN_MASK, NAU8822_PLL_OFF); snd_soc_component_update_bits(component, NAU8822_REG_PLL_N, NAU8822_PLLMCLK_DIV2 | NAU8822_PLLN_MASK, (pll_param->pre_factor ? 
NAU8822_PLLMCLK_DIV2 : 0) | @@ -757,6 +759,8 @@ static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source, pll_param->mclk_scaler << NAU8822_MCLKSEL_SFT); snd_soc_component_update_bits(component, NAU8822_REG_CLOCKING, NAU8822_CLKM_MASK, NAU8822_CLKM_PLL); + snd_soc_component_update_bits(component, + NAU8822_REG_POWER_MANAGEMENT_1, NAU8822_PLL_EN_MASK, NAU8822_PLL_ON); return 0; } diff --git a/sound/soc/codecs/nau8822.h b/sound/soc/codecs/nau8822.h index 489191ff187e..b45d42c15de6 100644 --- a/sound/soc/codecs/nau8822.h +++ b/sound/soc/codecs/nau8822.h @@ -90,6 +90,9 @@ #define NAU8822_REFIMP_3K 0x3 #define NAU8822_IOBUF_EN (0x1 << 2) #define NAU8822_ABIAS_EN (0x1 << 3) +#define NAU8822_PLL_EN_MASK (0x1 << 5) +#define NAU8822_PLL_ON (0x1 << 5) +#define NAU8822_PLL_OFF (0x0 << 5) /* NAU8822_REG_AUDIO_INTERFACE (0x4) */ #define NAU8822_AIFMT_MASK (0x3 << 3) diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c index c8ccfa2fff84..a95fe3fff1db 100644 --- a/sound/soc/codecs/nau8824.c +++ b/sound/soc/codecs/nau8824.c @@ -1072,6 +1072,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream, struct snd_soc_component *component = dai->component; struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component); unsigned int val_len = 0, osr, ctrl_val, bclk_fs, bclk_div; + int err = -EINVAL; nau8824_sema_acquire(nau8824, HZ); @@ -1088,7 +1089,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream, osr &= NAU8824_DAC_OVERSAMPLE_MASK; if (nau8824_clock_check(nau8824, substream->stream, nau8824->fs, osr)) - return -EINVAL; + goto error; regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER, NAU8824_CLK_DAC_SRC_MASK, osr_dac_sel[osr].clk_src << NAU8824_CLK_DAC_SRC_SFT); @@ -1098,7 +1099,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream, osr &= NAU8824_ADC_SYNC_DOWN_MASK; if (nau8824_clock_check(nau8824, substream->stream, nau8824->fs, osr)) - return -EINVAL; + goto error; regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER, NAU8824_CLK_ADC_SRC_MASK, osr_adc_sel[osr].clk_src << NAU8824_CLK_ADC_SRC_SFT); @@ -1119,7 +1120,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream, else if (bclk_fs <= 256) bclk_div = 0; else - return -EINVAL; + goto error; regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_2, NAU8824_I2S_LRC_DIV_MASK | NAU8824_I2S_BLK_DIV_MASK, @@ -1140,15 +1141,17 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream, val_len |= NAU8824_I2S_DL_32; break; default: - return -EINVAL; + goto error; } regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_1, NAU8824_I2S_DL_MASK, val_len); + err = 0; + error: nau8824_sema_release(nau8824); - return 0; + return err; } static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) @@ -1157,8 +1160,6 @@ static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component); unsigned int ctrl1_val = 0, ctrl2_val = 0; - nau8824_sema_acquire(nau8824, HZ); - switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: ctrl2_val |= NAU8824_I2S_MS_MASTER; @@ -1200,6 +1201,8 @@ static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) return -EINVAL; } + nau8824_sema_acquire(nau8824, HZ); + regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_1, NAU8824_I2S_DF_MASK | NAU8824_I2S_BP_MASK | NAU8824_I2S_PCMB_EN, ctrl1_val); diff --git a/sound/soc/codecs/rk3328_codec.c b/sound/soc/codecs/rk3328_codec.c 
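[Editor's note: the nau8824 hunks just above make two related lock-discipline fixes: hw_params() now funnels every failure through a single exit label so the semaphore taken at entry is always released, and set_fmt() defers acquiring the semaphore until after all -EINVAL validation so error returns never hold it. A compile-able sketch of the first pattern, with all names hypothetical:]

#include <stdio.h>
#include <stdbool.h>

static int sema;			/* toy semaphore: 1 = held */
static void sema_acquire(void) { sema = 1; }
static void sema_release(void) { sema = 0; }
static bool clock_check_ok(int fs) { return fs == 48000; }

static int hw_params_shape(int fs)
{
	int err = -22;			/* -EINVAL until every step succeeds */

	sema_acquire();
	if (!clock_check_ok(fs))
		goto error;		/* was: return -EINVAL, leaking the semaphore */
	/* ... divider/format programming would go here ... */
	err = 0;
error:
	sema_release();			/* runs on success and failure alike */
	return err;
}

int main(void)
{
	printf("%d held=%d\n", hw_params_shape(44100), sema);	/* -22 held=0 */
	return 0;
}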
index aed18cbb9f68..e40b97929f6c 100644 --- a/sound/soc/codecs/rk3328_codec.c +++ b/sound/soc/codecs/rk3328_codec.c @@ -481,7 +481,7 @@ static int rk3328_platform_probe(struct platform_device *pdev) ret = clk_prepare_enable(rk3328->pclk); if (ret < 0) { dev_err(&pdev->dev, "failed to enable acodec pclk\n"); - return ret; + goto err_unprepare_mclk; } base = devm_platform_ioremap_resource(pdev, 0); diff --git a/sound/soc/codecs/rt1308-sdw.h b/sound/soc/codecs/rt1308-sdw.h index c5ce75666dcc..98293d73ebab 100644 --- a/sound/soc/codecs/rt1308-sdw.h +++ b/sound/soc/codecs/rt1308-sdw.h @@ -139,9 +139,11 @@ static const struct reg_default rt1308_reg_defaults[] = { { 0x3005, 0x23 }, { 0x3008, 0x02 }, { 0x300a, 0x00 }, + { 0xc000 | (RT1308_DATA_PATH << 4), 0x00 }, { 0xc003 | (RT1308_DAC_SET << 4), 0x00 }, { 0xc001 | (RT1308_POWER << 4), 0x00 }, { 0xc002 | (RT1308_POWER << 4), 0x00 }, + { 0xc000 | (RT1308_POWER_STATUS << 4), 0x00 }, }; #define RT1308_SDW_OFFSET 0xc000 diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c index 7081142a355e..c444a56df95b 100644 --- a/sound/soc/codecs/rt5514.c +++ b/sound/soc/codecs/rt5514.c @@ -419,7 +419,7 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol, } } - return 0; + return 1; } static const struct snd_kcontrol_new rt5514_snd_controls[] = { diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 420003d062c7..d1533e95a74f 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -4095,9 +4095,14 @@ static int rt5645_i2c_remove(struct i2c_client *i2c) if (i2c->irq) free_irq(i2c->irq, rt5645); + /* + * Since the rt5645_btn_check_callback() can queue jack_detect_work, + * the timer needs to be deleted first + */ + del_timer_sync(&rt5645->btn_check_timer); + cancel_delayed_work_sync(&rt5645->jack_detect_work); cancel_delayed_work_sync(&rt5645->rcclock_work); - del_timer_sync(&rt5645->btn_check_timer); regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies); diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 4c0e87e22b97..edde0323799a 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -1797,6 +1797,10 @@ static int sgtl5000_i2c_remove(struct i2c_client *client) { struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client); + regmap_write(sgtl5000->regmap, SGTL5000_CHIP_CLK_CTRL, SGTL5000_CHIP_CLK_CTRL_DEFAULT); + regmap_write(sgtl5000->regmap, SGTL5000_CHIP_DIG_POWER, SGTL5000_DIG_POWER_DEFAULT); + regmap_write(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, SGTL5000_ANA_POWER_DEFAULT); + clk_disable_unprepare(sgtl5000->mclk); regulator_bulk_disable(sgtl5000->num_supplies, sgtl5000->supplies); regulator_bulk_free(sgtl5000->num_supplies, sgtl5000->supplies); @@ -1804,6 +1808,11 @@ static int sgtl5000_i2c_remove(struct i2c_client *client) return 0; } +static void sgtl5000_i2c_shutdown(struct i2c_client *client) +{ + sgtl5000_i2c_remove(client); +} + static const struct i2c_device_id sgtl5000_id[] = { {"sgtl5000", 0}, {}, @@ -1824,6 +1833,7 @@ static struct i2c_driver sgtl5000_i2c_driver = { }, .probe = sgtl5000_i2c_probe, .remove = sgtl5000_i2c_remove, + .shutdown = sgtl5000_i2c_shutdown, .id_table = sgtl5000_id, }; diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h index 56ec5863f250..3a808c762299 100644 --- a/sound/soc/codecs/sgtl5000.h +++ b/sound/soc/codecs/sgtl5000.h @@ -80,6 +80,7 @@ /* * SGTL5000_CHIP_DIG_POWER */ +#define SGTL5000_DIG_POWER_DEFAULT 0x0000 #define SGTL5000_ADC_EN 0x0040 #define SGTL5000_DAC_EN 
0x0020 #define SGTL5000_DAP_POWERUP 0x0010 diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c index 14a193e48dc7..c8f6f5122cac 100644 --- a/sound/soc/codecs/tas2764.c +++ b/sound/soc/codecs/tas2764.c @@ -34,6 +34,9 @@ struct tas2764_priv { int v_sense_slot; int i_sense_slot; + + bool dac_powered; + bool unmuted; }; static void tas2764_reset(struct tas2764_priv *tas2764) @@ -42,40 +45,30 @@ static void tas2764_reset(struct tas2764_priv *tas2764) gpiod_set_value_cansleep(tas2764->reset_gpio, 0); msleep(20); gpiod_set_value_cansleep(tas2764->reset_gpio, 1); + usleep_range(1000, 2000); } snd_soc_component_write(tas2764->component, TAS2764_SW_RST, TAS2764_RST); + usleep_range(1000, 2000); } -static int tas2764_set_bias_level(struct snd_soc_component *component, - enum snd_soc_bias_level level) +static int tas2764_update_pwr_ctrl(struct tas2764_priv *tas2764) { - struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); + struct snd_soc_component *component = tas2764->component; + unsigned int val; + int ret; - switch (level) { - case SND_SOC_BIAS_ON: - snd_soc_component_update_bits(component, TAS2764_PWR_CTRL, - TAS2764_PWR_CTRL_MASK, - TAS2764_PWR_CTRL_ACTIVE); - break; - case SND_SOC_BIAS_STANDBY: - case SND_SOC_BIAS_PREPARE: - snd_soc_component_update_bits(component, TAS2764_PWR_CTRL, - TAS2764_PWR_CTRL_MASK, - TAS2764_PWR_CTRL_MUTE); - break; - case SND_SOC_BIAS_OFF: - snd_soc_component_update_bits(component, TAS2764_PWR_CTRL, - TAS2764_PWR_CTRL_MASK, - TAS2764_PWR_CTRL_SHUTDOWN); - break; + if (tas2764->dac_powered) + val = tas2764->unmuted ? + TAS2764_PWR_CTRL_ACTIVE : TAS2764_PWR_CTRL_MUTE; + else + val = TAS2764_PWR_CTRL_SHUTDOWN; - default: - dev_err(tas2764->dev, - "wrong power level setting %d\n", level); - return -EINVAL; - } + ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL, + TAS2764_PWR_CTRL_MASK, val); + if (ret < 0) + return ret; return 0; } @@ -107,12 +100,12 @@ static int tas2764_codec_resume(struct snd_soc_component *component) struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); int ret; - if (tas2764->sdz_gpio) + if (tas2764->sdz_gpio) { gpiod_set_value_cansleep(tas2764->sdz_gpio, 1); + usleep_range(1000, 2000); + } - ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL, - TAS2764_PWR_CTRL_MASK, - TAS2764_PWR_CTRL_ACTIVE); + ret = tas2764_update_pwr_ctrl(tas2764); if (ret < 0) return ret; @@ -131,7 +124,8 @@ static const char * const tas2764_ASI1_src[] = { }; static SOC_ENUM_SINGLE_DECL( - tas2764_ASI1_src_enum, TAS2764_TDM_CFG2, 4, tas2764_ASI1_src); + tas2764_ASI1_src_enum, TAS2764_TDM_CFG2, TAS2764_TDM_CFG2_SCFG_SHIFT, + tas2764_ASI1_src); static const struct snd_kcontrol_new tas2764_asi1_mux = SOC_DAPM_ENUM("ASI1 Source", tas2764_ASI1_src_enum); @@ -145,14 +139,12 @@ static int tas2764_dac_event(struct snd_soc_dapm_widget *w, switch (event) { case SND_SOC_DAPM_POST_PMU: - ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL, - TAS2764_PWR_CTRL_MASK, - TAS2764_PWR_CTRL_MUTE); + tas2764->dac_powered = true; + ret = tas2764_update_pwr_ctrl(tas2764); break; case SND_SOC_DAPM_PRE_PMD: - ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL, - TAS2764_PWR_CTRL_MASK, - TAS2764_PWR_CTRL_SHUTDOWN); + tas2764->dac_powered = false; + ret = tas2764_update_pwr_ctrl(tas2764); break; default: dev_err(tas2764->dev, "Unsupported event\n"); @@ -197,17 +189,11 @@ static const struct snd_soc_dapm_route tas2764_audio_map[] = { static int tas2764_mute(struct snd_soc_dai *dai, int mute, int 
direction) { - struct snd_soc_component *component = dai->component; - int ret; + struct tas2764_priv *tas2764 = + snd_soc_component_get_drvdata(dai->component); - ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL, - TAS2764_PWR_CTRL_MASK, - mute ? TAS2764_PWR_CTRL_MUTE : 0); - - if (ret < 0) - return ret; - - return 0; + tas2764->unmuted = !mute; + return tas2764_update_pwr_ctrl(tas2764); } static int tas2764_set_bitwidth(struct tas2764_priv *tas2764, int bitwidth) @@ -329,20 +315,22 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_component *component = dai->component; struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); - u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0; - int iface; + u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0; int ret; switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_IF: + asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; + fallthrough; case SND_SOC_DAIFMT_NB_NF: asi_cfg_1 = TAS2764_TDM_CFG1_RX_RISING; break; + case SND_SOC_DAIFMT_IB_IF: + asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; + fallthrough; case SND_SOC_DAIFMT_IB_NF: asi_cfg_1 = TAS2764_TDM_CFG1_RX_FALLING; break; - default: - dev_err(tas2764->dev, "ASI format Inverse is not found\n"); - return -EINVAL; } ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG1, @@ -353,13 +341,13 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: + asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; + fallthrough; case SND_SOC_DAIFMT_DSP_A: - iface = TAS2764_TDM_CFG2_SCFG_I2S; tdm_rx_start_slot = 1; break; case SND_SOC_DAIFMT_DSP_B: case SND_SOC_DAIFMT_LEFT_J: - iface = TAS2764_TDM_CFG2_SCFG_LEFT_J; tdm_rx_start_slot = 0; break; default: @@ -368,14 +356,15 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) return -EINVAL; } - ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG1, - TAS2764_TDM_CFG1_MASK, - (tdm_rx_start_slot << TAS2764_TDM_CFG1_51_SHIFT)); + ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG0, + TAS2764_TDM_CFG0_FRAME_START, + asi_cfg_0); if (ret < 0) return ret; - ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG2, - TAS2764_TDM_CFG2_SCFG_MASK, iface); + ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG1, + TAS2764_TDM_CFG1_MASK, + (tdm_rx_start_slot << TAS2764_TDM_CFG1_51_SHIFT)); if (ret < 0) return ret; @@ -397,20 +386,13 @@ static int tas2764_set_dai_tdm_slot(struct snd_soc_dai *dai, if (tx_mask == 0 || rx_mask != 0) return -EINVAL; - if (slots == 1) { - if (tx_mask != 1) - return -EINVAL; - left_slot = 0; - right_slot = 0; + left_slot = __ffs(tx_mask); + tx_mask &= ~(1 << left_slot); + if (tx_mask == 0) { + right_slot = left_slot; } else { - left_slot = __ffs(tx_mask); - tx_mask &= ~(1 << left_slot); - if (tx_mask == 0) { - right_slot = left_slot; - } else { - right_slot = __ffs(tx_mask); - tx_mask &= ~(1 << right_slot); - } + right_slot = __ffs(tx_mask); + tx_mask &= ~(1 << right_slot); } if (tx_mask != 0 || left_slot >= slots || right_slot >= slots) @@ -477,7 +459,7 @@ static struct snd_soc_dai_driver tas2764_dai_driver[] = { .id = 0, .playback = { .stream_name = "ASI1 Playback", - .channels_min = 2, + .channels_min = 1, .channels_max = 2, .rates = TAS2764_RATES, .formats = TAS2764_FORMATS, @@ -501,8 +483,10 @@ static int tas2764_codec_probe(struct snd_soc_component *component) tas2764->component = component; - if (tas2764->sdz_gpio) + if (tas2764->sdz_gpio) { 
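
/*
 * Illustrative sketch (editor's addition, not a hunk of this patch): the
 * tas2764_set_dai_tdm_slot() change above drops the special-cased
 * "slots == 1" branch in favour of a generic parse of tx_mask. The lowest
 * set bit selects the left slot; if a second bit remains it selects the
 * right slot, otherwise both channels share the single slot. The function
 * and parameter names below are hypothetical stand-ins:
 */
#include <linux/bitops.h>
#include <linux/errno.h>

static int parse_tdm_tx_slots(unsigned int tx_mask, unsigned int slots,
			      unsigned int *left_slot, unsigned int *right_slot)
{
	/* caller has already rejected tx_mask == 0, as the driver does */
	*left_slot = __ffs(tx_mask);		/* lowest set bit */
	tx_mask &= ~(1 << *left_slot);
	if (tx_mask == 0) {
		*right_slot = *left_slot;	/* mono: both channels share it */
	} else {
		*right_slot = __ffs(tx_mask);
		tx_mask &= ~(1 << *right_slot);
	}
	/* leftover bits or out-of-range slots are invalid */
	if (tx_mask != 0 || *left_slot >= slots || *right_slot >= slots)
		return -EINVAL;
	return 0;
}
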
gpiod_set_value_cansleep(tas2764->sdz_gpio, 1); + usleep_range(1000, 2000); + } tas2764_reset(tas2764); @@ -516,22 +500,16 @@ static int tas2764_codec_probe(struct snd_soc_component *component) if (ret < 0) return ret; - ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL, - TAS2764_PWR_CTRL_MASK, - TAS2764_PWR_CTRL_MUTE); - if (ret < 0) - return ret; - return 0; } static DECLARE_TLV_DB_SCALE(tas2764_digital_tlv, 1100, 50, 0); -static DECLARE_TLV_DB_SCALE(tas2764_playback_volume, -10000, 50, 0); +static DECLARE_TLV_DB_SCALE(tas2764_playback_volume, -10050, 50, 1); static const struct snd_kcontrol_new tas2764_snd_controls[] = { SOC_SINGLE_TLV("Speaker Volume", TAS2764_DVC, 0, TAS2764_DVC_MAX, 1, tas2764_playback_volume), - SOC_SINGLE_TLV("Amp Gain Volume", TAS2764_CHNL_0, 0, 0x14, 0, + SOC_SINGLE_TLV("Amp Gain Volume", TAS2764_CHNL_0, 1, 0x14, 0, tas2764_digital_tlv), }; @@ -539,7 +517,6 @@ static const struct snd_soc_component_driver soc_component_driver_tas2764 = { .probe = tas2764_codec_probe, .suspend = tas2764_codec_suspend, .resume = tas2764_codec_resume, - .set_bias_level = tas2764_set_bias_level, .controls = tas2764_snd_controls, .num_controls = ARRAY_SIZE(tas2764_snd_controls), .dapm_widgets = tas2764_dapm_widgets, @@ -556,7 +533,7 @@ static const struct reg_default tas2764_reg_defaults[] = { { TAS2764_SW_RST, 0x00 }, { TAS2764_PWR_CTRL, 0x1a }, { TAS2764_DVC, 0x00 }, - { TAS2764_CHNL_0, 0x00 }, + { TAS2764_CHNL_0, 0x28 }, { TAS2764_TDM_CFG0, 0x09 }, { TAS2764_TDM_CFG1, 0x02 }, { TAS2764_TDM_CFG2, 0x0a }, diff --git a/sound/soc/codecs/tas2764.h b/sound/soc/codecs/tas2764.h index 67d6fd903c42..f015f22a083b 100644 --- a/sound/soc/codecs/tas2764.h +++ b/sound/soc/codecs/tas2764.h @@ -47,6 +47,7 @@ #define TAS2764_TDM_CFG0_MASK GENMASK(3, 1) #define TAS2764_TDM_CFG0_44_1_48KHZ BIT(3) #define TAS2764_TDM_CFG0_88_2_96KHZ (BIT(3) | BIT(1)) +#define TAS2764_TDM_CFG0_FRAME_START BIT(0) /* TDM Configuration Reg1 */ #define TAS2764_TDM_CFG1 TAS2764_REG(0X0, 0x09) @@ -66,10 +67,7 @@ #define TAS2764_TDM_CFG2_RXS_16BITS 0x0 #define TAS2764_TDM_CFG2_RXS_24BITS BIT(0) #define TAS2764_TDM_CFG2_RXS_32BITS BIT(1) -#define TAS2764_TDM_CFG2_SCFG_MASK GENMASK(5, 4) -#define TAS2764_TDM_CFG2_SCFG_I2S 0x0 -#define TAS2764_TDM_CFG2_SCFG_LEFT_J BIT(4) -#define TAS2764_TDM_CFG2_SCFG_RIGHT_J BIT(5) +#define TAS2764_TDM_CFG2_SCFG_SHIFT 4 /* TDM Configuration Reg3 */ #define TAS2764_TDM_CFG3 TAS2764_REG(0X0, 0x0c) diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c index 315fd9d971c8..c213c8096142 100644 --- a/sound/soc/codecs/tas2770.c +++ b/sound/soc/codecs/tas2770.c @@ -46,34 +46,22 @@ static void tas2770_reset(struct tas2770_priv *tas2770) usleep_range(1000, 2000); } -static int tas2770_set_bias_level(struct snd_soc_component *component, - enum snd_soc_bias_level level) +static int tas2770_update_pwr_ctrl(struct tas2770_priv *tas2770) { - struct tas2770_priv *tas2770 = - snd_soc_component_get_drvdata(component); + struct snd_soc_component *component = tas2770->component; + unsigned int val; + int ret; - switch (level) { - case SND_SOC_BIAS_ON: - snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, - TAS2770_PWR_CTRL_MASK, - TAS2770_PWR_CTRL_ACTIVE); - break; - case SND_SOC_BIAS_STANDBY: - case SND_SOC_BIAS_PREPARE: - snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, - TAS2770_PWR_CTRL_MASK, - TAS2770_PWR_CTRL_MUTE); - break; - case SND_SOC_BIAS_OFF: - snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, - TAS2770_PWR_CTRL_MASK, - TAS2770_PWR_CTRL_SHUTDOWN); 
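
/*
 * Illustrative sketch (editor's addition, not a hunk of this patch): both
 * the tas2764 and tas2770 changes in this series delete set_bias_level()
 * and instead derive the PWR_CTRL value from two tracked flags, so a DAPM
 * event and a mute_stream() call can no longer race each other into an
 * inconsistent register state. A minimal model of that decision, with
 * hypothetical names:
 */
#include <linux/types.h>

enum pwr_ctrl_val { PWR_ACTIVE, PWR_MUTE, PWR_SHUTDOWN };

struct amp_state {
	bool dac_powered;	/* set/cleared by the DAPM DAC widget event */
	bool unmuted;		/* set/cleared by the DAI mute_stream callback */
};

static enum pwr_ctrl_val amp_pwr_ctrl(const struct amp_state *s)
{
	if (!s->dac_powered)
		return PWR_SHUTDOWN;	/* DAC down always wins */
	return s->unmuted ? PWR_ACTIVE : PWR_MUTE;
}
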
- break; + if (tas2770->dac_powered) + val = tas2770->unmuted ? + TAS2770_PWR_CTRL_ACTIVE : TAS2770_PWR_CTRL_MUTE; + else + val = TAS2770_PWR_CTRL_SHUTDOWN; - default: - dev_err(tas2770->dev, "wrong power level setting %d\n", level); - return -EINVAL; - } + ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, + TAS2770_PWR_CTRL_MASK, val); + if (ret < 0) + return ret; return 0; } @@ -114,9 +102,7 @@ static int tas2770_codec_resume(struct snd_soc_component *component) gpiod_set_value_cansleep(tas2770->sdz_gpio, 1); usleep_range(1000, 2000); } else { - ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, - TAS2770_PWR_CTRL_MASK, - TAS2770_PWR_CTRL_ACTIVE); + ret = tas2770_update_pwr_ctrl(tas2770); if (ret < 0) return ret; } @@ -152,24 +138,19 @@ static int tas2770_dac_event(struct snd_soc_dapm_widget *w, switch (event) { case SND_SOC_DAPM_POST_PMU: - ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, - TAS2770_PWR_CTRL_MASK, - TAS2770_PWR_CTRL_MUTE); + tas2770->dac_powered = 1; + ret = tas2770_update_pwr_ctrl(tas2770); break; case SND_SOC_DAPM_PRE_PMD: - ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, - TAS2770_PWR_CTRL_MASK, - TAS2770_PWR_CTRL_SHUTDOWN); + tas2770->dac_powered = 0; + ret = tas2770_update_pwr_ctrl(tas2770); break; default: dev_err(tas2770->dev, "Not supported evevt\n"); return -EINVAL; } - if (ret < 0) - return ret; - - return 0; + return ret; } static const struct snd_kcontrol_new isense_switch = @@ -203,21 +184,11 @@ static const struct snd_soc_dapm_route tas2770_audio_map[] = { static int tas2770_mute(struct snd_soc_dai *dai, int mute, int direction) { struct snd_soc_component *component = dai->component; - int ret; + struct tas2770_priv *tas2770 = + snd_soc_component_get_drvdata(component); - if (mute) - ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, - TAS2770_PWR_CTRL_MASK, - TAS2770_PWR_CTRL_MUTE); - else - ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, - TAS2770_PWR_CTRL_MASK, - TAS2770_PWR_CTRL_ACTIVE); - - if (ret < 0) - return ret; - - return 0; + tas2770->unmuted = !mute; + return tas2770_update_pwr_ctrl(tas2770); } static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth) @@ -337,7 +308,7 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) struct snd_soc_component *component = dai->component; struct tas2770_priv *tas2770 = snd_soc_component_get_drvdata(component); - u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0; + u8 tdm_rx_start_slot = 0, invert_fpol = 0, fpol_preinv = 0, asi_cfg_1 = 0; int ret; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { @@ -349,9 +320,15 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_IF: + invert_fpol = 1; + fallthrough; case SND_SOC_DAIFMT_NB_NF: asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_RSING; break; + case SND_SOC_DAIFMT_IB_IF: + invert_fpol = 1; + fallthrough; case SND_SOC_DAIFMT_IB_NF: asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_FALING; break; @@ -369,15 +346,19 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: tdm_rx_start_slot = 1; + fpol_preinv = 0; break; case SND_SOC_DAIFMT_DSP_A: tdm_rx_start_slot = 0; + fpol_preinv = 1; break; case SND_SOC_DAIFMT_DSP_B: tdm_rx_start_slot = 1; + fpol_preinv = 1; break; case SND_SOC_DAIFMT_LEFT_J: tdm_rx_start_slot = 0; + fpol_preinv = 1; break; default: dev_err(tas2770->dev, @@ -391,6 +372,14 @@ static int 
tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) if (ret < 0) return ret; + ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG0, + TAS2770_TDM_CFG_REG0_FPOL_MASK, + (fpol_preinv ^ invert_fpol) + ? TAS2770_TDM_CFG_REG0_FPOL_RSING + : TAS2770_TDM_CFG_REG0_FPOL_FALING); + if (ret < 0) + return ret; + return 0; } @@ -406,21 +395,13 @@ static int tas2770_set_dai_tdm_slot(struct snd_soc_dai *dai, if (tx_mask == 0 || rx_mask != 0) return -EINVAL; - if (slots == 1) { - if (tx_mask != 1) - return -EINVAL; - - left_slot = 0; - right_slot = 0; + left_slot = __ffs(tx_mask); + tx_mask &= ~(1 << left_slot); + if (tx_mask == 0) { + right_slot = left_slot; } else { - left_slot = __ffs(tx_mask); - tx_mask &= ~(1 << left_slot); - if (tx_mask == 0) { - right_slot = left_slot; - } else { - right_slot = __ffs(tx_mask); - tx_mask &= ~(1 << right_slot); - } + right_slot = __ffs(tx_mask); + tx_mask &= ~(1 << right_slot); } if (tx_mask != 0 || left_slot >= slots || right_slot >= slots) @@ -489,7 +470,7 @@ static struct snd_soc_dai_driver tas2770_dai_driver[] = { .id = 0, .playback = { .stream_name = "ASI1 Playback", - .channels_min = 2, + .channels_min = 1, .channels_max = 2, .rates = TAS2770_RATES, .formats = TAS2770_FORMATS, @@ -506,6 +487,8 @@ static struct snd_soc_dai_driver tas2770_dai_driver[] = { }, }; +static const struct regmap_config tas2770_i2c_regmap; + static int tas2770_codec_probe(struct snd_soc_component *component) { struct tas2770_priv *tas2770 = @@ -519,6 +502,7 @@ static int tas2770_codec_probe(struct snd_soc_component *component) } tas2770_reset(tas2770); + regmap_reinit_cache(tas2770->regmap, &tas2770_i2c_regmap); return 0; } @@ -537,7 +521,6 @@ static const struct snd_soc_component_driver soc_component_driver_tas2770 = { .probe = tas2770_codec_probe, .suspend = tas2770_codec_suspend, .resume = tas2770_codec_resume, - .set_bias_level = tas2770_set_bias_level, .controls = tas2770_snd_controls, .num_controls = ARRAY_SIZE(tas2770_snd_controls), .dapm_widgets = tas2770_dapm_widgets, diff --git a/sound/soc/codecs/tas2770.h b/sound/soc/codecs/tas2770.h index d156666bcc55..f75f40781ab1 100644 --- a/sound/soc/codecs/tas2770.h +++ b/sound/soc/codecs/tas2770.h @@ -41,6 +41,9 @@ #define TAS2770_TDM_CFG_REG0_31_44_1_48KHZ 0x6 #define TAS2770_TDM_CFG_REG0_31_88_2_96KHZ 0x8 #define TAS2770_TDM_CFG_REG0_31_176_4_192KHZ 0xa +#define TAS2770_TDM_CFG_REG0_FPOL_MASK BIT(0) +#define TAS2770_TDM_CFG_REG0_FPOL_RSING 0 +#define TAS2770_TDM_CFG_REG0_FPOL_FALING 1 /* TDM Configuration Reg1 */ #define TAS2770_TDM_CFG_REG1 TAS2770_REG(0X0, 0x0B) #define TAS2770_TDM_CFG_REG1_MASK GENMASK(5, 1) @@ -135,6 +138,8 @@ struct tas2770_priv { struct device *dev; int v_sense_slot; int i_sense_slot; + bool dac_powered; + bool unmuted; }; #endif /* __TAS2770__ */ diff --git a/sound/soc/codecs/tscs454.c b/sound/soc/codecs/tscs454.c index d0af16b4db2f..a6f339bb4771 100644 --- a/sound/soc/codecs/tscs454.c +++ b/sound/soc/codecs/tscs454.c @@ -3115,18 +3115,17 @@ static int set_aif_sample_format(struct snd_soc_component *component, unsigned int width; int ret; - switch (format) { - case SNDRV_PCM_FORMAT_S16_LE: + switch (snd_pcm_format_width(format)) { + case 16: width = FV_WL_16; break; - case SNDRV_PCM_FORMAT_S20_3LE: + case 20: width = FV_WL_20; break; - case SNDRV_PCM_FORMAT_S24_3LE: + case 24: width = FV_WL_24; break; - case SNDRV_PCM_FORMAT_S24_LE: - case SNDRV_PCM_FORMAT_S32_LE: + case 32: width = FV_WL_32; break; default: @@ -3321,6 +3320,7 @@ static const struct snd_soc_component_driver 
soc_component_dev_tscs454 = { .num_dapm_routes = ARRAY_SIZE(tscs454_intercon), .controls = tscs454_snd_controls, .num_controls = ARRAY_SIZE(tscs454_snd_controls), + .endianness = 1, }; #define TSCS454_RATES SNDRV_PCM_RATE_8000_96000 diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c index 2677d0c3b19b..33c29a1f52d0 100644 --- a/sound/soc/codecs/wcd9335.c +++ b/sound/soc/codecs/wcd9335.c @@ -1971,8 +1971,8 @@ static int wcd9335_trigger(struct snd_pcm_substream *substream, int cmd, case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - slim_stream_unprepare(dai_data->sruntime); slim_stream_disable(dai_data->sruntime); + slim_stream_unprepare(dai_data->sruntime); break; default: break; @@ -2252,51 +2252,42 @@ static int wcd9335_rx_hph_mode_put(struct snd_kcontrol *kc, static const struct snd_kcontrol_new wcd9335_snd_controls[] = { /* -84dB min - 40dB max */ - SOC_SINGLE_SX_TLV("RX0 Digital Volume", WCD9335_CDC_RX0_RX_VOL_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX1 Digital Volume", WCD9335_CDC_RX1_RX_VOL_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX2 Digital Volume", WCD9335_CDC_RX2_RX_VOL_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX3 Digital Volume", WCD9335_CDC_RX3_RX_VOL_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX4 Digital Volume", WCD9335_CDC_RX4_RX_VOL_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX5 Digital Volume", WCD9335_CDC_RX5_RX_VOL_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX6 Digital Volume", WCD9335_CDC_RX6_RX_VOL_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX7 Digital Volume", WCD9335_CDC_RX7_RX_VOL_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX8 Digital Volume", WCD9335_CDC_RX8_RX_VOL_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX0 Mix Digital Volume", - WCD9335_CDC_RX0_RX_VOL_MIX_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX1 Mix Digital Volume", - WCD9335_CDC_RX1_RX_VOL_MIX_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX2 Mix Digital Volume", - WCD9335_CDC_RX2_RX_VOL_MIX_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX3 Mix Digital Volume", - WCD9335_CDC_RX3_RX_VOL_MIX_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX4 Mix Digital Volume", - WCD9335_CDC_RX4_RX_VOL_MIX_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX5 Mix Digital Volume", - WCD9335_CDC_RX5_RX_VOL_MIX_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX6 Mix Digital Volume", - WCD9335_CDC_RX6_RX_VOL_MIX_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX7 Mix Digital Volume", - WCD9335_CDC_RX7_RX_VOL_MIX_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX8 Mix Digital Volume", - WCD9335_CDC_RX8_RX_VOL_MIX_CTL, - 0, -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX0 Digital Volume", WCD9335_CDC_RX0_RX_VOL_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX1 Digital Volume", WCD9335_CDC_RX1_RX_VOL_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX2 Digital Volume", WCD9335_CDC_RX2_RX_VOL_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX3 Digital Volume", WCD9335_CDC_RX3_RX_VOL_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX4 Digital Volume", WCD9335_CDC_RX4_RX_VOL_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX5 Digital Volume", WCD9335_CDC_RX5_RX_VOL_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX6 Digital Volume", WCD9335_CDC_RX6_RX_VOL_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX7 Digital Volume", WCD9335_CDC_RX7_RX_VOL_CTL, + -84, 40, 
digital_gain), + SOC_SINGLE_S8_TLV("RX8 Digital Volume", WCD9335_CDC_RX8_RX_VOL_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX0 Mix Digital Volume", WCD9335_CDC_RX0_RX_VOL_MIX_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX1 Mix Digital Volume", WCD9335_CDC_RX1_RX_VOL_MIX_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX2 Mix Digital Volume", WCD9335_CDC_RX2_RX_VOL_MIX_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX3 Mix Digital Volume", WCD9335_CDC_RX3_RX_VOL_MIX_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX4 Mix Digital Volume", WCD9335_CDC_RX4_RX_VOL_MIX_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX5 Mix Digital Volume", WCD9335_CDC_RX5_RX_VOL_MIX_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX6 Mix Digital Volume", WCD9335_CDC_RX6_RX_VOL_MIX_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX7 Mix Digital Volume", WCD9335_CDC_RX7_RX_VOL_MIX_CTL, + -84, 40, digital_gain), + SOC_SINGLE_S8_TLV("RX8 Mix Digital Volume", WCD9335_CDC_RX8_RX_VOL_MIX_CTL, + -84, 40, digital_gain), SOC_ENUM("RX INT0_1 HPF cut off", cf_int0_1_enum), SOC_ENUM("RX INT0_2 HPF cut off", cf_int0_2_enum), SOC_ENUM("RX INT1_1 HPF cut off", cf_int1_1_enum), diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c index 8540ac230d0e..104751ac6cd1 100644 --- a/sound/soc/codecs/wcd934x.c +++ b/sound/soc/codecs/wcd934x.c @@ -1188,29 +1188,7 @@ static int wcd934x_set_sido_input_src(struct wcd934x_codec *wcd, int sido_src) if (sido_src == wcd->sido_input_src) return 0; - if (sido_src == SIDO_SOURCE_INTERNAL) { - regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL, - WCD934X_ANA_BUCK_HI_ACCU_EN_MASK, 0); - usleep_range(100, 110); - regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL, - WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK, 0x0); - usleep_range(100, 110); - regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO, - WCD934X_ANA_RCO_BG_EN_MASK, 0); - usleep_range(100, 110); - regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL, - WCD934X_ANA_BUCK_PRE_EN1_MASK, - WCD934X_ANA_BUCK_PRE_EN1_ENABLE); - usleep_range(100, 110); - regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL, - WCD934X_ANA_BUCK_PRE_EN2_MASK, - WCD934X_ANA_BUCK_PRE_EN2_ENABLE); - usleep_range(100, 110); - regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL, - WCD934X_ANA_BUCK_HI_ACCU_EN_MASK, - WCD934X_ANA_BUCK_HI_ACCU_ENABLE); - usleep_range(100, 110); - } else if (sido_src == SIDO_SOURCE_RCO_BG) { + if (sido_src == SIDO_SOURCE_RCO_BG) { regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO, WCD934X_ANA_RCO_BG_EN_MASK, WCD934X_ANA_RCO_BG_ENABLE); @@ -1296,8 +1274,6 @@ static int wcd934x_disable_ana_bias_and_syclk(struct wcd934x_codec *wcd) regmap_update_bits(wcd->regmap, WCD934X_CLK_SYS_MCLK_PRG, WCD934X_EXT_CLK_BUF_EN_MASK | WCD934X_MCLK_EN_MASK, 0x0); - wcd934x_set_sido_input_src(wcd, SIDO_SOURCE_INTERNAL); - regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS, WCD934X_ANA_BIAS_EN_MASK, 0); regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS, @@ -1853,8 +1829,8 @@ static int wcd934x_trigger(struct snd_pcm_substream *substream, int cmd, case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - slim_stream_unprepare(dai_data->sruntime); slim_stream_disable(dai_data->sruntime); + slim_stream_unprepare(dai_data->sruntime); break; default: break; diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index 72e165cc6443..97ece3114b3d 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c @@ -536,7 +536,7 @@ static int wm2000_anc_transition(struct 
wm2000_priv *wm2000, { struct i2c_client *i2c = wm2000->i2c; int i, j; - int ret; + int ret = 0; if (wm2000->anc_mode == mode) return 0; @@ -566,13 +566,13 @@ static int wm2000_anc_transition(struct wm2000_priv *wm2000, ret = anc_transitions[i].step[j](i2c, anc_transitions[i].analogue); if (ret != 0) - return ret; + break; } if (anc_transitions[i].dest == ANC_OFF) clk_disable_unprepare(wm2000->mclk); - return 0; + return ret; } static int wm2000_anc_set_mode(struct wm2000_priv *wm2000) diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index 4238929b2375..d0cef982215d 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c @@ -413,6 +413,7 @@ static int wm5110_put_dre(struct snd_kcontrol *kcontrol, unsigned int rnew = (!!ucontrol->value.integer.value[1]) << mc->rshift; unsigned int lold, rold; unsigned int lena, rena; + bool change = false; int ret; snd_soc_dapm_mutex_lock(dapm); @@ -440,8 +441,8 @@ static int wm5110_put_dre(struct snd_kcontrol *kcontrol, goto err; } - ret = regmap_update_bits(arizona->regmap, ARIZONA_DRE_ENABLE, - mask, lnew | rnew); + ret = regmap_update_bits_check(arizona->regmap, ARIZONA_DRE_ENABLE, + mask, lnew | rnew, &change); if (ret) { dev_err(arizona->dev, "Failed to set DRE: %d\n", ret); goto err; @@ -454,6 +455,9 @@ static int wm5110_put_dre(struct snd_kcontrol *kcontrol, if (!rnew && rold) wm5110_clear_pga_volume(arizona, mc->rshift); + if (change) + ret = 1; + err: snd_soc_dapm_mutex_unlock(dapm); diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c index 304bf725a613..24a009d73f1e 100644 --- a/sound/soc/codecs/wm8731.c +++ b/sound/soc/codecs/wm8731.c @@ -602,7 +602,7 @@ static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731) ret = wm8731_reset(wm8731->regmap); if (ret < 0) { dev_err(dev, "Failed to issue reset: %d\n", ret); - goto err_regulator_enable; + goto err; } /* Clear POWEROFF, keep everything else disabled */ @@ -619,10 +619,7 @@ static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731) regcache_mark_dirty(wm8731->regmap); -err_regulator_enable: - /* Regulators will be enabled by bias management */ - regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies); - +err: return ret; } @@ -766,21 +763,27 @@ static int wm8731_i2c_probe(struct i2c_client *i2c, ret = PTR_ERR(wm8731->regmap); dev_err(&i2c->dev, "Failed to allocate register map: %d\n", ret); - return ret; + goto err_regulator_enable; } ret = wm8731_hw_init(&i2c->dev, wm8731); if (ret != 0) - return ret; + goto err_regulator_enable; ret = devm_snd_soc_register_component(&i2c->dev, &soc_component_dev_wm8731, &wm8731_dai, 1); if (ret != 0) { dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret); - return ret; + goto err_regulator_enable; } return 0; + +err_regulator_enable: + /* Regulators will be enabled by bias management */ + regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies); + + return ret; } static int wm8731_i2c_remove(struct i2c_client *client) diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c index 3bce9a14f0f3..f7256761d043 100644 --- a/sound/soc/codecs/wm8958-dsp2.c +++ b/sound/soc/codecs/wm8958-dsp2.c @@ -530,7 +530,7 @@ static int wm8958_mbc_put(struct snd_kcontrol *kcontrol, wm8958_dsp_apply(component, mbc, wm8994->mbc_ena[mbc]); - return 0; + return 1; } #define WM8958_MBC_SWITCH(xname, xval) {\ @@ -656,7 +656,7 @@ static int wm8958_vss_put(struct snd_kcontrol *kcontrol, wm8958_dsp_apply(component, vss, wm8994->vss_ena[vss]); - return 0; + 
return 1; } @@ -730,7 +730,7 @@ static int wm8958_hpf_put(struct snd_kcontrol *kcontrol, wm8958_dsp_apply(component, hpf % 3, ucontrol->value.integer.value[0]); - return 0; + return 1; } #define WM8958_HPF_SWITCH(xname, xval) {\ @@ -824,7 +824,7 @@ static int wm8958_enh_eq_put(struct snd_kcontrol *kcontrol, wm8958_dsp_apply(component, eq, ucontrol->value.integer.value[0]); - return 0; + return 1; } #define WM8958_ENH_EQ_SWITCH(xname, xval) {\
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 0bd3bbc2aacf..57aeded978c2 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -1840,6 +1840,49 @@ SOC_SINGLE_TLV("SPKOUTR Mixer DACR Volume", WM8962_SPEAKER_MIXER_5, 4, 1, 0, inmix_tlv), }; +static int tp_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + int ret, reg, val, mask; + struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + + ret = pm_runtime_resume_and_get(component->dev); + if (ret < 0) { + dev_err(component->dev, "Failed to resume device: %d\n", ret); + return ret; + } + + reg = WM8962_ADDITIONAL_CONTROL_4; + + if (!strcmp(w->name, "TEMP_HP")) { + mask = WM8962_TEMP_ENA_HP_MASK; + val = WM8962_TEMP_ENA_HP; + } else if (!strcmp(w->name, "TEMP_SPK")) { + mask = WM8962_TEMP_ENA_SPK_MASK; + val = WM8962_TEMP_ENA_SPK; + } else { + pm_runtime_put(component->dev); + return -EINVAL; + } + + switch (event) { + case SND_SOC_DAPM_POST_PMD: + val = 0; + fallthrough; + case SND_SOC_DAPM_POST_PMU: + ret = snd_soc_component_update_bits(component, reg, mask, val); + break; + default: + WARN(1, "Invalid event %d\n", event); + pm_runtime_put(component->dev); + return -EINVAL; + } + + pm_runtime_put(component->dev); + + return 0; +} + static int cp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -2133,8 +2176,10 @@ SND_SOC_DAPM_SUPPLY("TOCLK", WM8962_ADDITIONAL_CONTROL_1, 0, 0, NULL, 0), SND_SOC_DAPM_SUPPLY_S("DSP2", 1, WM8962_DSP2_POWER_MANAGEMENT, WM8962_DSP2_ENA_SHIFT, 0, dsp2_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), -SND_SOC_DAPM_SUPPLY("TEMP_HP", WM8962_ADDITIONAL_CONTROL_4, 2, 0, NULL, 0), -SND_SOC_DAPM_SUPPLY("TEMP_SPK", WM8962_ADDITIONAL_CONTROL_4, 1, 0, NULL, 0), +SND_SOC_DAPM_SUPPLY("TEMP_HP", SND_SOC_NOPM, 0, 0, tp_event, + SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD), +SND_SOC_DAPM_SUPPLY("TEMP_SPK", SND_SOC_NOPM, 0, 0, tp_event, + SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_MIXER("INPGAL", WM8962_LEFT_INPUT_PGA_CONTROL, 4, 0, inpgal, ARRAY_SIZE(inpgal)), @@ -2444,6 +2489,14 @@ static void wm8962_configure_bclk(struct snd_soc_component *component) snd_soc_component_update_bits(component, WM8962_CLOCKING2, WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA); + /* The DSPCLK_DIV field in the WM8962_CLOCKING1 register is used to + * generate the correct frequencies for LRCLK and BCLK. Sometimes this + * read-only value is not updated in time after SYSCLK is enabled, which + * results in wrong clock calculations. Introduce a delay here to wait + * for the register to report its latest value; testing shows the delay + * needs to be at least 500~1000us.
+ */ + usleep_range(500, 1000); dspclk = snd_soc_component_read(component, WM8962_CLOCKING1); if (snd_soc_component_get_bias_level(component) != SND_SOC_BIAS_ON) @@ -3760,6 +3813,11 @@ static int wm8962_i2c_probe(struct i2c_client *i2c, if (ret < 0) goto err_pm_runtime; + regmap_update_bits(wm8962->regmap, WM8962_ADDITIONAL_CONTROL_4, + WM8962_TEMP_ENA_HP_MASK, 0); + regmap_update_bits(wm8962->regmap, WM8962_ADDITIONAL_CONTROL_4, + WM8962_TEMP_ENA_SPK_MASK, 0); + regcache_cache_only(wm8962->regmap, true); /* The drivers should power up as needed */ @@ -3864,6 +3922,7 @@ static int wm8962_runtime_suspend(struct device *dev) #endif static const struct dev_pm_ops wm8962_pm = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(wm8962_runtime_suspend, wm8962_runtime_resume, NULL) }; diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 51d95437e0fd..10189f44af28 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -800,7 +800,7 @@ int wm_adsp_fw_put(struct snd_kcontrol *kcontrol, struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; struct wm_adsp *dsp = snd_soc_component_get_drvdata(component); - int ret = 0; + int ret = 1; if (ucontrol->value.enumerated.item[0] == dsp[e->shift_l].fw) return 0; diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c index e13271ea84de..29cf9234984d 100644 --- a/sound/soc/fsl/eukrea-tlv320.c +++ b/sound/soc/fsl/eukrea-tlv320.c @@ -86,7 +86,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev) int ret; int int_port = 0, ext_port; struct device_node *np = pdev->dev.of_node; - struct device_node *ssi_np = NULL, *codec_np = NULL; + struct device_node *ssi_np = NULL, *codec_np = NULL, *tmp_np = NULL; eukrea_tlv320.dev = &pdev->dev; if (np) { @@ -143,7 +143,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev) } if (machine_is_eukrea_cpuimx27() || - of_find_compatible_node(NULL, NULL, "fsl,imx21-audmux")) { + (tmp_np = of_find_compatible_node(NULL, NULL, "fsl,imx21-audmux"))) { imx_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0, IMX_AUDMUX_V1_PCR_SYN | IMX_AUDMUX_V1_PCR_TFSDIR | @@ -158,10 +158,11 @@ static int eukrea_tlv320_probe(struct platform_device *pdev) IMX_AUDMUX_V1_PCR_SYN | IMX_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_HPCR1_SSI0) ); + of_node_put(tmp_np); } else if (machine_is_eukrea_cpuimx25sd() || machine_is_eukrea_cpuimx35sd() || machine_is_eukrea_cpuimx51sd() || - of_find_compatible_node(NULL, NULL, "fsl,imx31-audmux")) { + (tmp_np = of_find_compatible_node(NULL, NULL, "fsl,imx31-audmux"))) { if (!np) ext_port = machine_is_eukrea_cpuimx25sd() ? 
4 : 3; @@ -178,6 +179,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev) IMX_AUDMUX_V2_PTCR_SYN, IMX_AUDMUX_V2_PDCR_RXDSEL(int_port) ); + of_node_put(tmp_np); } else { if (np) { /* The eukrea,asoc-tlv320 driver was explicitly
diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c index 60951a8aabd3..3cf1f40e6892 100644 --- a/sound/soc/fsl/fsl_easrc.c +++ b/sound/soc/fsl/fsl_easrc.c @@ -476,7 +476,8 @@ static int fsl_easrc_prefilter_config(struct fsl_asrc *easrc, struct fsl_asrc_pair *ctx; struct device *dev; u32 inrate, outrate, offset = 0; - u32 in_s_rate, out_s_rate, in_s_fmt, out_s_fmt; + u32 in_s_rate, out_s_rate; + snd_pcm_format_t in_s_fmt, out_s_fmt; int ret, i; if (!easrc) @@ -1873,6 +1874,7 @@ static int fsl_easrc_probe(struct platform_device *pdev) struct resource *res; struct device_node *np; void __iomem *regs; + u32 asrc_fmt = 0; int ret, irq; easrc = devm_kzalloc(dev, sizeof(*easrc), GFP_KERNEL); @@ -1939,13 +1941,14 @@ static int fsl_easrc_probe(struct platform_device *pdev) return ret; } - ret = of_property_read_u32(np, "fsl,asrc-format", &easrc->asrc_format); + ret = of_property_read_u32(np, "fsl,asrc-format", &asrc_fmt); + easrc->asrc_format = (__force snd_pcm_format_t)asrc_fmt; if (ret) { dev_err(dev, "failed to asrc format\n"); return ret; } - if (!(FSL_EASRC_FORMATS & (1ULL << easrc->asrc_format))) { + if (!(FSL_EASRC_FORMATS & (pcm_format_to_bits(easrc->asrc_format)))) { dev_warn(dev, "unsupported format, switching to S24_LE\n"); easrc->asrc_format = SNDRV_PCM_FORMAT_S24_LE; }
diff --git a/sound/soc/fsl/fsl_easrc.h b/sound/soc/fsl/fsl_easrc.h index 30620d56252c..5b8469757c12 100644 --- a/sound/soc/fsl/fsl_easrc.h +++ b/sound/soc/fsl/fsl_easrc.h @@ -569,7 +569,7 @@ struct fsl_easrc_io_params { unsigned int access_len; unsigned int fifo_wtmk; unsigned int sample_rate; - unsigned int sample_format; + snd_pcm_format_t sample_format; unsigned int norm_rate; };
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c index efc5daf53bba..6c794605e33c 100644 --- a/sound/soc/fsl/fsl_micfil.c +++ b/sound/soc/fsl/fsl_micfil.c @@ -190,6 +190,25 @@ static int fsl_micfil_reset(struct device *dev) return ret; } + /* + * SRES is a self-clearing bit, but REG_MICFIL_CTRL1 is defined as a + * non-volatile register, so SRES remains set in the regmap cache. Every + * later update of REG_MICFIL_CTRL1 would then trigger another software + * reset, so clear the cached bit explicitly. + */ + ret = regmap_clear_bits(micfil->regmap, REG_MICFIL_CTRL1, + MICFIL_CTRL1_SRES); + if (ret) + return ret; + + /* + * Setting SRES should clear the CHnF flags, but even with an added + * delay they are sometimes not cleared, so clear CHnF explicitly. + */ + ret = regmap_write_bits(micfil->regmap, REG_MICFIL_STAT, 0xFF, 0xFF); + if (ret) + return ret; + return 0; }
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h index 4bbcd0dbe8f1..8923c680f0e0 100644 --- a/sound/soc/fsl/fsl_sai.h +++ b/sound/soc/fsl/fsl_sai.h @@ -80,8 +80,8 @@ #define FSL_SAI_xCR3(tx, ofs) (tx ? FSL_SAI_TCR3(ofs) : FSL_SAI_RCR3(ofs)) #define FSL_SAI_xCR4(tx, ofs) (tx ? FSL_SAI_TCR4(ofs) : FSL_SAI_RCR4(ofs)) #define FSL_SAI_xCR5(tx, ofs) (tx ? FSL_SAI_TCR5(ofs) : FSL_SAI_RCR5(ofs)) -#define FSL_SAI_xDR(tx, ofs) (tx ? FSL_SAI_TDR(ofs) : FSL_SAI_RDR(ofs)) -#define FSL_SAI_xFR(tx, ofs) (tx ? FSL_SAI_TFR(ofs) : FSL_SAI_RFR(ofs)) +#define FSL_SAI_xDR0(tx) (tx ? FSL_SAI_TDR0 : FSL_SAI_RDR0) +#define FSL_SAI_xFR0(tx) (tx ? FSL_SAI_TFR0 : FSL_SAI_RFR0) #define FSL_SAI_xMR(tx) (tx ?
FSL_SAI_TMR : FSL_SAI_RMR) /* SAI Transmit/Receive Control Register */ diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c index f45cb4bbb6c4..5997bb5acb73 100644 --- a/sound/soc/fsl/imx-sgtl5000.c +++ b/sound/soc/fsl/imx-sgtl5000.c @@ -120,19 +120,19 @@ static int imx_sgtl5000_probe(struct platform_device *pdev) data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) { ret = -ENOMEM; - goto fail; + goto put_device; } comp = devm_kzalloc(&pdev->dev, 3 * sizeof(*comp), GFP_KERNEL); if (!comp) { ret = -ENOMEM; - goto fail; + goto put_device; } data->codec_clk = clk_get(&codec_dev->dev, NULL); if (IS_ERR(data->codec_clk)) { ret = PTR_ERR(data->codec_clk); - goto fail; + goto put_device; } data->clk_frequency = clk_get_rate(data->codec_clk); @@ -158,10 +158,10 @@ static int imx_sgtl5000_probe(struct platform_device *pdev) data->card.dev = &pdev->dev; ret = snd_soc_of_parse_card_name(&data->card, "model"); if (ret) - goto fail; + goto put_device; ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing"); if (ret) - goto fail; + goto put_device; data->card.num_links = 1; data->card.owner = THIS_MODULE; data->card.dai_link = &data->dai; @@ -176,7 +176,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev) if (ret != -EPROBE_DEFER) dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); - goto fail; + goto put_device; } of_node_put(ssi_np); @@ -184,6 +184,8 @@ static int imx_sgtl5000_probe(struct platform_device *pdev) return 0; +put_device: + put_device(&codec_dev->dev); fail: if (data && !IS_ERR(data->codec_clk)) clk_put(data->codec_clk); diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c index 0c640308ed80..bfbee2d716f3 100644 --- a/sound/soc/generic/audio-graph-card.c +++ b/sound/soc/generic/audio-graph-card.c @@ -149,8 +149,10 @@ static int asoc_simple_parse_dai(struct device_node *ep, * if he unbinded CPU or Codec. 
*/ ret = snd_soc_get_dai_name(&args, &dlc->dai_name); - if (ret < 0) + if (ret < 0) { + of_node_put(node); return ret; + } dlc->of_node = node; diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c index 7ed869bf1a92..81269ed5a2aa 100644 --- a/sound/soc/intel/boards/bytcht_es8316.c +++ b/sound/soc/intel/boards/bytcht_es8316.c @@ -450,6 +450,13 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = { | BYT_CHT_ES8316_INTMIC_IN2_MAP | BYT_CHT_ES8316_JD_INVERTED), }, + { /* Nanote UMPC-01 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"), + DMI_MATCH(DMI_PRODUCT_NAME, "UMPC-01"), + }, + .driver_data = (void *)BYT_CHT_ES8316_INTMIC_IN1_MAP, + }, { /* Teclast X98 Plus II */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"), diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index 43ee3d095a1b..3020a993f6ef 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -615,6 +615,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_OVCD_SF_0P75 | BYT_RT5640_MCLK_EN), }, + { /* HP Pro Tablet 408 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pro Tablet 408"), + }, + .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_1500UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { /* HP Stream 7 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), diff --git a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c index 9f243e60b95c..15d862cdcd2f 100644 --- a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c @@ -126,13 +126,13 @@ static const struct snd_soc_acpi_adr_device mx8373_1_adr[] = { { .adr = 0x000123019F837300, .num_endpoints = 1, - .endpoints = &spk_l_endpoint, + .endpoints = &spk_r_endpoint, .name_prefix = "Right" }, { .adr = 0x000127019F837300, .num_endpoints = 1, - .endpoints = &spk_r_endpoint, + .endpoints = &spk_l_endpoint, .name_prefix = "Left" } }; diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c index 87c891c46291..3b3868df9f67 100644 --- a/sound/soc/intel/skylake/skl-nhlt.c +++ b/sound/soc/intel/skylake/skl-nhlt.c @@ -201,7 +201,6 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks, struct nhlt_fmt_cfg *fmt_cfg; struct wav_fmt_ext *wav_fmt; unsigned long rate; - bool present = false; int rate_index = 0; u16 channels, bps; u8 clk_src; @@ -214,9 +213,12 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks, if (fmt->fmt_count == 0) return; + fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config; for (i = 0; i < fmt->fmt_count; i++) { - fmt_cfg = &fmt->fmt_config[i]; - wav_fmt = &fmt_cfg->fmt_ext; + struct nhlt_fmt_cfg *saved_fmt_cfg = fmt_cfg; + bool present = false; + + wav_fmt = &saved_fmt_cfg->fmt_ext; channels = wav_fmt->fmt.channels; bps = wav_fmt->fmt.bits_per_sample; @@ -234,12 +236,18 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks, * derive the rate. 
*/ for (j = i; j < fmt->fmt_count; j++) { - fmt_cfg = &fmt->fmt_config[j]; - wav_fmt = &fmt_cfg->fmt_ext; + struct nhlt_fmt_cfg *tmp_fmt_cfg = fmt_cfg; + + wav_fmt = &tmp_fmt_cfg->fmt_ext; if ((fs == wav_fmt->fmt.samples_per_sec) && - (bps == wav_fmt->fmt.bits_per_sample)) + (bps == wav_fmt->fmt.bits_per_sample)) { channels = max_t(u16, channels, wav_fmt->fmt.channels); + saved_fmt_cfg = tmp_fmt_cfg; + } + /* Move to the next nhlt_fmt_cfg */ + tmp_fmt_cfg = (struct nhlt_fmt_cfg *)(tmp_fmt_cfg->config.caps + + tmp_fmt_cfg->config.size); } rate = channels * bps * fs; @@ -255,8 +263,11 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks, /* Fill rate and parent for sclk/sclkfs */ if (!present) { + struct nhlt_fmt_cfg *first_fmt_cfg; + + first_fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config; i2s_config_ext = (struct skl_i2s_config_blob_ext *) - fmt->fmt_config[0].config.caps; + first_fmt_cfg->config.caps; /* MCLK Divider Source Select */ if (is_legacy_blob(i2s_config_ext->hdr.sig)) { @@ -270,6 +281,9 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks, parent = skl_get_parent_clk(clk_src); + /* Move to the next nhlt_fmt_cfg */ + fmt_cfg = (struct nhlt_fmt_cfg *)(fmt_cfg->config.caps + + fmt_cfg->config.size); /* * Do not copy the config data if there is no parent * clock available for this clock source select @@ -278,9 +292,9 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks, continue; sclk[id].rate_cfg[rate_index].rate = rate; - sclk[id].rate_cfg[rate_index].config = fmt_cfg; + sclk[id].rate_cfg[rate_index].config = saved_fmt_cfg; sclkfs[id].rate_cfg[rate_index].rate = rate; - sclkfs[id].rate_cfg[rate_index].config = fmt_cfg; + sclkfs[id].rate_cfg[rate_index].config = saved_fmt_cfg; sclk[id].parent_name = parent->name; sclkfs[id].parent_name = parent->name; @@ -294,13 +308,13 @@ static void skl_get_mclk(struct skl_dev *skl, struct skl_ssp_clk *mclk, { struct skl_i2s_config_blob_ext *i2s_config_ext; struct skl_i2s_config_blob_legacy *i2s_config; - struct nhlt_specific_cfg *fmt_cfg; + struct nhlt_fmt_cfg *fmt_cfg; struct skl_clk_parent_src *parent; u32 clkdiv, div_ratio; u8 clk_src; - fmt_cfg = &fmt->fmt_config[0].config; - i2s_config_ext = (struct skl_i2s_config_blob_ext *)fmt_cfg->caps; + fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config; + i2s_config_ext = (struct skl_i2s_config_blob_ext *)fmt_cfg->config.caps; /* MCLK Divider Source Select and divider */ if (is_legacy_blob(i2s_config_ext->hdr.sig)) { @@ -329,7 +343,7 @@ static void skl_get_mclk(struct skl_dev *skl, struct skl_ssp_clk *mclk, return; mclk[id].rate_cfg[0].rate = parent->rate/div_ratio; - mclk[id].rate_cfg[0].config = &fmt->fmt_config[0]; + mclk[id].rate_cfg[0].config = fmt_cfg; mclk[id].parent_name = parent->name; } diff --git a/sound/soc/mediatek/mt2701/mt2701-wm8960.c b/sound/soc/mediatek/mt2701/mt2701-wm8960.c index 414e422c0eba..70e494fb3da8 100644 --- a/sound/soc/mediatek/mt2701/mt2701-wm8960.c +++ b/sound/soc/mediatek/mt2701/mt2701-wm8960.c @@ -129,7 +129,8 @@ static int mt2701_wm8960_machine_probe(struct platform_device *pdev) if (!codec_node) { dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n"); - return -EINVAL; + ret = -EINVAL; + goto put_platform_node; } for_each_card_prelinks(card, i, dai_link) { if (dai_link->codecs->name) @@ -140,7 +141,7 @@ static int mt2701_wm8960_machine_probe(struct platform_device *pdev) ret = snd_soc_of_parse_audio_routing(card, "audio-routing"); if (ret) { dev_err(&pdev->dev, "failed 
to parse audio-routing: %d\n", ret); - return ret; + goto put_codec_node; } ret = devm_snd_soc_register_card(&pdev->dev, card); @@ -148,6 +149,10 @@ static int mt2701_wm8960_machine_probe(struct platform_device *pdev) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); +put_codec_node: + of_node_put(codec_node); +put_platform_node: + of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt6797/mt6797-mt6351.c b/sound/soc/mediatek/mt6797/mt6797-mt6351.c index 496f32bcfb5e..d2f6213a6bfc 100644 --- a/sound/soc/mediatek/mt6797/mt6797-mt6351.c +++ b/sound/soc/mediatek/mt6797/mt6797-mt6351.c @@ -217,7 +217,8 @@ static int mt6797_mt6351_dev_probe(struct platform_device *pdev) if (!codec_node) { dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n"); - return -EINVAL; + ret = -EINVAL; + goto put_platform_node; } for_each_card_prelinks(card, i, dai_link) { if (dai_link->codecs->name) @@ -230,6 +231,9 @@ static int mt6797_mt6351_dev_probe(struct platform_device *pdev) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); + of_node_put(codec_node); +put_platform_node: + of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt8173/mt8173-max98090.c b/sound/soc/mediatek/mt8173/mt8173-max98090.c index 3bdd4931316c..5f39e810e27a 100644 --- a/sound/soc/mediatek/mt8173/mt8173-max98090.c +++ b/sound/soc/mediatek/mt8173/mt8173-max98090.c @@ -167,7 +167,8 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev) if (!codec_node) { dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n"); - return -EINVAL; + ret = -EINVAL; + goto put_platform_node; } for_each_card_prelinks(card, i, dai_link) { if (dai_link->codecs->name) @@ -182,6 +183,8 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev) __func__, ret); of_node_put(codec_node); + +put_platform_node: of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c index c8e4e85e1057..94a9bbf144d1 100644 --- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c +++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c @@ -256,14 +256,16 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev) if (!mt8173_rt5650_rt5676_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) { dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n"); - return -EINVAL; + ret = -EINVAL; + goto put_node; } mt8173_rt5650_rt5676_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node = of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1); if (!mt8173_rt5650_rt5676_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node) { dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n"); - return -EINVAL; + ret = -EINVAL; + goto put_node; } mt8173_rt5650_rt5676_codec_conf[0].dlc.of_node = mt8173_rt5650_rt5676_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node; @@ -276,7 +278,8 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev) if (!mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codecs->of_node) { dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n"); - return -EINVAL; + ret = -EINVAL; + goto put_node; } card->dev = &pdev->dev; @@ -286,6 +289,7 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); +put_node: of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c 
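
/*
 * Illustrative sketch (editor's addition, not a hunk of this patch): the
 * mediatek machine-driver changes around here all fix the same leak.
 * of_parse_phandle() returns a device_node with its refcount raised, so
 * each early "return -EINVAL" after a successful lookup leaked a
 * reference; the fix turns those returns into gotos that fall through
 * of_node_put() in reverse acquisition order. The probe function, label
 * and property names below are hypothetical:
 */
#include <linux/of.h>
#include <linux/platform_device.h>

static int machine_probe_shape(struct platform_device *pdev)
{
	struct device_node *platform_node, *codec_node;
	int ret = 0;

	platform_node = of_parse_phandle(pdev->dev.of_node, "mediatek,platform", 0);
	if (!platform_node)
		return -EINVAL;		/* nothing held yet: plain return is fine */

	codec_node = of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
	if (!codec_node) {
		ret = -EINVAL;
		goto put_platform_node;	/* was a bare return, leaking platform_node */
	}

	/* ... card setup and snd_soc_register_card() elided ... */

	of_node_put(codec_node);
put_platform_node:
	of_node_put(platform_node);
	return ret;
}
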
index e168d31f4445..1de9dab218c6 100644 --- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c +++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c @@ -280,7 +280,8 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev) if (!mt8173_rt5650_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) { dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n"); - return -EINVAL; + ret = -EINVAL; + goto put_platform_node; } mt8173_rt5650_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node = mt8173_rt5650_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node; @@ -293,7 +294,7 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev) dev_err(&pdev->dev, "%s codec_capture_dai name fail %d\n", __func__, ret); - return ret; + goto put_platform_node; } mt8173_rt5650_dais[DAI_LINK_CODEC_I2S].codecs[1].dai_name = codec_capture_dai; @@ -315,7 +316,8 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev) if (!mt8173_rt5650_dais[DAI_LINK_HDMI_I2S].codecs->of_node) { dev_err(&pdev->dev, "Property 'audio-codec' missing or invalid\n"); - return -EINVAL; + ret = -EINVAL; + goto put_platform_node; } card->dev = &pdev->dev; @@ -324,6 +326,7 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); +put_platform_node: of_node_put(platform_node); return ret; } diff --git a/sound/soc/meson/aiu-acodec-ctrl.c b/sound/soc/meson/aiu-acodec-ctrl.c index 7078197e0cc5..e11b6a5cd772 100644 --- a/sound/soc/meson/aiu-acodec-ctrl.c +++ b/sound/soc/meson/aiu-acodec-ctrl.c @@ -58,7 +58,7 @@ static int aiu_acodec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol, snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); - return 0; + return 1; } static SOC_ENUM_SINGLE_DECL(aiu_acodec_ctrl_mux_enum, AIU_ACODEC_CTRL, diff --git a/sound/soc/meson/aiu-codec-ctrl.c b/sound/soc/meson/aiu-codec-ctrl.c index 4b773d3e8b07..a807e5007d6e 100644 --- a/sound/soc/meson/aiu-codec-ctrl.c +++ b/sound/soc/meson/aiu-codec-ctrl.c @@ -57,7 +57,7 @@ static int aiu_codec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol, snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); - return 0; + return 1; } static SOC_ENUM_SINGLE_DECL(aiu_hdmi_ctrl_mux_enum, AIU_HDMI_CLK_DATA_CTRL, diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c index 9b2b59536ced..6c99052feafd 100644 --- a/sound/soc/meson/g12a-tohdmitx.c +++ b/sound/soc/meson/g12a-tohdmitx.c @@ -67,7 +67,7 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol, snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); - return 0; + return 1; } static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_i2s_mux_enum, TOHDMITX_CTRL0, diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c index f2eda81985e2..d87ac26999cf 100644 --- a/sound/soc/mxs/mxs-saif.c +++ b/sound/soc/mxs/mxs-saif.c @@ -764,6 +764,7 @@ static int mxs_saif_probe(struct platform_device *pdev) saif->master_id = saif->id; } else { ret = of_alias_get_id(master, "saif"); + of_node_put(master); if (ret < 0) return ret; else diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c index e620a62ef534..ecd6c049ace2 100644 --- a/sound/soc/qcom/lpass-cpu.c +++ b/sound/soc/qcom/lpass-cpu.c @@ -745,10 +745,20 @@ static bool lpass_hdmi_regmap_volatile(struct device *dev, unsigned int reg) return true; if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v)) return true; + if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v)) + return true; + if (reg == LPASS_HDMI_TX_PARITY_ADDR(v)) + return true; for (i = 0; i < v->hdmi_rdma_channels; 
++i) { if (reg == LPAIF_HDMI_RDMACURR_REG(v, i)) return true; + if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i)) + return true; + if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i)) + return true; + if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i)) + return true; } return false; } @@ -846,6 +856,7 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev) dsp_of_node = of_parse_phandle(pdev->dev.of_node, "qcom,adsp", 0); if (dsp_of_node) { dev_err(dev, "DSP exists and holds audio resources\n"); + of_node_put(dsp_of_node); return -EBUSY; } diff --git a/sound/soc/qcom/qdsp6/q6adm.c b/sound/soc/qcom/qdsp6/q6adm.c index 72f29720398c..182d36a34faf 100644 --- a/sound/soc/qcom/qdsp6/q6adm.c +++ b/sound/soc/qcom/qdsp6/q6adm.c @@ -217,7 +217,7 @@ static struct q6copp *q6adm_alloc_copp(struct q6adm *adm, int port_idx) idx = find_first_zero_bit(&adm->copp_bitmap[port_idx], MAX_COPPS_PER_PORT); - if (idx > MAX_COPPS_PER_PORT) + if (idx >= MAX_COPPS_PER_PORT) return ERR_PTR(-EBUSY); c = kzalloc(sizeof(*c), GFP_ATOMIC); diff --git a/sound/soc/samsung/aries_wm8994.c b/sound/soc/samsung/aries_wm8994.c index 0ac5956ba270..d2908c1ea835 100644 --- a/sound/soc/samsung/aries_wm8994.c +++ b/sound/soc/samsung/aries_wm8994.c @@ -585,19 +585,16 @@ static int aries_audio_probe(struct platform_device *pdev) extcon_np = of_parse_phandle(np, "extcon", 0); priv->usb_extcon = extcon_find_edev_by_node(extcon_np); - if (IS_ERR(priv->usb_extcon)) { - if (PTR_ERR(priv->usb_extcon) != -EPROBE_DEFER) - dev_err(dev, "Failed to get extcon device"); - return PTR_ERR(priv->usb_extcon); - } of_node_put(extcon_np); + if (IS_ERR(priv->usb_extcon)) + return dev_err_probe(dev, PTR_ERR(priv->usb_extcon), + "Failed to get extcon device"); priv->adc = devm_iio_channel_get(dev, "headset-detect"); - if (IS_ERR(priv->adc)) { - if (PTR_ERR(priv->adc) != -EPROBE_DEFER) - dev_err(dev, "Failed to get ADC channel"); - return PTR_ERR(priv->adc); - } + if (IS_ERR(priv->adc)) + return dev_err_probe(dev, PTR_ERR(priv->adc), + "Failed to get ADC channel"); + if (priv->adc->channel->type != IIO_VOLTAGE) return -EINVAL; @@ -631,8 +628,10 @@ static int aries_audio_probe(struct platform_device *pdev) return -EINVAL; codec = of_get_child_by_name(dev->of_node, "codec"); - if (!codec) - return -EINVAL; + if (!codec) { + ret = -EINVAL; + goto out; + } for_each_card_prelinks(card, i, dai_link) { dai_link->codecs->of_node = of_parse_phandle(codec, diff --git a/sound/soc/samsung/arndale.c b/sound/soc/samsung/arndale.c index 28587375813a..35e34e534b8b 100644 --- a/sound/soc/samsung/arndale.c +++ b/sound/soc/samsung/arndale.c @@ -174,9 +174,8 @@ static int arndale_audio_probe(struct platform_device *pdev) ret = devm_snd_soc_register_card(card->dev, card); if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, - "snd_soc_register_card() failed: %d\n", ret); + dev_err_probe(&pdev->dev, ret, + "snd_soc_register_card() failed\n"); goto err_put_of_nodes; } return 0; diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c index 8aa78ff640f5..adb6b661c799 100644 --- a/sound/soc/samsung/h1940_uda1380.c +++ b/sound/soc/samsung/h1940_uda1380.c @@ -8,7 +8,7 @@ // Based on version from Arnaud Patard #include -#include +#include #include #include diff --git a/sound/soc/samsung/littlemill.c b/sound/soc/samsung/littlemill.c index a1ff1400857e..e73356a66087 100644 --- a/sound/soc/samsung/littlemill.c +++ b/sound/soc/samsung/littlemill.c @@ -325,9 +325,8 @@ static int littlemill_probe(struct platform_device *pdev) card->dev = &pdev->dev; ret = 
devm_snd_soc_register_card(&pdev->dev, card); - if (ret && ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", - ret); + if (ret) + dev_err_probe(&pdev->dev, ret, "snd_soc_register_card() failed\n"); return ret; } diff --git a/sound/soc/samsung/lowland.c b/sound/soc/samsung/lowland.c index 998d10cf8c94..7b12ccd2a9b2 100644 --- a/sound/soc/samsung/lowland.c +++ b/sound/soc/samsung/lowland.c @@ -183,9 +183,8 @@ static int lowland_probe(struct platform_device *pdev) card->dev = &pdev->dev; ret = devm_snd_soc_register_card(&pdev->dev, card); - if (ret && ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", - ret); + if (ret) + dev_err_probe(&pdev->dev, ret, "snd_soc_register_card() failed\n"); return ret; } diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c index ca643a488c3c..4ff12e2e704f 100644 --- a/sound/soc/samsung/odroid.c +++ b/sound/soc/samsung/odroid.c @@ -311,9 +311,7 @@ static int odroid_audio_probe(struct platform_device *pdev) ret = devm_snd_soc_register_card(dev, card); if (ret < 0) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "snd_soc_register_card() failed: %d\n", - ret); + dev_err_probe(dev, ret, "snd_soc_register_card() failed\n"); goto err_put_clk_i2s; } diff --git a/sound/soc/samsung/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c index 400a7f77c711..354f379268d9 100644 --- a/sound/soc/samsung/rx1950_uda1380.c +++ b/sound/soc/samsung/rx1950_uda1380.c @@ -128,7 +128,7 @@ static int rx1950_startup(struct snd_pcm_substream *substream) &hw_rates); } -struct gpio_desc *gpiod_speaker_power; +static struct gpio_desc *gpiod_speaker_power; static int rx1950_spk_power(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) @@ -227,7 +227,7 @@ static int rx1950_probe(struct platform_device *pdev) return devm_snd_soc_register_card(dev, &rx1950_asoc); } -struct platform_driver rx1950_audio = { +static struct platform_driver rx1950_audio = { .driver = { .name = "rx1950-audio", .pm = &snd_soc_pm_ops, diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c index 64a1a64656ab..92cd9e8a2e61 100644 --- a/sound/soc/samsung/smdk_wm8994.c +++ b/sound/soc/samsung/smdk_wm8994.c @@ -178,8 +178,8 @@ static int smdk_audio_probe(struct platform_device *pdev) ret = devm_snd_soc_register_card(&pdev->dev, card); - if (ret && ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "snd_soc_register_card() failed:%d\n", ret); + if (ret) + dev_err_probe(&pdev->dev, ret, "snd_soc_register_card() failed\n"); return ret; } diff --git a/sound/soc/samsung/smdk_wm8994pcm.c b/sound/soc/samsung/smdk_wm8994pcm.c index a01640576f71..110a51a4f870 100644 --- a/sound/soc/samsung/smdk_wm8994pcm.c +++ b/sound/soc/samsung/smdk_wm8994pcm.c @@ -118,8 +118,8 @@ static int snd_smdk_probe(struct platform_device *pdev) smdk_pcm.dev = &pdev->dev; ret = devm_snd_soc_register_card(&pdev->dev, &smdk_pcm); - if (ret && ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "snd_soc_register_card failed %d\n", ret); + if (ret) + dev_err_probe(&pdev->dev, ret, "snd_soc_register_card failed\n"); return ret; } diff --git a/sound/soc/samsung/snow.c b/sound/soc/samsung/snow.c index 07163f07c6d5..6aa2c66d8e8c 100644 --- a/sound/soc/samsung/snow.c +++ b/sound/soc/samsung/snow.c @@ -215,12 +215,9 @@ static int snow_probe(struct platform_device *pdev) snd_soc_card_set_drvdata(card, priv); ret = devm_snd_soc_register_card(dev, card); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, - "snd_soc_register_card failed (%d)\n", ret); 
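
/*
 * Illustrative sketch (editor's addition, not a hunk of this patch): the
 * samsung hunks above and below all replace the old
 * "if (ret && ret != -EPROBE_DEFER) dev_err(...)" pattern with
 * dev_err_probe(). That helper stays silent on -EPROBE_DEFER (recording
 * the deferral reason for debugfs instead), logs every other error, and
 * returns the error code unchanged, so each call site collapses to a
 * single line. The wrapper name below is hypothetical:
 */
#include <linux/device.h>
#include <sound/soc.h>

static int register_card_shape(struct device *dev, struct snd_soc_card *card)
{
	int ret = devm_snd_soc_register_card(dev, card);

	if (ret)
		return dev_err_probe(dev, ret, "snd_soc_register_card failed\n");
	return 0;
}
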
- return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, + "snd_soc_register_card failed\n"); return ret; } diff --git a/sound/soc/samsung/speyside.c b/sound/soc/samsung/speyside.c index f5f6ba00d073..37b1f4f60b21 100644 --- a/sound/soc/samsung/speyside.c +++ b/sound/soc/samsung/speyside.c @@ -330,9 +330,8 @@ static int speyside_probe(struct platform_device *pdev) card->dev = &pdev->dev; ret = devm_snd_soc_register_card(&pdev->dev, card); - if (ret && ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", - ret); + if (ret) + dev_err_probe(&pdev->dev, ret, "snd_soc_register_card() failed\n"); return ret; } diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c index 125e07f65d2b..ca1be7a7cb8a 100644 --- a/sound/soc/samsung/tm2_wm5110.c +++ b/sound/soc/samsung/tm2_wm5110.c @@ -611,8 +611,7 @@ static int tm2_probe(struct platform_device *pdev) ret = devm_snd_soc_register_card(dev, card); if (ret < 0) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to register card: %d\n", ret); + dev_err_probe(dev, ret, "Failed to register card\n"); goto dai_node_put; } diff --git a/sound/soc/samsung/tobermory.c b/sound/soc/samsung/tobermory.c index c962d2c2a7f7..95c6267b0c0c 100644 --- a/sound/soc/samsung/tobermory.c +++ b/sound/soc/samsung/tobermory.c @@ -229,9 +229,8 @@ static int tobermory_probe(struct platform_device *pdev) card->dev = &pdev->dev; ret = devm_snd_soc_register_card(&pdev->dev, card); - if (ret && ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", - ret); + if (ret) + dev_err_probe(&pdev->dev, ret, "snd_soc_register_card() failed\n"); return ret; } diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c index 7647b3d4c0ba..25a8cfc27433 100644 --- a/sound/soc/sh/rcar/ctu.c +++ b/sound/soc/sh/rcar/ctu.c @@ -171,7 +171,11 @@ static int rsnd_ctu_init(struct rsnd_mod *mod, struct rsnd_dai_stream *io, struct rsnd_priv *priv) { - rsnd_mod_power_on(mod); + int ret; + + ret = rsnd_mod_power_on(mod); + if (ret < 0) + return ret; rsnd_ctu_activation(mod); diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c index 8d91c0eb0880..53b2ad01222b 100644 --- a/sound/soc/sh/rcar/dvc.c +++ b/sound/soc/sh/rcar/dvc.c @@ -186,7 +186,11 @@ static int rsnd_dvc_init(struct rsnd_mod *mod, struct rsnd_dai_stream *io, struct rsnd_priv *priv) { - rsnd_mod_power_on(mod); + int ret; + + ret = rsnd_mod_power_on(mod); + if (ret < 0) + return ret; rsnd_dvc_activation(mod); diff --git a/sound/soc/sh/rcar/mix.c b/sound/soc/sh/rcar/mix.c index a3e0370f5704..c6fe2595c373 100644 --- a/sound/soc/sh/rcar/mix.c +++ b/sound/soc/sh/rcar/mix.c @@ -146,7 +146,11 @@ static int rsnd_mix_init(struct rsnd_mod *mod, struct rsnd_dai_stream *io, struct rsnd_priv *priv) { - rsnd_mod_power_on(mod); + int ret; + + ret = rsnd_mod_power_on(mod); + if (ret < 0) + return ret; rsnd_mix_activation(mod); diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c index 585ffba0244b..fd52e26a3808 100644 --- a/sound/soc/sh/rcar/src.c +++ b/sound/soc/sh/rcar/src.c @@ -454,11 +454,14 @@ static int rsnd_src_init(struct rsnd_mod *mod, struct rsnd_priv *priv) { struct rsnd_src *src = rsnd_mod_to_src(mod); + int ret; /* reset sync convert_rate */ src->sync.val = 0; - rsnd_mod_power_on(mod); + ret = rsnd_mod_power_on(mod); + if (ret < 0) + return ret; rsnd_src_activation(mod); diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index 042207c11651..2ead44779d46 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ 
-518,7 +518,9 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, ssi->usrcnt++; - rsnd_mod_power_on(mod); + ret = rsnd_mod_power_on(mod); + if (ret < 0) + return ret; rsnd_ssi_config_init(mod, io); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index ea145eedcb68..88bbd96ed8a2 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -3211,10 +3211,23 @@ EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_link_codecs); static int __init snd_soc_init(void) { - snd_soc_debugfs_init(); - snd_soc_util_init(); + int ret; - return platform_driver_register(&soc_driver); + snd_soc_debugfs_init(); + ret = snd_soc_util_init(); + if (ret) + goto err_util_init; + + ret = platform_driver_register(&soc_driver); + if (ret) + goto err_register; + return 0; + +err_register: + snd_soc_util_exit(); +err_util_init: + snd_soc_debugfs_exit(); + return ret; } module_init(snd_soc_init); diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 2924d89bf0da..754c1f16ee83 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -62,6 +62,8 @@ struct snd_soc_dapm_widget * snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_widget *widget); +static unsigned int soc_dapm_read(struct snd_soc_dapm_context *dapm, int reg); + /* dapm power sequences - make this per codec in the future */ static int dapm_up_seq[] = { [snd_soc_dapm_pre] = 1, @@ -442,6 +444,9 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, snd_soc_dapm_add_path(widget->dapm, data->widget, widget, NULL, NULL); + } else if (e->reg != SND_SOC_NOPM) { + data->value = soc_dapm_read(widget->dapm, e->reg) & + (e->mask << e->shift_l); } break; default: @@ -1683,8 +1688,7 @@ static void dapm_seq_run(struct snd_soc_card *card, switch (w->id) { case snd_soc_dapm_pre: if (!w->event) - list_for_each_entry_safe_continue(w, n, list, - power_list); + continue; if (event == SND_SOC_DAPM_STREAM_START) ret = w->event(w, @@ -1696,8 +1700,7 @@ static void dapm_seq_run(struct snd_soc_card *card, case snd_soc_dapm_post: if (!w->event) - list_for_each_entry_safe_continue(w, n, list, - power_list); + continue; if (event == SND_SOC_DAPM_STREAM_START) ret = w->event(w, @@ -3429,7 +3432,6 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol, update.val = val; card->update = &update; } - change |= reg_change; ret = soc_dapm_mixer_update_power(card, kcontrol, connect, rconnect); @@ -3531,7 +3533,6 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol, update.val = val; card->update = &update; } - change |= reg_change; ret = soc_dapm_mux_update_power(card, kcontrol, item[0], e); diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index 27c490773cd5..f99f90d60b2c 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c @@ -87,10 +87,10 @@ static int dmaengine_pcm_hw_params(struct snd_soc_component *component, memset(&slave_config, 0, sizeof(slave_config)); - if (pcm->config && pcm->config->prepare_slave_config) - prepare_slave_config = pcm->config->prepare_slave_config; - else + if (!pcm->config) prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config; + else + prepare_slave_config = pcm->config->prepare_slave_config; if (prepare_slave_config) { ret = prepare_slave_config(substream, params, &slave_config); diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index 2bc9fa6a34b8..daecd386d5ec 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -432,7 +432,7 @@ int snd_soc_put_volsw_sx(struct 
snd_kcontrol *kcontrol, val = ucontrol->value.integer.value[0]; if (mc->platform_max && val > mc->platform_max) return -EINVAL; - if (val > max - min) + if (val > max) return -EINVAL; if (val < 0) return -EINVAL; @@ -445,8 +445,15 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, return err; if (snd_soc_volsw_is_stereo(mc)) { + val2 = ucontrol->value.integer.value[1]; + + if (mc->platform_max && val2 > mc->platform_max) + return -EINVAL; + if (val2 > max) + return -EINVAL; + val_mask = mask << rshift; - val2 = (ucontrol->value.integer.value[1] + min) & mask; + val2 = (val2 + min) & mask; val2 = val2 << rshift; err = snd_soc_component_update_bits(component, reg2, val_mask, @@ -510,7 +517,15 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; unsigned int val, val_mask; - int err, ret; + int err, ret, tmp; + + tmp = ucontrol->value.integer.value[0]; + if (tmp < 0) + return -EINVAL; + if (mc->platform_max && tmp > mc->platform_max) + return -EINVAL; + if (tmp > mc->max - mc->min) + return -EINVAL; if (invert) val = (max - ucontrol->value.integer.value[0]) & mask; @@ -525,6 +540,14 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, ret = err; if (snd_soc_volsw_is_stereo(mc)) { + tmp = ucontrol->value.integer.value[1]; + if (tmp < 0) + return -EINVAL; + if (mc->platform_max && tmp > mc->platform_max) + return -EINVAL; + if (tmp > mc->max - mc->min) + return -EINVAL; + if (invert) val = (max - ucontrol->value.integer.value[1]) & mask; else diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index f7aff87849fc..1f200bf49a18 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -724,11 +724,6 @@ static int soc_pcm_open(struct snd_pcm_substream *substream) ret = snd_soc_dai_startup(dai, substream); if (ret < 0) goto err; - - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - dai->tx_mask = 0; - else - dai->rx_mask = 0; } /* Dynamic PCM DAI links compat checks use dynamic capabilities */ @@ -1160,6 +1155,8 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe, return; be_substream = snd_soc_dpcm_get_substream(be, stream); + if (!be_substream) + return; for_each_dpcm_fe(be, stream, dpcm) { if (dpcm->fe == fe) diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c index f27f94ca064b..6b398ffabb02 100644 --- a/sound/soc/soc-utils.c +++ b/sound/soc/soc-utils.c @@ -171,7 +171,7 @@ int __init snd_soc_util_init(void) return ret; } -void __exit snd_soc_util_exit(void) +void snd_soc_util_exit(void) { platform_driver_unregister(&soc_dummy_driver); platform_device_unregister(soc_dummy_dev); diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c index 347636a80b48..4012097a9d60 100644 --- a/sound/soc/sof/intel/hda-loader.c +++ b/sound/soc/sof/intel/hda-loader.c @@ -79,9 +79,9 @@ out_put: } /* - * first boot sequence has some extra steps. core 0 waits for power - * status on core 1, so power up core 1 also momentarily, keep it in - * reset/stall and then turn it off + * first boot sequence has some extra steps. + * power on all host-managed cores, unstall/run only the boot core to boot the + * DSP, then power off any non-boot cores that were powered on.
*/ static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag) { @@ -115,7 +115,7 @@ static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag) ((stream_tag - 1) << 9))); /* step 3: unset core 0 reset state & unstall/run core 0 */ - ret = hda_dsp_core_run(sdev, BIT(0)); + ret = hda_dsp_core_run(sdev, chip->init_core_mask); if (ret < 0) { if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS) dev_err(sdev->dev, diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index b0faf050132d..b4cc72483137 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -39,6 +39,17 @@ #define EXCEPT_MAX_HDR_SIZE 0x400 #define HDA_EXT_ROM_STATUS_SIZE 8 +static const struct sof_intel_dsp_desc + *get_chip_info(struct snd_sof_pdata *pdata) +{ + const struct sof_dev_desc *desc = pdata->desc; + const struct sof_intel_dsp_desc *chip_info; + + chip_info = desc->chip_info; + + return chip_info; +} + #if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE) /* @@ -674,17 +685,6 @@ skip_soundwire: return 0; } -static const struct sof_intel_dsp_desc - *get_chip_info(struct snd_sof_pdata *pdata) -{ - const struct sof_dev_desc *desc = pdata->desc; - const struct sof_intel_dsp_desc *chip_info; - - chip_info = desc->chip_info; - - return chip_info; -} - static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context) { struct snd_sof_dev *sdev = context; diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c index 75657a25dbc0..fe9feaab6a0a 100644 --- a/sound/soc/sof/sof-pci-dev.c +++ b/sound/soc/sof/sof-pci-dev.c @@ -75,7 +75,7 @@ static const struct dmi_system_id community_key_platforms[] = { { .ident = "Google Chromebooks", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "Google"), } }, {}, diff --git a/sound/soc/ti/j721e-evm.c b/sound/soc/ti/j721e-evm.c index 265bbc5a2f96..756cd9694cbe 100644 --- a/sound/soc/ti/j721e-evm.c +++ b/sound/soc/ti/j721e-evm.c @@ -631,17 +631,18 @@ static int j721e_soc_probe_cpb(struct j721e_priv *priv, int *link_idx, codec_node = of_parse_phandle(node, "ti,cpb-codec", 0); if (!codec_node) { dev_err(priv->dev, "CPB codec node is not provided\n"); - return -EINVAL; + ret = -EINVAL; + goto put_dai_node; } domain = &priv->audio_domains[J721E_AUDIO_DOMAIN_CPB]; ret = j721e_get_clocks(priv->dev, &domain->codec, "cpb-codec-scki"); if (ret) - return ret; + goto put_codec_node; ret = j721e_get_clocks(priv->dev, &domain->mcasp, "cpb-mcasp-auxclk"); if (ret) - return ret; + goto put_codec_node; /* * Common Processor Board, two links @@ -651,8 +652,10 @@ static int j721e_soc_probe_cpb(struct j721e_priv *priv, int *link_idx, comp_count = 6; compnent = devm_kzalloc(priv->dev, comp_count * sizeof(*compnent), GFP_KERNEL); - if (!compnent) - return -ENOMEM; + if (!compnent) { + ret = -ENOMEM; + goto put_codec_node; + } comp_idx = 0; priv->dai_links[*link_idx].cpus = &compnent[comp_idx++]; @@ -703,6 +706,12 @@ static int j721e_soc_probe_cpb(struct j721e_priv *priv, int *link_idx, (*conf_idx)++; return 0; + +put_codec_node: + of_node_put(codec_node); +put_dai_node: + of_node_put(dai_node); + return ret; } static int j721e_soc_probe_ivi(struct j721e_priv *priv, int *link_idx, @@ -727,23 +736,25 @@ static int j721e_soc_probe_ivi(struct j721e_priv *priv, int *link_idx, codeca_node = of_parse_phandle(node, "ti,ivi-codec-a", 0); if (!codeca_node) { dev_err(priv->dev, "IVI codec-a node is not provided\n"); - return -EINVAL; + ret = -EINVAL; + goto put_dai_node; } codecb_node = of_parse_phandle(node, "ti,ivi-codec-b", 0); if 
(!codecb_node) { dev_warn(priv->dev, "IVI codec-b node is not provided\n"); - return 0; + ret = 0; + goto put_codeca_node; } domain = &priv->audio_domains[J721E_AUDIO_DOMAIN_IVI]; ret = j721e_get_clocks(priv->dev, &domain->codec, "ivi-codec-scki"); if (ret) - return ret; + goto put_codecb_node; ret = j721e_get_clocks(priv->dev, &domain->mcasp, "ivi-mcasp-auxclk"); if (ret) - return ret; + goto put_codecb_node; /* * IVI extension, two links @@ -755,8 +766,10 @@ static int j721e_soc_probe_ivi(struct j721e_priv *priv, int *link_idx, comp_count = 8; compnent = devm_kzalloc(priv->dev, comp_count * sizeof(*compnent), GFP_KERNEL); - if (!compnent) - return -ENOMEM; + if (!compnent) { + ret = -ENOMEM; + goto put_codecb_node; + } comp_idx = 0; priv->dai_links[*link_idx].cpus = &compnent[comp_idx++]; @@ -817,6 +830,15 @@ static int j721e_soc_probe_ivi(struct j721e_priv *priv, int *link_idx, (*conf_idx)++; return 0; + + +put_codecb_node: + of_node_put(codecb_node); +put_codeca_node: + of_node_put(codeca_node); +put_dai_node: + of_node_put(dai_node); + return ret; } static int j721e_soc_probe(struct platform_device *pdev) diff --git a/sound/synth/emux/emux.c b/sound/synth/emux/emux.c index 6695530bba9b..c60ff81390a4 100644 --- a/sound/synth/emux/emux.c +++ b/sound/synth/emux/emux.c @@ -125,15 +125,10 @@ EXPORT_SYMBOL(snd_emux_register); */ int snd_emux_free(struct snd_emux *emu) { - unsigned long flags; - if (! emu) return -EINVAL; - spin_lock_irqsave(&emu->voice_lock, flags); - if (emu->timer_active) - del_timer(&emu->tlist); - spin_unlock_irqrestore(&emu->voice_lock, flags); + del_timer_sync(&emu->tlist); snd_emux_proc_free(emu); snd_emux_delete_virmidi(emu); diff --git a/sound/usb/bcd2000/bcd2000.c b/sound/usb/bcd2000/bcd2000.c index 010976d9ceb2..01f0b329797c 100644 --- a/sound/usb/bcd2000/bcd2000.c +++ b/sound/usb/bcd2000/bcd2000.c @@ -348,7 +348,8 @@ static int bcd2000_init_midi(struct bcd2000 *bcd2k) static void bcd2000_free_usb_related_resources(struct bcd2000 *bcd2k, struct usb_interface *interface) { - /* usb_kill_urb not necessary, urb is aborted automatically */ + usb_kill_urb(bcd2k->midi_out_urb); + usb_kill_urb(bcd2k->midi_in_urb); usb_free_urb(bcd2k->midi_out_urb); usb_free_urb(bcd2k->midi_in_urb); diff --git a/sound/usb/card.c b/sound/usb/card.c index 1b601c92e0d4..ee5d28ad8968 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -558,6 +558,14 @@ static const struct usb_audio_device_name usb_audio_names[] = { DEVICE_NAME(0x046d, 0x0990, "Logitech, Inc.", "QuickCam Pro 9000"), + /* ASUS ROG Zenith II: this machine has also two devices, one for + * the front headphone and another for the rest + */ + PROFILE_NAME(0x0b05, 0x1915, "ASUS", "Zenith II Front Headphone", + "Zenith-II-Front-Headphone"), + PROFILE_NAME(0x0b05, 0x1916, "ASUS", "Zenith II Main Audio", + "Zenith-II-Main-Audio"), + /* ASUS ROG Strix */ PROFILE_NAME(0x0b05, 0x1917, "Realtek", "ALC1220-VB-DT", "Realtek-ALC1220-VB-Desktop"), @@ -840,7 +848,7 @@ static bool check_delayed_register_option(struct snd_usb_audio *chip, int iface) if (delayed_register[i] && sscanf(delayed_register[i], "%x:%x", &id, &inum) == 2 && id == chip->usb_id) - return inum != iface; + return iface < inum; } return false; diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 8527267725bb..80dcac5abe0c 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -73,12 +73,13 @@ static inline unsigned get_usb_high_speed_rate(unsigned int rate) */ static void release_urb_ctx(struct snd_urb_ctx *u) { - if (u->buffer_size) + if (u->urb && 
u->buffer_size) usb_free_coherent(u->ep->chip->dev, u->buffer_size, u->urb->transfer_buffer, u->urb->transfer_dma); usb_free_urb(u->urb); u->urb = NULL; + u->buffer_size = 0; } static const char *usb_error_string(int err) @@ -998,6 +999,7 @@ static int sync_ep_set_params(struct snd_usb_endpoint *ep) if (!ep->syncbuf) return -ENOMEM; + ep->nurbs = SYNC_URBS; for (i = 0; i < SYNC_URBS; i++) { struct snd_urb_ctx *u = &ep->urb[i]; u->index = i; @@ -1017,8 +1019,6 @@ static int sync_ep_set_params(struct snd_usb_endpoint *ep) u->urb->complete = snd_complete_urb; } - ep->nurbs = SYNC_URBS; - return 0; out_of_memory: diff --git a/sound/usb/midi.c b/sound/usb/midi.c index fa91290ad89d..b02e1a33304f 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1149,10 +1149,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) port = &umidi->endpoints[i].out->ports[j]; break; } - if (!port) { - snd_BUG(); + if (!port) return -ENXIO; - } substream->runtime->private_data = port; port->state = STATE_UNKNOWN; @@ -1161,6 +1159,9 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream) { + struct usbmidi_out_port *port = substream->runtime->private_data; + + cancel_work_sync(&port->ep->work); return substream_open(substream, 0, 0); } @@ -1210,6 +1211,7 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream) } while (drain_urbs && timeout); finish_wait(&ep->drain_wait, &wait); } + port->active = 0; spin_unlock_irq(&ep->buffer_lock); } diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index 81ace832d7e4..b708a240a5f0 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -367,13 +367,28 @@ static const struct usbmix_name_map corsair_virtuoso_map[] = { { 0 } }; -/* Some mobos shipped with a dummy HD-audio show the invalid GET_MIN/GET_MAX - * response for Input Gain Pad (id=19, control=12) and the connector status - * for SPDIF terminal (id=18). Skip them. 
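A note on the sync_ep_set_params() hunk above: assigning ep->nurbs before the URB allocation loop means the out_of_memory unwind path, which walks nurbs entries through release_urb_ctx(), can now clean up a partially built array; the matching release_urb_ctx() change makes a NULL urb safe to hit. The same idiom reduced to self-contained C, with hypothetical names:

#include <stdlib.h>
#include <string.h>

#define NSLOTS 4

struct pool {
	int nslots;
	void *slot[NSLOTS];
};

/* Record the slot count before filling the array: on a mid-loop
 * failure the unwind below still knows how far to walk, and the
 * zeroed, untouched slots are NULL and safe to free. */
static int pool_init(struct pool *p)
{
	int i;

	memset(p, 0, sizeof(*p));
	p->nslots = NSLOTS;
	for (i = 0; i < NSLOTS; i++) {
		p->slot[i] = calloc(1, 64);
		if (!p->slot[i])
			return -1;	/* caller is expected to run pool_free() */
	}
	return 0;
}

static void pool_free(struct pool *p)
{
	int i;

	for (i = 0; i < p->nslots; i++)
		free(p->slot[i]);	/* free(NULL) is a no-op */
}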
- */ -static const struct usbmix_name_map asus_rog_map[] = { - { 18, NULL }, /* OT, connector control */ - { 19, NULL, 12 }, /* FU, Input Gain Pad */ +/* ASUS ROG Zenith II with Realtek ALC1220-VB */ +static const struct usbmix_name_map asus_zenith_ii_map[] = { + { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */ + { 16, "Speaker" }, /* OT */ + { 22, "Speaker Playback" }, /* FU */ + { 7, "Line" }, /* IT */ + { 19, "Line Capture" }, /* FU */ + { 8, "Mic" }, /* IT */ + { 20, "Mic Capture" }, /* FU */ + { 9, "Front Mic" }, /* IT */ + { 21, "Front Mic Capture" }, /* FU */ + { 17, "IEC958" }, /* OT */ + { 23, "IEC958 Playback" }, /* FU */ + {} +}; + +static const struct usbmix_connector_map asus_zenith_ii_connector_map[] = { + { 10, 16 }, /* (Back) Speaker */ + { 11, 17 }, /* SPDIF */ + { 13, 7 }, /* Line */ + { 14, 8 }, /* Mic */ + { 15, 9 }, /* Front Mic */ {} }; @@ -590,9 +605,10 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = { .map = trx40_mobo_map, .connector_map = trx40_mobo_connector_map, }, - { /* ASUS ROG Zenith II */ + { /* ASUS ROG Zenith II (main audio) */ .id = USB_ID(0x0b05, 0x1916), - .map = asus_rog_map, + .map = asus_zenith_ii_map, + .connector_map = asus_zenith_ii_connector_map, }, { /* ASUS ROG Strix */ .id = USB_ID(0x0b05, 0x1917), diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index aabd3a10ec5b..6a78813b63f5 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2028,6 +2028,10 @@ YAMAHA_DEVICE(0x7010, "UB99"), } } }, +{ + /* M-Audio Micro */ + USB_DEVICE_VENDOR_SPEC(0x0763, 0x201a), +}, { USB_DEVICE_VENDOR_SPEC(0x0763, 0x2030), .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { @@ -3208,6 +3212,15 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } }, +/* Rane SL-1 */ +{ + USB_DEVICE(0x13e5, 0x0001), + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + } +}, + /* disabled due to regression for other devices; * see https://bugzilla.kernel.org/show_bug.cgi?id=199905 */ @@ -3647,6 +3660,58 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } }, +/* + * MacroSilicon MS2100/MS2106 based AV capture cards + * + * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch. + * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if + * they pretend to be 96kHz mono as a workaround for stereo being broken + * by that... + * + * They also have an issue with initial stream alignment that causes the + * channels to be swapped and out of phase, which is dealt with in quirks.c. 
+ */ +{ + USB_AUDIO_DEVICE(0x534d, 0x0021), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "MacroSilicon", + .product_name = "MS210x", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = &(const struct snd_usb_audio_quirk[]) { + { + .ifnum = 2, + .type = QUIRK_AUDIO_ALIGN_TRANSFER, + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_MIXER, + }, + { + .ifnum = 3, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels = 2, + .iface = 3, + .altsetting = 1, + .altset_idx = 1, + .attributes = 0, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC | + USB_ENDPOINT_SYNC_ASYNC, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 48000, + .rate_max = 48000, + } + }, + { + .ifnum = -1 + } + } + } +}, + /* * MacroSilicon MS2109 based HDMI capture cards * diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 6333a2ecb848..752422147fb3 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1508,6 +1508,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */ pioneer_djm_set_format_quirk(subs); break; + case USB_ID(0x534d, 0x0021): /* MacroSilicon MS2100/MS2106 */ case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */ subs->stream_offset_adj = 2; break; @@ -1743,6 +1744,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, /* XMOS based USB DACs */ switch (chip->usb_id) { case USB_ID(0x1511, 0x0037): /* AURALiC VEGA */ + case USB_ID(0x21ed, 0xd75a): /* Accuphase DAC-60 option card */ case USB_ID(0x2522, 0x0012): /* LH Labs VI DAC Infinity */ case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */ if (fp->altsetting == 2) @@ -1911,7 +1913,7 @@ bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface) for (q = registration_quirks; q->usb_id; q++) if (chip->usb_id == q->usb_id) - return iface != q->interface; + return iface < q->interface; /* Register as normal */ return false; diff --git a/sound/usb/stream.c b/sound/usb/stream.c index bf88933acc4f..d1a8dd56cb61 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -500,6 +500,10 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip, return 0; } } + + if (chip->card->registered) + chip->need_delayed_register = true; + /* look for an empty stream */ list_for_each_entry(as, &chip->pcm_list, list) { if (as->fmt_type != fp->fmt_type) @@ -507,9 +511,6 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip, subs = &as->substream[stream]; if (subs->ep_num) continue; - if (snd_device_get_state(chip->card, as->pcm) != - SNDRV_DEV_BUILD) - chip->need_delayed_register = true; err = snd_pcm_new_stream(as->pcm, stream, 1); if (err < 0) return err; @@ -1110,7 +1111,7 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip, * Dallas DS4201 workaround: It presents 5 altsettings, but the last * one misses syncpipe, and does not produce any sound. 
*/ - if (chip->usb_id == USB_ID(0x04fa, 0x4201)) + if (chip->usb_id == USB_ID(0x04fa, 0x4201) && num >= 4) num = 4; for (i = 0; i < num; i++) { diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index 6d40b46c3c13..0b8d5c18e4fd 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -8,7 +8,7 @@ */ /* handling of USB vendor/product ID pairs as 32-bit numbers */ -#define USB_ID(vendor, product) (((vendor) << 16) | (product)) +#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product)) #define USB_ID_VENDOR(id) ((id) >> 16) #define USB_ID_PRODUCT(id) ((u16)(id)) diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index b58730cc12e8..ec53f52a06a5 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -203,8 +203,8 @@ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ -#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ -#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCEs for Spectre variant 2 */ +#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */ +#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ @@ -290,6 +290,13 @@ #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */ #define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */ #define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */ +#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */ +#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */ +#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */ +#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */ +#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */ +#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM-Exit when EIBRS is enabled */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ @@ -308,6 +315,7 @@ #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. 
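The usbaudio.h one-liner above is an integer-promotion fix: vendor is an int when shifted, so vendor IDs of 0x8000 and above would shift a bit into the sign position, which is undefined behaviour for signed int and sign-extends in any later widening to 64 bits. Casting to unsigned int first makes the shift well defined. A standalone illustration with an arbitrary ID:

#include <stdio.h>

/* old and new definitions side by side; the IDs below are arbitrary */
#define USB_ID_OLD(vendor, product) (((vendor) << 16) | (product))
#define USB_ID_NEW(vendor, product) (((unsigned int)(vendor) << 16) | (product))

int main(void)
{
	/* 0xffff << 16 overflows a 32-bit signed int, so USB_ID_OLD
	 * would be undefined behaviour here; the cast yields a
	 * well-defined unsigned 0xffff0001. */
	unsigned int id = USB_ID_NEW(0xffff, 0x0001);

	printf("id     = 0x%08x\n", id);
	printf("vendor = 0x%04x\n", id >> 16);
	return 0;
}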
*/ +#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ @@ -417,5 +425,7 @@ #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ +#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ +#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index 5861d34f9771..d109c5ec967c 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h @@ -56,6 +56,25 @@ # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) #endif +#ifdef CONFIG_RETPOLINE +# define DISABLE_RETPOLINE 0 +#else +# define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \ + (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31))) +#endif + +#ifdef CONFIG_RETHUNK +# define DISABLE_RETHUNK 0 +#else +# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31)) +#endif + +#ifdef CONFIG_CPU_UNRET_ENTRY +# define DISABLE_UNRET 0 +#else +# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31)) +#endif + #ifdef CONFIG_IOMMU_SUPPORT # define DISABLE_ENQCMD 0 #else @@ -76,7 +95,7 @@ #define DISABLED_MASK8 0 #define DISABLED_MASK9 (DISABLE_SMAP) #define DISABLED_MASK10 0 -#define DISABLED_MASK11 0 +#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET) #define DISABLED_MASK12 0 #define DISABLED_MASK13 0 #define DISABLED_MASK14 0 diff --git a/tools/arch/x86/include/asm/inat.h b/tools/arch/x86/include/asm/inat.h index 877827b7c2c3..a61051400311 100644 --- a/tools/arch/x86/include/asm/inat.h +++ b/tools/arch/x86/include/asm/inat.h @@ -6,7 +6,7 @@ * * Written by Masami Hiramatsu */ -#include "inat_types.h" +#include "inat_types.h" /* __ignore_sync_check__ */ /* * Internal bits. 
Don't use bitmasks directly, because these bits are diff --git a/tools/arch/x86/include/asm/insn.h b/tools/arch/x86/include/asm/insn.h index 52c6262e6bfd..636ec02793a7 100644 --- a/tools/arch/x86/include/asm/insn.h +++ b/tools/arch/x86/include/asm/insn.h @@ -8,7 +8,7 @@ */ /* insn_attr_t is defined in inat.h */ -#include "inat.h" +#include "inat.h" /* __ignore_sync_check__ */ struct insn_field { union { @@ -87,13 +87,25 @@ struct insn { #define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64); -extern void insn_get_prefixes(struct insn *insn); -extern void insn_get_opcode(struct insn *insn); -extern void insn_get_modrm(struct insn *insn); -extern void insn_get_sib(struct insn *insn); -extern void insn_get_displacement(struct insn *insn); -extern void insn_get_immediate(struct insn *insn); -extern void insn_get_length(struct insn *insn); +extern int insn_get_prefixes(struct insn *insn); +extern int insn_get_opcode(struct insn *insn); +extern int insn_get_modrm(struct insn *insn); +extern int insn_get_sib(struct insn *insn); +extern int insn_get_displacement(struct insn *insn); +extern int insn_get_immediate(struct insn *insn); +extern int insn_get_length(struct insn *insn); + +enum insn_mode { + INSN_MODE_32, + INSN_MODE_64, + /* Mode is determined by the current kernel build. */ + INSN_MODE_KERN, + INSN_NUM_MODES, +}; + +extern int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m); + +#define insn_decode_kernel(_insn, _ptr) insn_decode((_insn), (_ptr), MAX_INSN_SIZE, INSN_MODE_KERN) /* Attribute will be determined after getting ModRM (for opcode groups) */ static inline void insn_get_attribute(struct insn *insn) diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 972a34d93505..8fb925676813 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -51,6 +51,8 @@ #define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */ #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ #define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ +#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */ +#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT) #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ @@ -91,6 +93,7 @@ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a #define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */ #define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */ +#define ARCH_CAP_RSBA BIT(2) /* RET may use alternative branch predictors */ #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */ #define ARCH_CAP_SSB_NO BIT(4) /* * Not susceptible to Speculative Store Bypass @@ -114,6 +117,41 @@ * Not susceptible to * TSX Async Abort (TAA) vulnerabilities. */ +#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /* + * Not susceptible to SBDR and SSDP + * variants of Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FBSDP_NO BIT(14) /* + * Not susceptible to FBSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_PSDP_NO BIT(15) /* + * Not susceptible to PSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FB_CLEAR BIT(17) /* + * VERW clears CPU fill buffer + * even on MDS_NO CPUs. 
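The insn.h hunk above converts the insn_get_*() helpers from void to error-returning functions and adds insn_decode() as the single entry point. A sketch of a caller of the tools copy; the include path and the wrapper are illustrative, not part of the patch:

#include "../include/asm/insn.h"	/* tools copy of the decoder */

/* Return the decoded length of the instruction at buf, or a negative
 * errno (-ENODATA on truncation, -EINVAL on a bad encoding) when the
 * bytes do not decode cleanly. */
static int decoded_length(const unsigned char *buf, int buf_len)
{
	struct insn insn;
	int ret;

	ret = insn_decode(&insn, buf, buf_len, INSN_MODE_64);
	if (ret < 0)
		return ret;

	return insn.length;
}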
+ */ +#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /* + * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS] + * bit available to control VERW + * behavior. + */ +#define ARCH_CAP_RRSBA BIT(19) /* + * Indicates RET may use predictors + * other than the RSB. With eIBRS + * enabled predictions in kernel mode + * are restricted to targets in + * kernel. + */ +#define ARCH_CAP_PBRSB_NO BIT(24) /* + * Not susceptible to Post-Barrier + * Return Stack Buffer Predictions. + */ #define MSR_IA32_FLUSH_CMD 0x0000010b #define L1D_FLUSH BIT(0) /* @@ -131,6 +169,7 @@ /* SRBDS support */ #define MSR_IA32_MCU_OPT_CTRL 0x00000123 #define RNGDS_MITG_DIS BIT(0) +#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 @@ -450,6 +489,11 @@ #define MSR_AMD64_CPUID_FN_1 0xc0011004 #define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 + +#define MSR_AMD64_DE_CFG 0xc0011029 +#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 +#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) + #define MSR_AMD64_BU_CFG2 0xc001102a #define MSR_AMD64_IBSFETCHCTL 0xc0011030 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 @@ -482,6 +526,9 @@ /* Fam 17h MSRs */ #define MSR_F17H_IRPERF 0xc00000e9 +#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3 +#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1) + /* Fam 16h MSRs */ #define MSR_F16H_L2I_PERF_CTL 0xc0010230 #define MSR_F16H_L2I_PERF_CTR 0xc0010231 @@ -523,9 +570,6 @@ #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL #define FAM10H_MMIO_CONF_BASE_SHIFT 20 #define MSR_FAM10H_NODE_ID 0xc001100c -#define MSR_F10H_DECFG 0xc0011029 -#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 -#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT) /* K8 MSRs */ #define MSR_K8_TOP_MEM1 0xc001001a diff --git a/tools/arch/x86/lib/inat.c b/tools/arch/x86/lib/inat.c index 4f5ed49e1b4e..dfbcc6405941 100644 --- a/tools/arch/x86/lib/inat.c +++ b/tools/arch/x86/lib/inat.c @@ -4,7 +4,7 @@ * * Written by Masami Hiramatsu */ -#include "../include/asm/insn.h" +#include "../include/asm/insn.h" /* __ignore_sync_check__ */ /* Attribute tables are generated from opcode map */ #include "inat-tables.c" diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c index 0151dfc6da61..f24cc0f618e4 100644 --- a/tools/arch/x86/lib/insn.c +++ b/tools/arch/x86/lib/insn.c @@ -10,10 +10,13 @@ #else #include #endif -#include "../include/asm/inat.h" -#include "../include/asm/insn.h" +#include "../include/asm/inat.h" /* __ignore_sync_check__ */ +#include "../include/asm/insn.h" /* __ignore_sync_check__ */ -#include "../include/asm/emulate_prefix.h" +#include +#include + +#include "../include/asm/emulate_prefix.h" /* __ignore_sync_check__ */ /* Verify next sizeof(t) bytes can be on the same instruction */ #define validate_next(t, insn, n) \ @@ -97,8 +100,12 @@ static void insn_get_emulate_prefix(struct insn *insn) * Populates the @insn->prefixes bitmap, and updates @insn->next_byte * to point to the (first) opcode. No effect if @insn->prefixes.got * is already set. 
+ * + * * Returns: + * 0: on success + * < 0: on error */ -void insn_get_prefixes(struct insn *insn) +int insn_get_prefixes(struct insn *insn) { struct insn_field *prefixes = &insn->prefixes; insn_attr_t attr; @@ -106,7 +113,7 @@ void insn_get_prefixes(struct insn *insn) int i, nb; if (prefixes->got) - return; + return 0; insn_get_emulate_prefix(insn); @@ -217,8 +224,10 @@ vex_end: prefixes->got = 1; + return 0; + err_out: - return; + return -ENODATA; } /** @@ -230,16 +239,25 @@ err_out: * If necessary, first collects any preceding (prefix) bytes. * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got * is already 1. + * + * Returns: + * 0: on success + * < 0: on error */ -void insn_get_opcode(struct insn *insn) +int insn_get_opcode(struct insn *insn) { struct insn_field *opcode = &insn->opcode; + int pfx_id, ret; insn_byte_t op; - int pfx_id; + if (opcode->got) - return; - if (!insn->prefixes.got) - insn_get_prefixes(insn); + return 0; + + if (!insn->prefixes.got) { + ret = insn_get_prefixes(insn); + if (ret) + return ret; + } /* Get first opcode */ op = get_next(insn_byte_t, insn); @@ -254,9 +272,13 @@ void insn_get_opcode(struct insn *insn) insn->attr = inat_get_avx_attribute(op, m, p); if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) || (!inat_accept_vex(insn->attr) && - !inat_is_group(insn->attr))) - insn->attr = 0; /* This instruction is bad */ - goto end; /* VEX has only 1 byte for opcode */ + !inat_is_group(insn->attr))) { + /* This instruction is bad */ + insn->attr = 0; + return -EINVAL; + } + /* VEX has only 1 byte for opcode */ + goto end; } insn->attr = inat_get_opcode_attribute(op); @@ -267,13 +289,18 @@ void insn_get_opcode(struct insn *insn) pfx_id = insn_last_prefix_id(insn); insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr); } - if (inat_must_vex(insn->attr)) - insn->attr = 0; /* This instruction is bad */ + + if (inat_must_vex(insn->attr)) { + /* This instruction is bad */ + insn->attr = 0; + return -EINVAL; + } end: opcode->got = 1; + return 0; err_out: - return; + return -ENODATA; } /** @@ -283,15 +310,25 @@ err_out: * Populates @insn->modrm and updates @insn->next_byte to point past the * ModRM byte, if any. If necessary, first collects the preceding bytes * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1. 
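The insn.c conversions here all follow one template: return 0 immediately when the stage's got flag is already set, otherwise run the prior stage on demand and propagate its error instead of silently falling through. The skeleton in self-contained C, with hypothetical stage names:

#include <errno.h>

struct field { int got; int value; };
struct parser { struct field prefixes, opcode; };

static int get_prefixes(struct parser *p)
{
	if (p->prefixes.got)
		return 0;	/* memoized: a stage never runs twice */
	/* ... consume prefix bytes; on truncation: return -ENODATA ... */
	p->prefixes.got = 1;
	return 0;
}

static int get_opcode(struct parser *p)
{
	int ret;

	if (p->opcode.got)
		return 0;
	if (!p->prefixes.got) {
		ret = get_prefixes(p);	/* run the earlier stage on demand */
		if (ret)
			return ret;	/* propagate, do not fall through */
	}
	/* ... parse the opcode; on a bad encoding: return -EINVAL ... */
	p->opcode.got = 1;
	return 0;
}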
+ * + * Returns: + * 0: on success + * < 0: on error */ -void insn_get_modrm(struct insn *insn) +int insn_get_modrm(struct insn *insn) { struct insn_field *modrm = &insn->modrm; insn_byte_t pfx_id, mod; + int ret; + if (modrm->got) - return; - if (!insn->opcode.got) - insn_get_opcode(insn); + return 0; + + if (!insn->opcode.got) { + ret = insn_get_opcode(insn); + if (ret) + return ret; + } if (inat_has_modrm(insn->attr)) { mod = get_next(insn_byte_t, insn); @@ -301,17 +338,22 @@ void insn_get_modrm(struct insn *insn) pfx_id = insn_last_prefix_id(insn); insn->attr = inat_get_group_attribute(mod, pfx_id, insn->attr); - if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) - insn->attr = 0; /* This is bad */ + if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) { + /* Bad insn */ + insn->attr = 0; + return -EINVAL; + } } } if (insn->x86_64 && inat_is_force64(insn->attr)) insn->opnd_bytes = 8; + modrm->got = 1; + return 0; err_out: - return; + return -ENODATA; } @@ -325,11 +367,16 @@ err_out: int insn_rip_relative(struct insn *insn) { struct insn_field *modrm = &insn->modrm; + int ret; if (!insn->x86_64) return 0; - if (!modrm->got) - insn_get_modrm(insn); + + if (!modrm->got) { + ret = insn_get_modrm(insn); + if (ret) + return 0; + } /* * For rip-relative instructions, the mod field (top 2 bits) * is zero and the r/m field (bottom 3 bits) is 0x5. @@ -343,15 +390,25 @@ int insn_rip_relative(struct insn *insn) * * If necessary, first collects the instruction up to and including the * ModRM byte. + * + * Returns: + * 0: if decoding succeeded + * < 0: otherwise. */ -void insn_get_sib(struct insn *insn) +int insn_get_sib(struct insn *insn) { insn_byte_t modrm; + int ret; if (insn->sib.got) - return; - if (!insn->modrm.got) - insn_get_modrm(insn); + return 0; + + if (!insn->modrm.got) { + ret = insn_get_modrm(insn); + if (ret) + return ret; + } + if (insn->modrm.nbytes) { modrm = (insn_byte_t)insn->modrm.value; if (insn->addr_bytes != 2 && @@ -362,8 +419,10 @@ void insn_get_sib(struct insn *insn) } insn->sib.got = 1; + return 0; + err_out: - return; + return -ENODATA; } @@ -374,15 +433,25 @@ err_out: * If necessary, first collects the instruction up to and including the * SIB byte. * Displacement value is sign-expanded. + * + * * Returns: + * 0: if decoding succeeded + * < 0: otherwise. */ -void insn_get_displacement(struct insn *insn) +int insn_get_displacement(struct insn *insn) { insn_byte_t mod, rm, base; + int ret; if (insn->displacement.got) - return; - if (!insn->sib.got) - insn_get_sib(insn); + return 0; + + if (!insn->sib.got) { + ret = insn_get_sib(insn); + if (ret) + return ret; + } + if (insn->modrm.nbytes) { /* * Interpreting the modrm byte: @@ -425,9 +494,10 @@ void insn_get_displacement(struct insn *insn) } out: insn->displacement.got = 1; + return 0; err_out: - return; + return -ENODATA; } /* Decode moffset16/32/64. Return 0 if failed */ @@ -538,20 +608,30 @@ err_out: } /** - * insn_get_immediate() - Get the immediates of instruction + * insn_get_immediate() - Get the immediate in an instruction * @insn: &struct insn containing instruction * * If necessary, first collects the instruction up to and including the * displacement bytes. * Basically, most of immediates are sign-expanded. 
Unsigned-value can be - * get by bit masking with ((1 << (nbytes * 8)) - 1) + * computed by bit masking with ((1 << (nbytes * 8)) - 1) + * + * Returns: + * 0: on success + * < 0: on error */ -void insn_get_immediate(struct insn *insn) +int insn_get_immediate(struct insn *insn) { + int ret; + if (insn->immediate.got) - return; - if (!insn->displacement.got) - insn_get_displacement(insn); + return 0; + + if (!insn->displacement.got) { + ret = insn_get_displacement(insn); + if (ret) + return ret; + } if (inat_has_moffset(insn->attr)) { if (!__get_moffset(insn)) @@ -604,9 +684,10 @@ void insn_get_immediate(struct insn *insn) } done: insn->immediate.got = 1; + return 0; err_out: - return; + return -ENODATA; } /** @@ -615,13 +696,58 @@ err_out: * * If necessary, first collects the instruction up to and including the * immediates bytes. - */ -void insn_get_length(struct insn *insn) + * + * Returns: + * - 0 on success + * - < 0 on error +*/ +int insn_get_length(struct insn *insn) { + int ret; + if (insn->length) - return; - if (!insn->immediate.got) - insn_get_immediate(insn); + return 0; + + if (!insn->immediate.got) { + ret = insn_get_immediate(insn); + if (ret) + return ret; + } + insn->length = (unsigned char)((unsigned long)insn->next_byte - (unsigned long)insn->kaddr); + + return 0; +} + +/** + * insn_decode() - Decode an x86 instruction + * @insn: &struct insn to be initialized + * @kaddr: address (in kernel memory) of instruction (or copy thereof) + * @buf_len: length of the insn buffer at @kaddr + * @m: insn mode, see enum insn_mode + * + * Returns: + * 0: if decoding succeeded + * < 0: otherwise. + */ +int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m) +{ + int ret; + +#define INSN_MODE_KERN (enum insn_mode)-1 /* __ignore_sync_check__ mode is only valid in the kernel */ + + if (m == INSN_MODE_KERN) + insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64)); + else + insn_init(insn, kaddr, buf_len, m == INSN_MODE_64); + + ret = insn_get_length(insn); + if (ret) + return ret; + + if (insn_complete(insn)) + return 0; + + return -EINVAL; } diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S index 1e299ac73c86..59cf2343f3d9 100644 --- a/tools/arch/x86/lib/memcpy_64.S +++ b/tools/arch/x86/lib/memcpy_64.S @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include .pushsection .noinstr.text, "ax" @@ -39,7 +39,7 @@ SYM_FUNC_START_WEAK(memcpy) rep movsq movl %edx, %ecx rep movsb - ret + RET SYM_FUNC_END(memcpy) SYM_FUNC_END_ALIAS(__memcpy) EXPORT_SYMBOL(memcpy) @@ -53,7 +53,7 @@ SYM_FUNC_START_LOCAL(memcpy_erms) movq %rdi, %rax movq %rdx, %rcx rep movsb - ret + RET SYM_FUNC_END(memcpy_erms) SYM_FUNC_START_LOCAL(memcpy_orig) @@ -137,7 +137,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) movq %r9, 1*8(%rdi) movq %r10, -2*8(%rdi, %rdx) movq %r11, -1*8(%rdi, %rdx) - retq + RET .p2align 4 .Lless_16bytes: cmpl $8, %edx @@ -149,7 +149,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) movq -1*8(%rsi, %rdx), %r9 movq %r8, 0*8(%rdi) movq %r9, -1*8(%rdi, %rdx) - retq + RET .p2align 4 .Lless_8bytes: cmpl $4, %edx @@ -162,7 +162,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) movl -4(%rsi, %rdx), %r8d movl %ecx, (%rdi) movl %r8d, -4(%rdi, %rdx) - retq + RET .p2align 4 .Lless_3bytes: subl $1, %edx @@ -180,7 +180,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) movb %cl, (%rdi) .Lend: - retq + RET SYM_FUNC_END(memcpy_orig) .popsection diff --git a/tools/arch/x86/lib/memset_64.S b/tools/arch/x86/lib/memset_64.S index 0bfd26e4ca9e..d624f2bc42f1 100644 --- 
a/tools/arch/x86/lib/memset_64.S +++ b/tools/arch/x86/lib/memset_64.S @@ -3,7 +3,7 @@ #include #include -#include +#include #include /* @@ -40,7 +40,7 @@ SYM_FUNC_START(__memset) movl %edx,%ecx rep stosb movq %r9,%rax - ret + RET SYM_FUNC_END(__memset) SYM_FUNC_END_ALIAS(memset) EXPORT_SYMBOL(memset) @@ -63,7 +63,7 @@ SYM_FUNC_START_LOCAL(memset_erms) movq %rdx,%rcx rep stosb movq %r9,%rax - ret + RET SYM_FUNC_END(memset_erms) SYM_FUNC_START_LOCAL(memset_orig) @@ -125,7 +125,7 @@ SYM_FUNC_START_LOCAL(memset_orig) .Lende: movq %r10,%rax - ret + RET .Lbad_alignment: cmpq $7,%rdx diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 0e9310727281..13be48763199 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -416,7 +416,7 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset, *(char *)data); break; case BTF_INT_BOOL: - jsonw_bool(jw, *(int *)data); + jsonw_bool(jw, *(bool *)data); break; default: /* shouldn't happen */ diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 6ebf2b215ef4..eefa2b34e641 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -271,6 +271,9 @@ int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***)) int err; int fd; + if (!REQ_ARGS(3)) + return -EINVAL; + fd = get_fd(&argc, &argv); if (fd < 0) return fd; diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index 1854d6b97860..4fd4e3462ebc 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -398,6 +398,16 @@ int main(int argc, char **argv) setlinebuf(stdout); +#ifdef USE_LIBCAP + /* Libcap < 2.63 hooks before main() to compute the number of + * capabilities of the running kernel, and doing so it calls prctl() + * which may fail and set errno to non-zero. + * Let's reset errno to make sure this does not interfere with the + * batch mode. 
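The btf_dumper_int() change above is worth a gloss: _Bool occupies one byte on the usual ABIs, so dereferencing the data pointer as int pulls in three bytes of neighbouring storage and can misreport the value. A compact illustration of the difference:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* a one-byte bool packed against nonzero neighbours */
	unsigned char data[4] = { 1, 0xde, 0xad, 0xbe };
	bool b;

	memcpy(&b, data, sizeof(b));	/* reads exactly one byte */
	printf("as bool: %d\n", b);	/* 1 */

	/* *(int *)data would read all four bytes (0xbeadde01 on a
	 * little-endian machine) and is an alignment and aliasing
	 * hazard besides; hence the switch to *(bool *)data. */
	return 0;
}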
+ */ + errno = 0; +#endif + last_do_help = do_help; pretty_output = false; json_output = false; diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 22ea350dab58..221250973d07 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -210,9 +210,16 @@ strip-libs = $(filter-out -l%,$(1)) PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null) PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS)) PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS)) -PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` +PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null) FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS) +ifeq ($(CC_NO_CLANG), 0) + PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS)) + PERL_EMBED_CCOPTS := $(filter-out -flto=auto -ffat-lto-objects, $(PERL_EMBED_CCOPTS)) + PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS)) + FLAGS_PERL_EMBED += -Wno-compound-token-split-by-macro +endif + $(OUTPUT)test-libperl.bin: $(BUILD) $(FLAGS_PERL_EMBED) diff --git a/tools/build/feature/test-libcrypto.c b/tools/build/feature/test-libcrypto.c index a98174e0569c..bc34a5bbb504 100644 --- a/tools/build/feature/test-libcrypto.c +++ b/tools/build/feature/test-libcrypto.c @@ -1,16 +1,23 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include int main(void) { - MD5_CTX context; + EVP_MD_CTX *mdctx; unsigned char md[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH]; unsigned char dat[] = "12345"; + unsigned int digest_len; - MD5_Init(&context); - MD5_Update(&context, &dat[0], sizeof(dat)); - MD5_Final(&md[0], &context); + mdctx = EVP_MD_CTX_new(); + if (!mdctx) + return 0; + + EVP_DigestInit_ex(mdctx, EVP_md5(), NULL); + EVP_DigestUpdate(mdctx, &dat[0], sizeof(dat)); + EVP_DigestFinal_ex(mdctx, &md[0], &digest_len); + EVP_MD_CTX_free(mdctx); SHA1(&dat[0], sizeof(dat), &md[0]); diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c index 7399eb7f1378..d66b18c54606 100644 --- a/tools/iio/iio_utils.c +++ b/tools/iio/iio_utils.c @@ -543,6 +543,10 @@ static int calc_digits(int num) { int count = 0; + /* It takes a digit to represent zero */ + if (!num) + return 1; + while (num != 0) { num /= 10; count++; diff --git a/tools/include/asm/alternative-asm.h b/tools/include/asm/alternative.h similarity index 100% rename from tools/include/asm/alternative-asm.h rename to tools/include/asm/alternative.h diff --git a/tools/include/linux/kconfig.h b/tools/include/linux/kconfig.h new file mode 100644 index 000000000000..13b86bd3b746 --- /dev/null +++ b/tools/include/linux/kconfig.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _TOOLS_LINUX_KCONFIG_H +#define _TOOLS_LINUX_KCONFIG_H + +/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */ + +#define __ARG_PLACEHOLDER_1 0, +#define __take_second_arg(__ignored, val, ...) val + +/* + * The use of "&&" / "||" is limited in certain expressions. + * The following enable to calculate "and" / "or" with macro expansion only. + */ +#define __and(x, y) ___and(x, y) +#define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y) +#define ____and(arg1_or_junk, y) __take_second_arg(arg1_or_junk y, 0) + +#define __or(x, y) ___or(x, y) +#define ___or(x, y) ____or(__ARG_PLACEHOLDER_##x, y) +#define ____or(arg1_or_junk, y) __take_second_arg(arg1_or_junk 1, y) + +/* + * Helper macros to use CONFIG_ options in C/CPP expressions. Note that + * these only work with boolean and tristate options. 
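The iio_utils fix above closes a classic digit-counting hole: a divide-by-ten loop body never executes for zero, so calc_digits(0) returned 0 and anything sized from it dropped the character entirely. The fixed function with a quick self-check:

#include <assert.h>

static int calc_digits(int num)
{
	int count = 0;

	/* It takes a digit to represent zero */
	if (!num)
		return 1;

	while (num != 0) {
		num /= 10;
		count++;
	}
	return count;
}

int main(void)
{
	assert(calc_digits(0) == 1);
	assert(calc_digits(9) == 1);
	assert(calc_digits(4096) == 4);
	return 0;
}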
+ */ + +/* + * Getting something that works in C and CPP for an arg that may or may + * not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1" + * we match on the placeholder define, insert the "0," for arg1 and generate + * the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one). + * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when + * the last step cherry picks the 2nd arg, we get a zero. + */ +#define __is_defined(x) ___is_defined(x) +#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val) +#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0) + +/* + * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 + * otherwise. For boolean options, this is equivalent to + * IS_ENABLED(CONFIG_FOO). + */ +#define IS_BUILTIN(option) __is_defined(option) + +/* + * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 + * otherwise. + */ +#define IS_MODULE(option) __is_defined(option##_MODULE) + +/* + * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled + * code can call a function defined in code compiled based on CONFIG_FOO. + * This is similar to IS_ENABLED(), but returns false when invoked from + * built-in code when CONFIG_FOO is set to 'm'. + */ +#define IS_REACHABLE(option) __or(IS_BUILTIN(option), \ + __and(IS_MODULE(option), __is_defined(MODULE))) + +/* + * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', + * 0 otherwise. + */ +#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) + +#endif /* _TOOLS_LINUX_KCONFIG_H */ diff --git a/tools/include/linux/objtool.h b/tools/include/linux/objtool.h index 577f51436cf9..662f19374bd9 100644 --- a/tools/include/linux/objtool.h +++ b/tools/include/linux/objtool.h @@ -29,11 +29,19 @@ struct unwind_hint { * * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that * sp_reg+sp_offset points to the iret return frame. + * + * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function. + * Useful for code which doesn't have an ELF function annotation. + * + * UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc. */ #define UNWIND_HINT_TYPE_CALL 0 #define UNWIND_HINT_TYPE_REGS 1 #define UNWIND_HINT_TYPE_REGS_PARTIAL 2 -#define UNWIND_HINT_TYPE_RET_OFFSET 3 +#define UNWIND_HINT_TYPE_FUNC 3 +#define UNWIND_HINT_TYPE_ENTRY 4 +#define UNWIND_HINT_TYPE_SAVE 5 +#define UNWIND_HINT_TYPE_RESTORE 6 #ifdef CONFIG_STACK_VALIDATION @@ -96,7 +104,7 @@ struct unwind_hint { * the debuginfo as necessary. It will also warn if it sees any * inconsistencies. 
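The macro machinery in the new tools/include/linux/kconfig.h rewards tracing once by hand. When CONFIG_FOO is defined to 1, token pasting yields __ARG_PLACEHOLDER_1, which expands to "0," and shifts a literal 1 into the second-argument slot; when CONFIG_FOO is undefined, no comma appears and the trailing 0 is selected. A minimal userspace reduction of the same macros:

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val

#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

#define CONFIG_FOO 1	/* comment this out and FOO flips to 0 */

int main(void)
{
	/* defined:   args become (0, 1, 0)  -> second arg is 1
	 * undefined: args are (tokens 1, 0) -> second arg is 0 */
	printf("FOO: %d\n", __is_defined(CONFIG_FOO));
	printf("BAR: %d\n", __is_defined(CONFIG_BAR));
	return 0;
}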
*/ -.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .Lunwind_hint_ip_\@: .pushsection .discard.unwind_hints /* struct unwind_hint */ @@ -120,7 +128,7 @@ struct unwind_hint { #define STACK_FRAME_NON_STANDARD(func) #else #define ANNOTATE_INTRA_FUNCTION_CALL -.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0 +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .endm #endif diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index b8cecb66d28b..c20d2fe7ceba 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -2318,9 +2318,9 @@ static __attribute__((unused)) int memcmp(const void *s1, const void *s2, size_t n) { size_t ofs = 0; - char c1 = 0; + int c1 = 0; - while (ofs < n && !(c1 = ((char *)s1)[ofs] - ((char *)s2)[ofs])) { + while (ofs < n && !(c1 = ((unsigned char *)s1)[ofs] - ((unsigned char *)s2)[ofs])) { ofs++; } return c1; diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h index d30439b4b8ab..869379f91fe4 100644 --- a/tools/include/uapi/asm/errno.h +++ b/tools/include/uapi/asm/errno.h @@ -9,8 +9,8 @@ #include "../../../arch/alpha/include/uapi/asm/errno.h" #elif defined(__mips__) #include "../../../arch/mips/include/uapi/asm/errno.h" -#elif defined(__xtensa__) -#include "../../../arch/xtensa/include/uapi/asm/errno.h" +#elif defined(__hppa__) +#include "../../../arch/parisc/include/uapi/asm/errno.h" #else #include #endif diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e440cd7f32a6..7943e748916d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -4180,7 +4180,8 @@ struct bpf_sock { __u32 src_ip4; __u32 src_ip6[4]; __u32 src_port; /* host byte order */ - __u32 dst_port; /* network byte order */ + __be16 dst_port; /* network byte order */ + __u16 :16; /* zero padding */ __u32 dst_ip4; __u32 dst_ip6[4]; __u32 state; @@ -5006,7 +5007,10 @@ struct bpf_pidns_info { /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */ struct bpf_sk_lookup { - __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ + union { + __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ + __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */ + }; __u32 family; /* Protocol family (AF_INET, AF_INET6) */ __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ diff --git a/tools/include/uapi/linux/openat2.h b/tools/include/uapi/linux/openat2.h index 58b1eb711360..a5feb7604948 100644 --- a/tools/include/uapi/linux/openat2.h +++ b/tools/include/uapi/linux/openat2.h @@ -35,5 +35,9 @@ struct open_how { #define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".." be scoped inside the dirfd (similar to chroot(2)). */ +#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be + completed through cached lookup. May + return -EAGAIN if that's not + possible. */ #endif /* _UAPI_LINUX_OPENAT2_H */ diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index b0bf56c5f120..a1efcfbd8b9e 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat @@ -1646,7 +1646,8 @@ Press any other key to refresh statistics immediately. .format(values)) if len(pids) > 1: sys.exit('Error: Multiple processes found (pids: {}). 
Use "-p"' - ' to specify the desired pid'.format(" ".join(pids))) + ' to specify the desired pid' + .format(" ".join(map(str, pids)))) namespace.pid = pids[0] argparser = argparse.ArgumentParser(description=description_text, diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 154b75fc1373..f2a353bba25f 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -147,7 +147,7 @@ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \ sort -u | wc -l) VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \ sed 's/\[.*\]//' | \ - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}' | \ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) CMD_TARGETS = $(LIB_TARGET) $(PC_FILE) @@ -216,7 +216,7 @@ check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT) sort -u > $(OUTPUT)libbpf_global_syms.tmp; \ readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \ sed 's/\[.*\]//' | \ - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}'| \ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \ sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \ diff -u $(OUTPUT)libbpf_global_syms.tmp \ diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 61df26f048d9..66d7f8d494de 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3652,7 +3652,7 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info) int bpf_map__reuse_fd(struct bpf_map *map, int fd) { struct bpf_map_info info = {}; - __u32 len = sizeof(info); + __u32 len = sizeof(info), name_len; int new_fd, err; char *new_name; @@ -3662,7 +3662,12 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd) if (err) return err; - new_name = strdup(info.name); + name_len = strlen(info.name); + if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0) + new_name = strdup(map->name); + else + new_name = strdup(info.name); + if (!new_name) return -errno; @@ -5928,9 +5933,10 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) */ prog = NULL; for (i = 0; i < obj->nr_programs; i++) { - prog = &obj->programs[i]; - if (strcmp(prog->sec_name, sec_name) == 0) + if (strcmp(obj->programs[i].sec_name, sec_name) == 0) { + prog = &obj->programs[i]; break; + } } if (!prog) { pr_warn("sec '%s': failed to find a BPF program\n", sec_name); @@ -5945,10 +5951,17 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) insn_idx = rec->insn_off / BPF_INSN_SZ; prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); if (!prog) { - pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n", - sec_name, insn_idx, i); - err = -EINVAL; - goto out; + /* When __weak subprog is "overridden" by another instance + * of the subprog from a different object file, linker still + * appends all the .BTF.ext info that used to belong to that + * eliminated subprogram. + * This is similar to what x86-64 linker does for relocations. + * So just ignore such relocations just like we ignore + * subprog instructions when discovering subprograms. 
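The nolibc memcmp() hunk a few lines back fixes two related bugs, which the sketch below demonstrates: bytes must be compared as unsigned char (plain char is signed on x86, so 0xc0 compares as -64), and the difference must be accumulated in an int, since a legitimate result such as 128 would be truncated to a negative value in a char.

	#include <stdio.h>
	#include <string.h>

	/* the pre-fix implementation, reproduced for comparison */
	static int memcmp_buggy(const void *s1, const void *s2, size_t n)
	{
		size_t ofs = 0;
		char c1 = 0;

		while (ofs < n && !(c1 = ((char *)s1)[ofs] - ((char *)s2)[ofs]))
			ofs++;
		return c1;
	}

	int main(void)
	{
		unsigned char a[] = { 0xc0 };	/* 192, or -64 as signed char */
		unsigned char b[] = { 0x40 };	/* 64 */

		/* ISO C memcmp() compares as unsigned: 192 > 64, result > 0 */
		printf("libc : %d\n", memcmp(a, b, 1) > 0);
		/* signed arithmetic gives -64 - 64 = -128: wrong sign */
		printf("buggy: %d\n", memcmp_buggy(a, b, 1) > 0);
		return 0;
	}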
+ */ + pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n", + sec_name, i, insn_idx); + continue; } /* no need to apply CO-RE relocation if the program is * not going to be loaded diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index d38284a3aaf0..13393f0eab25 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -244,7 +244,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) case BPF_MAP_TYPE_RINGBUF: key_size = 0; value_size = 0; - max_entries = 4096; + max_entries = sysconf(_SC_PAGE_SIZE); break; case BPF_MAP_TYPE_UNSPEC: case BPF_MAP_TYPE_HASH: diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c index 86c31c787fb9..5e242be45206 100644 --- a/tools/lib/bpf/ringbuf.c +++ b/tools/lib/bpf/ringbuf.c @@ -59,6 +59,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd, __u32 len = sizeof(info); struct epoll_event *e; struct ring *r; + __u64 mmap_sz; void *tmp; int err; @@ -97,8 +98,7 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd, r->mask = info.max_entries - 1; /* Map writable consumer page */ - tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, - map_fd, 0); + tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0); if (tmp == MAP_FAILED) { err = -errno; pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n", @@ -111,8 +111,12 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd, * data size to allow simple reading of samples that wrap around the * end of a ring buffer. See kernel implementation for details. * */ - tmp = mmap(NULL, rb->page_size + 2 * info.max_entries, PROT_READ, - MAP_SHARED, map_fd, rb->page_size); + mmap_sz = rb->page_size + 2 * (__u64)info.max_entries; + if (mmap_sz != (__u64)(size_t)mmap_sz) { + pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries); + return -E2BIG; + } + tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size); if (tmp == MAP_FAILED) { err = -errno; ringbuf_unmap_ring(rb, r); diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index c4390ef98b19..fa1f8faf7dfe 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -849,8 +849,6 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr, goto out_mmap_tx; } - ctx->prog_fd = -1; - if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) { err = xsk_setup_xdp_prog(xsk); if (err) @@ -931,11 +929,14 @@ void xsk_socket__delete(struct xsk_socket *xsk) ctx = xsk->ctx; umem = ctx->umem; - if (ctx->prog_fd != -1) { + + if (ctx->refcount == 1) { xsk_delete_bpf_maps(xsk); close(ctx->prog_fd); } + xsk_put_ctx(ctx, true); + err = xsk_get_mmap_offsets(xsk->fd, &off); if (!err) { if (xsk->rx) { @@ -948,8 +949,6 @@ void xsk_socket__delete(struct xsk_socket *xsk) } } - xsk_put_ctx(ctx, true); - umem->refcount--; /* Do not close an fd that also has an associated umem connected * to it. 
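The mmap_sz check added to ringbuf.c above is a 32-bit overflow guard: the data pages are mapped twice back-to-back (so samples that wrap the ring can still be read linearly), and page_size + 2 * max_entries can exceed SIZE_MAX when size_t is 32 bits. A condensed sketch of just the guard:

	#include <stdint.h>
	#include <stddef.h>

	static int ringbuf_mmap_sz_ok(uint32_t page_size, uint32_t max_entries)
	{
		uint64_t mmap_sz = page_size + 2 * (uint64_t)max_entries;

		/* a lossy cast means mmap() could never map this: -E2BIG */
		return mmap_sz == (uint64_t)(size_t)mmap_sz;
	}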
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c index 17465d454a0e..f76b1a9d5a6e 100644 --- a/tools/lib/perf/evlist.c +++ b/tools/lib/perf/evlist.c @@ -571,7 +571,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist, { struct perf_evsel *evsel; const struct perf_cpu_map *cpus = evlist->cpus; - const struct perf_thread_map *threads = evlist->threads; if (!ops || !ops->get || !ops->mmap) return -EINVAL; @@ -583,7 +582,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist, perf_evlist__for_each_entry(evlist, evsel) { if ((evsel->attr.read_format & PERF_FORMAT_ID) && evsel->sample_id == NULL && - perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0) + perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0) return -ENOMEM; } diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt index 0542e46c7552..30f38fdc0d56 100644 --- a/tools/objtool/Documentation/stack-validation.txt +++ b/tools/objtool/Documentation/stack-validation.txt @@ -315,13 +315,15 @@ they mean, and suggestions for how to fix them. function tracing inserts additional calls, which is not obvious from the sources). -10. file.o: warning: func()+0x5c: alternative modifies stack +10. file.o: warning: func()+0x5c: stack layout conflict in alternatives - This means that an alternative includes instructions that modify the - stack. The problem is that there is only one ORC unwind table, this means - that the ORC unwind entries must be valid for each of the alternatives. - The easiest way to enforce this is to ensure alternatives do not contain - any ORC entries, which in turn implies the above constraint. + This means that in the use of the alternative() or ALTERNATIVE() + macro, the code paths have conflicting modifications to the stack. + The problem is that there is only one ORC unwind table, which means + that the ORC unwind entries must be consistent for all possible + instruction boundaries regardless of which code has been patched. + This limitation can be overcome by massaging the alternatives with + NOPs to shift the stack changes around so they no longer conflict. 11. 
file.o: warning: unannotated intra-function call diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 5cdb19036d7f..a43096f713c7 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -46,10 +46,6 @@ ifeq ($(SRCARCH),x86) SUBCMD_ORC := y endif -ifeq ($(SUBCMD_ORC),y) - CFLAGS += -DINSN_USE_ORC -endif - export SUBCMD_CHECK SUBCMD_ORC export srctree OUTPUT CFLAGS SRCARCH AWK include $(srctree)/tools/build/Makefile.include diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h index 4a84c3081b8e..580ce1857585 100644 --- a/tools/objtool/arch.h +++ b/tools/objtool/arch.h @@ -11,10 +11,6 @@ #include "objtool.h" #include "cfi.h" -#ifdef INSN_USE_ORC -#include -#endif - enum insn_type { INSN_JUMP_CONDITIONAL, INSN_JUMP_UNCONDITIONAL, @@ -30,6 +26,7 @@ enum insn_type { INSN_CLAC, INSN_STD, INSN_CLD, + INSN_TRAP, INSN_OTHER, }; @@ -87,7 +84,13 @@ unsigned long arch_jump_destination(struct instruction *insn); unsigned long arch_dest_reloc_offset(int addend); const char *arch_nop_insn(int len); +const char *arch_ret_insn(int len); -int arch_decode_hint_reg(struct instruction *insn, u8 sp_reg); +int arch_decode_hint_reg(u8 sp_reg, int *base); + +bool arch_is_retpoline(struct symbol *sym); +bool arch_is_rethunk(struct symbol *sym); + +int arch_rewrite_retpolines(struct objtool_file *file); #endif /* _ARCH_H */ diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index cde9c36e40ae..d8f47704fd85 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -16,6 +16,7 @@ #include "../../arch.h" #include "../../warn.h" #include +#include "arch_elf.h" static unsigned char op_to_cfi_reg[][2] = { {CFI_AX, CFI_R8}, @@ -455,6 +456,11 @@ int arch_decode_instruction(const struct elf *elf, const struct section *sec, break; + case 0xcc: + /* int3 */ + *type = INSN_TRAP; + break; + case 0xe3: /* jecxz/jrcxz */ *type = INSN_JUMP_CONDITIONAL; @@ -563,8 +569,8 @@ void arch_initial_func_cfi_state(struct cfi_init_state *state) state->cfa.offset = 8; /* initial RA (return address) */ - state->regs[16].base = CFI_CFA; - state->regs[16].offset = -8; + state->regs[CFI_RA].base = CFI_CFA; + state->regs[CFI_RA].offset = -8; } const char *arch_nop_insn(int len) @@ -585,34 +591,52 @@ const char *arch_nop_insn(int len) return nops[len-1]; } -int arch_decode_hint_reg(struct instruction *insn, u8 sp_reg) -{ - struct cfi_reg *cfa = &insn->cfi.cfa; +#define BYTE_RET 0xC3 +const char *arch_ret_insn(int len) +{ + static const char ret[5][5] = { + { BYTE_RET }, + { BYTE_RET, 0xcc }, + { BYTE_RET, 0xcc, 0x90 }, + { BYTE_RET, 0xcc, 0x66, 0x90 }, + { BYTE_RET, 0xcc, 0x0f, 0x1f, 0x00 }, + }; + + if (len < 1 || len > 5) { + WARN("invalid RET size: %d\n", len); + return NULL; + } + + return ret[len-1]; +} + +int arch_decode_hint_reg(u8 sp_reg, int *base) +{ switch (sp_reg) { case ORC_REG_UNDEFINED: - cfa->base = CFI_UNDEFINED; + *base = CFI_UNDEFINED; break; case ORC_REG_SP: - cfa->base = CFI_SP; + *base = CFI_SP; break; case ORC_REG_BP: - cfa->base = CFI_BP; + *base = CFI_BP; break; case ORC_REG_SP_INDIRECT: - cfa->base = CFI_SP_INDIRECT; + *base = CFI_SP_INDIRECT; break; case ORC_REG_R10: - cfa->base = CFI_R10; + *base = CFI_R10; break; case ORC_REG_R13: - cfa->base = CFI_R13; + *base = CFI_R13; break; case ORC_REG_DI: - cfa->base = CFI_DI; + *base = CFI_DI; break; case ORC_REG_DX: - cfa->base = CFI_DX; + *base = CFI_DX; break; default: return -1; @@ -620,3 +644,13 @@ int arch_decode_hint_reg(struct instruction *insn, u8 sp_reg) return 0; } + +bool 
arch_is_retpoline(struct symbol *sym) +{ + return !strncmp(sym->name, "__x86_indirect_", 15); +} + +bool arch_is_rethunk(struct symbol *sym) +{ + return !strcmp(sym->name, "__x86_return_thunk"); +} diff --git a/tools/objtool/arch/x86/include/arch_special.h b/tools/objtool/arch/x86/include/arch_special.h index d818b2bffa02..14271cca0c74 100644 --- a/tools/objtool/arch/x86/include/arch_special.h +++ b/tools/objtool/arch/x86/include/arch_special.h @@ -10,7 +10,7 @@ #define JUMP_ORIG_OFFSET 0 #define JUMP_NEW_OFFSET 4 -#define ALT_ENTRY_SIZE 13 +#define ALT_ENTRY_SIZE 12 #define ALT_ORIG_OFFSET 0 #define ALT_NEW_OFFSET 4 #define ALT_FEATURE_OFFSET 8 diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index b84cdc72b51f..5b2f3a09c326 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c @@ -18,7 +18,8 @@ #include "builtin.h" #include "objtool.h" -bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats, validate_dup, vmlinux, mcount, noinstr; +bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats, + validate_dup, vmlinux, mcount, noinstr, sls, unret, rethunk; static const char * const check_usage[] = { "objtool check [] file.o", @@ -29,6 +30,8 @@ const struct option check_options[] = { OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"), OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"), OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"), + OPT_BOOLEAN(0, "rethunk", &rethunk, "validate and annotate rethunk usage"), + OPT_BOOLEAN(0, "unret", &unret, "validate entry unret placement"), OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"), OPT_BOOLEAN('b', "backtrace", &backtrace, "unwind on error"), OPT_BOOLEAN('a', "uaccess", &uaccess, "enable uaccess checking"), @@ -37,6 +40,7 @@ const struct option check_options[] = { OPT_BOOLEAN('n', "noinstr", &noinstr, "noinstr validation for vmlinux.o"), OPT_BOOLEAN('l', "vmlinux", &vmlinux, "vmlinux.o validation"), OPT_BOOLEAN('M', "mcount", &mcount, "generate __mcount_loc"), + OPT_BOOLEAN('S', "sls", &sls, "validate straight-line-speculation"), OPT_END(), }; diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c index 7b31121fa60b..508bdf6ae8dc 100644 --- a/tools/objtool/builtin-orc.c +++ b/tools/objtool/builtin-orc.c @@ -51,11 +51,7 @@ int cmd_orc(int argc, const char **argv) if (list_empty(&file->insn_list)) return 0; - ret = create_orc(file); - if (ret) - return ret; - - ret = create_orc_sections(file); + ret = orc_create(file); if (ret) return ret; diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h index 2502bb27de17..94e653d4d230 100644 --- a/tools/objtool/builtin.h +++ b/tools/objtool/builtin.h @@ -8,7 +8,8 @@ #include extern const struct option check_options[]; -extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats, validate_dup, vmlinux, mcount, noinstr; +extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats, + validate_dup, vmlinux, mcount, noinstr, sls, unret, rethunk; extern int cmd_check(int argc, const char **argv); extern int cmd_orc(int argc, const char **argv); diff --git a/tools/objtool/cfi.h b/tools/objtool/cfi.h index c7c59c6a44ee..f579802d7ec2 100644 --- a/tools/objtool/cfi.h +++ b/tools/objtool/cfi.h @@ -7,6 +7,7 @@ #define _OBJTOOL_CFI_H #include "cfi_regs.h" +#include #define CFI_UNDEFINED -1 #define CFI_CFA -2 @@ -24,6 +25,7 @@ struct cfi_init_state 
{ }; struct cfi_state { + struct hlist_node hash; /* must be first, cficmp() */ struct cfi_reg regs[CFI_NUM_REGS]; struct cfi_reg vals[CFI_NUM_REGS]; struct cfi_reg cfa; diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 4c114338b75b..836c9195210e 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -5,6 +5,8 @@ #include #include +#include +#include #include "builtin.h" #include "cfi.h" @@ -19,15 +21,17 @@ #include #include -#define FAKE_JUMP_OFFSET -1 - struct alternative { struct list_head list; struct instruction *insn; bool skip_orig; }; -struct cfi_init_state initial_func_cfi; +static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; + +static struct cfi_init_state initial_func_cfi; +static struct cfi_state init_cfi; +static struct cfi_state func_cfi; struct instruction *find_insn(struct objtool_file *file, struct section *sec, unsigned long offset) @@ -109,17 +113,34 @@ static struct instruction *prev_insn_same_sym(struct objtool_file *file, for (insn = next_insn_same_sec(file, insn); insn; \ insn = next_insn_same_sec(file, insn)) +static bool is_jump_table_jump(struct instruction *insn) +{ + struct alt_group *alt_group = insn->alt_group; + + if (insn->jump_table) + return true; + + /* Retpoline alternative for a jump table? */ + return alt_group && alt_group->orig_group && + alt_group->orig_group->first_insn->jump_table; +} + static bool is_sibling_call(struct instruction *insn) { - /* An indirect jump is either a sibling call or a jump to a table. */ - if (insn->type == INSN_JUMP_DYNAMIC) - return list_empty(&insn->alts); - - if (!is_static_jump(insn)) + /* + * Assume only ELF functions can make sibling calls. This ensures + * sibling call detection consistency between vmlinux.o and individual + * objects. + */ + if (!insn->func) return false; + /* An indirect jump is either a sibling call or a jump to a table. */ + if (insn->type == INSN_JUMP_DYNAMIC) + return !is_jump_table_jump(insn); + /* add_jump_destinations() sets insn->call_dest for sibling calls. */ - return !!insn->call_dest; + return (is_static_jump(insn) && insn->call_dest); } /* @@ -250,6 +271,78 @@ static void init_insn_state(struct insn_state *state, struct section *sec) state->noinstr = sec->noinstr; } +static struct cfi_state *cfi_alloc(void) +{ + struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); + if (!cfi) { + WARN("calloc failed"); + exit(1); + } + nr_cfi++; + return cfi; +} + +static int cfi_bits; +static struct hlist_head *cfi_hash; + +static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) +{ + return memcmp((void *)cfi1 + sizeof(cfi1->hash), + (void *)cfi2 + sizeof(cfi2->hash), + sizeof(struct cfi_state) - sizeof(struct hlist_node)); +} + +static inline u32 cfi_key(struct cfi_state *cfi) +{ + return jhash((void *)cfi + sizeof(cfi->hash), + sizeof(*cfi) - sizeof(cfi->hash), 0); +} + +static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) +{ + struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; + struct cfi_state *obj; + + hlist_for_each_entry(obj, head, hash) { + if (!cficmp(cfi, obj)) { + nr_cfi_cache++; + return obj; + } + } + + obj = cfi_alloc(); + *obj = *cfi; + hlist_add_head(&obj->hash, head); + + return obj; +} + +static void cfi_hash_add(struct cfi_state *cfi) +{ + struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; + + hlist_add_head(&cfi->hash, head); +} + +static void *cfi_hash_alloc(void) +{ + cfi_bits = vmlinux ? 
ELF_HASH_BITS - 3 : 13; + cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, + PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANON, -1, 0); + if (cfi_hash == (void *)-1L) { + WARN("mmap fail cfi_hash"); + cfi_hash = NULL; + } else if (stats) { + printf("cfi_bits: %d\n", cfi_bits); + } + + return cfi_hash; +} + +static unsigned long nr_insns; +static unsigned long nr_insns_visited; + /* * Call the arch-specific instruction decoder for all the instructions and add * them to the global instruction list. @@ -260,7 +353,6 @@ static int decode_instructions(struct objtool_file *file) struct symbol *func; unsigned long offset; struct instruction *insn; - unsigned long nr_insns = 0; int ret; for_each_sec(file, sec) { @@ -274,7 +366,8 @@ static int decode_instructions(struct objtool_file *file) sec->text = true; if (!strcmp(sec->name, ".noinstr.text") || - !strcmp(sec->name, ".entry.text")) + !strcmp(sec->name, ".entry.text") || + !strncmp(sec->name, ".text.__x86.", 12)) sec->noinstr = true; for (offset = 0; offset < sec->len; offset += insn->len) { @@ -286,7 +379,6 @@ static int decode_instructions(struct objtool_file *file) memset(insn, 0, sizeof(*insn)); INIT_LIST_HEAD(&insn->alts); INIT_LIST_HEAD(&insn->stack_ops); - init_cfi_state(&insn->cfi); insn->sec = sec; insn->offset = offset; @@ -377,12 +469,12 @@ static int add_dead_ends(struct objtool_file *file) else if (reloc->addend == reloc->sym->sec->len) { insn = find_last_insn(file, reloc->sym->sec); if (!insn) { - WARN("can't find unreachable insn at %s+0x%x", + WARN("can't find unreachable insn at %s+0x%" PRIx64, reloc->sym->sec->name, reloc->addend); return -1; } } else { - WARN("can't find unreachable insn at %s+0x%x", + WARN("can't find unreachable insn at %s+0x%" PRIx64, reloc->sym->sec->name, reloc->addend); return -1; } @@ -412,12 +504,12 @@ reachable: else if (reloc->addend == reloc->sym->sec->len) { insn = find_last_insn(file, reloc->sym->sec); if (!insn) { - WARN("can't find reachable insn at %s+0x%x", + WARN("can't find reachable insn at %s+0x%" PRIx64, reloc->sym->sec->name, reloc->addend); return -1; } } else { - WARN("can't find reachable insn at %s+0x%x", + WARN("can't find reachable insn at %s+0x%" PRIx64, reloc->sym->sec->name, reloc->addend); return -1; } @@ -430,8 +522,7 @@ reachable: static int create_static_call_sections(struct objtool_file *file) { - struct section *sec, *reloc_sec; - struct reloc *reloc; + struct section *sec; struct static_call_site *site; struct instruction *insn; struct symbol *key_sym; @@ -449,7 +540,7 @@ static int create_static_call_sections(struct objtool_file *file) return 0; idx = 0; - list_for_each_entry(insn, &file->static_call_list, static_call_node) + list_for_each_entry(insn, &file->static_call_list, call_node) idx++; sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE, @@ -457,36 +548,18 @@ static int create_static_call_sections(struct objtool_file *file) if (!sec) return -1; - reloc_sec = elf_create_reloc_section(file->elf, sec, SHT_RELA); - if (!reloc_sec) - return -1; - idx = 0; - list_for_each_entry(insn, &file->static_call_list, static_call_node) { + list_for_each_entry(insn, &file->static_call_list, call_node) { site = (struct static_call_site *)sec->data->d_buf + idx; memset(site, 0, sizeof(struct static_call_site)); /* populate reloc for 'addr' */ - reloc = malloc(sizeof(*reloc)); - - if (!reloc) { - perror("malloc"); + if (elf_add_reloc_to_insn(file->elf, sec, + idx * sizeof(struct static_call_site), + R_X86_64_PC32, + insn->sec, insn->offset)) return -1; - } - 
memset(reloc, 0, sizeof(*reloc)); - - insn_to_reloc_sym_addend(insn->sec, insn->offset, reloc); - if (!reloc->sym) { - WARN_FUNC("static call tramp: missing containing symbol", - insn->sec, insn->offset); - return -1; - } - - reloc->type = R_X86_64_PC32; - reloc->offset = idx * sizeof(struct static_call_site); - reloc->sec = reloc_sec; - elf_add_reloc(file->elf, reloc); /* find key symbol */ key_name = strdup(insn->call_dest->name); @@ -523,32 +596,113 @@ static int create_static_call_sections(struct objtool_file *file) free(key_name); /* populate reloc for 'key' */ - reloc = malloc(sizeof(*reloc)); - if (!reloc) { - perror("malloc"); + if (elf_add_reloc(file->elf, sec, + idx * sizeof(struct static_call_site) + 4, + R_X86_64_PC32, key_sym, + is_sibling_call(insn) * STATIC_CALL_SITE_TAIL)) return -1; - } - memset(reloc, 0, sizeof(*reloc)); - reloc->sym = key_sym; - reloc->addend = is_sibling_call(insn) ? STATIC_CALL_SITE_TAIL : 0; - reloc->type = R_X86_64_PC32; - reloc->offset = idx * sizeof(struct static_call_site) + 4; - reloc->sec = reloc_sec; - elf_add_reloc(file->elf, reloc); idx++; } - if (elf_rebuild_reloc_section(file->elf, reloc_sec)) + return 0; +} + +static int create_retpoline_sites_sections(struct objtool_file *file) +{ + struct instruction *insn; + struct section *sec; + int idx; + + sec = find_section_by_name(file->elf, ".retpoline_sites"); + if (sec) { + WARN("file already has .retpoline_sites, skipping"); + return 0; + } + + idx = 0; + list_for_each_entry(insn, &file->retpoline_call_list, call_node) + idx++; + + if (!idx) + return 0; + + sec = elf_create_section(file->elf, ".retpoline_sites", 0, + sizeof(int), idx); + if (!sec) { + WARN("elf_create_section: .retpoline_sites"); return -1; + } + + idx = 0; + list_for_each_entry(insn, &file->retpoline_call_list, call_node) { + + int *site = (int *)sec->data->d_buf + idx; + *site = 0; + + if (elf_add_reloc_to_insn(file->elf, sec, + idx * sizeof(int), + R_X86_64_PC32, + insn->sec, insn->offset)) { + WARN("elf_add_reloc_to_insn: .retpoline_sites"); + return -1; + } + + idx++; + } + + return 0; +} + +static int create_return_sites_sections(struct objtool_file *file) +{ + struct instruction *insn; + struct section *sec; + int idx; + + sec = find_section_by_name(file->elf, ".return_sites"); + if (sec) { + WARN("file already has .return_sites, skipping"); + return 0; + } + + idx = 0; + list_for_each_entry(insn, &file->return_thunk_list, call_node) + idx++; + + if (!idx) + return 0; + + sec = elf_create_section(file->elf, ".return_sites", 0, + sizeof(int), idx); + if (!sec) { + WARN("elf_create_section: .return_sites"); + return -1; + } + + idx = 0; + list_for_each_entry(insn, &file->return_thunk_list, call_node) { + + int *site = (int *)sec->data->d_buf + idx; + *site = 0; + + if (elf_add_reloc_to_insn(file->elf, sec, + idx * sizeof(int), + R_X86_64_PC32, + insn->sec, insn->offset)) { + WARN("elf_add_reloc_to_insn: .return_sites"); + return -1; + } + + idx++; + } return 0; } static int create_mcount_loc_sections(struct objtool_file *file) { - struct section *sec, *reloc_sec; - struct reloc *reloc; + struct section *sec; unsigned long *loc; struct instruction *insn; int idx; @@ -571,49 +725,21 @@ static int create_mcount_loc_sections(struct objtool_file *file) if (!sec) return -1; - reloc_sec = elf_create_reloc_section(file->elf, sec, SHT_RELA); - if (!reloc_sec) - return -1; - idx = 0; list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node) { loc = (unsigned long *)sec->data->d_buf + idx; memset(loc, 0, sizeof(unsigned 
long)); - reloc = malloc(sizeof(*reloc)); - if (!reloc) { - perror("malloc"); + if (elf_add_reloc_to_insn(file->elf, sec, + idx * sizeof(unsigned long), + R_X86_64_64, + insn->sec, insn->offset)) return -1; - } - memset(reloc, 0, sizeof(*reloc)); - - if (insn->sec->sym) { - reloc->sym = insn->sec->sym; - reloc->addend = insn->offset; - } else { - reloc->sym = find_symbol_containing(insn->sec, insn->offset); - - if (!reloc->sym) { - WARN("missing symbol for insn at offset 0x%lx\n", - insn->offset); - return -1; - } - - reloc->addend = insn->offset - reloc->sym->offset; - } - - reloc->type = R_X86_64_64; - reloc->offset = idx * sizeof(unsigned long); - reloc->sec = reloc_sec; - elf_add_reloc(file->elf, reloc); idx++; } - if (elf_rebuild_reloc_section(file->elf, reloc_sec)) - return -1; - return 0; } @@ -847,6 +973,172 @@ static int add_ignore_alternatives(struct objtool_file *file) return 0; } +__weak bool arch_is_retpoline(struct symbol *sym) +{ + return false; +} + +__weak bool arch_is_rethunk(struct symbol *sym) +{ + return false; +} + +#define NEGATIVE_RELOC ((void *)-1L) + +static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) +{ + if (insn->reloc == NEGATIVE_RELOC) + return NULL; + + if (!insn->reloc) { + insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec, + insn->offset, insn->len); + if (!insn->reloc) { + insn->reloc = NEGATIVE_RELOC; + return NULL; + } + } + + return insn->reloc; +} + +static void remove_insn_ops(struct instruction *insn) +{ + struct stack_op *op, *tmp; + + list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) { + list_del(&op->list); + free(op); + } +} + +static void annotate_call_site(struct objtool_file *file, + struct instruction *insn, bool sibling) +{ + struct reloc *reloc = insn_reloc(file, insn); + struct symbol *sym = insn->call_dest; + + if (!sym) + sym = reloc->sym; + + /* + * Alternative replacement code is just template code which is + * sometimes copied to the original instruction. For now, don't + * annotate it. (In the future we might consider annotating the + * original instruction if/when it ever makes sense to do so.) + */ + if (!strcmp(insn->sec->name, ".altinstr_replacement")) + return; + + if (sym->static_call_tramp) { + list_add_tail(&insn->call_node, &file->static_call_list); + return; + } + + if (sym->retpoline_thunk) { + list_add_tail(&insn->call_node, &file->retpoline_call_list); + return; + } + + /* + * Many compilers cannot disable KCOV with a function attribute + * so they need a little help, NOP out any KCOV calls from noinstr + * text. + */ + if (insn->sec->noinstr && sym->kcov) { + if (reloc) { + reloc->type = R_NONE; + elf_write_reloc(file->elf, reloc); + } + + elf_write_insn(file->elf, insn->sec, + insn->offset, insn->len, + sibling ? arch_ret_insn(insn->len) + : arch_nop_insn(insn->len)); + + insn->type = sibling ? INSN_RETURN : INSN_NOP; + + if (sibling) { + /* + * We've replaced the tail-call JMP insn by two new + * insn: RET; INT3, except we only have a single struct + * insn here. Mark it retpoline_safe to avoid the SLS + * warning, instead of adding another insn. + */ + insn->retpoline_safe = true; + } + + return; + } +} + +static void add_call_dest(struct objtool_file *file, struct instruction *insn, + struct symbol *dest, bool sibling) +{ + insn->call_dest = dest; + if (!dest) + return; + + /* + * Whatever stack impact regular CALLs have, should be undone + * by the RETURN of the called function. 
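The cfi_hash hunks further up (cficmp(), cfi_key(), cfi_hash_find_or_add()) implement a classic interning scheme: every distinct stack state is stored once, keyed on its contents, and instructions share pointers to the canonical copy. The hlist_node must be the struct's first member so both hashing and comparison can skip it with a single fixed offset. A stand-alone sketch of the idea, with FNV-1a standing in for jhash:

	#include <stdlib.h>
	#include <string.h>

	struct node {
		struct node *next;	/* link first, like cfi_state.hash */
		int payload[4];		/* the contents actually interned */
	};

	#define BODY(n)	((char *)(n) + sizeof(struct node *))
	#define BODY_SZ	(sizeof(struct node) - sizeof(struct node *))

	static struct node *buckets[1024];

	static unsigned int key(struct node *n)
	{
		unsigned int h = 2166136261u;	/* FNV-1a, not jhash */
		size_t i;

		for (i = 0; i < BODY_SZ; i++)
			h = (h ^ (unsigned char)BODY(n)[i]) * 16777619u;
		return h & 1023;
	}

	static struct node *find_or_add(struct node *tmpl)
	{
		unsigned int k = key(tmpl);
		struct node *n;

		for (n = buckets[k]; n; n = n->next)
			if (!memcmp(BODY(n), BODY(tmpl), BODY_SZ))
				return n;	/* reuse the canonical copy */

		n = malloc(sizeof(*n));
		if (!n)
			return NULL;
		memcpy(BODY(n), BODY(tmpl), BODY_SZ);
		n->next = buckets[k];
		buckets[k] = n;
		return n;
	}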
+ * + * Annotated intra-function calls retain the stack_ops but + * are converted to JUMP, see read_intra_function_calls(). + */ + remove_insn_ops(insn); + + annotate_call_site(file, insn, sibling); +} + +static void add_retpoline_call(struct objtool_file *file, struct instruction *insn) +{ + /* + * Retpoline calls/jumps are really dynamic calls/jumps in disguise, + * so convert them accordingly. + */ + switch (insn->type) { + case INSN_CALL: + insn->type = INSN_CALL_DYNAMIC; + break; + case INSN_JUMP_UNCONDITIONAL: + insn->type = INSN_JUMP_DYNAMIC; + break; + case INSN_JUMP_CONDITIONAL: + insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL; + break; + default: + return; + } + + insn->retpoline_safe = true; + + /* + * Whatever stack impact regular CALLs have, should be undone + * by the RETURN of the called function. + * + * Annotated intra-function calls retain the stack_ops but + * are converted to JUMP, see read_intra_function_calls(). + */ + remove_insn_ops(insn); + + annotate_call_site(file, insn, false); +} + +static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add) +{ + /* + * Return thunk tail calls are really just returns in disguise, + * so convert them accordingly. + */ + insn->type = INSN_RETURN; + insn->retpoline_safe = true; + + /* Skip the non-text sections, specially .discard ones */ + if (add && insn->sec->text) + list_add_tail(&insn->call_node, &file->return_thunk_list); +} + /* * CONFIG_CFI_CLANG: Check if the section is a CFI jump table or a * compiler-generated CFI handler. @@ -892,41 +1184,29 @@ static int add_jump_destinations(struct objtool_file *file) if (!is_static_jump(insn)) continue; - if (insn->offset == FAKE_JUMP_OFFSET) - continue; - - reloc = find_reloc_by_dest_range(file->elf, insn->sec, - insn->offset, insn->len); + reloc = insn_reloc(file, insn); if (!reloc) { dest_sec = insn->sec; dest_off = arch_jump_destination(insn); } else if (reloc->sym->type == STT_SECTION) { dest_sec = reloc->sym->sec; dest_off = arch_dest_reloc_offset(reloc->addend); + } else if (reloc->sym->retpoline_thunk) { + add_retpoline_call(file, insn); + continue; + } else if (reloc->sym->return_thunk) { + add_return_call(file, insn, true); + continue; + } else if (insn->func) { + /* internal or external sibling call (with reloc) */ + add_call_dest(file, insn, reloc->sym, true); + continue; } else if (reloc->sym->sec->idx) { dest_sec = reloc->sym->sec; dest_off = reloc->sym->sym.st_value + arch_dest_reloc_offset(reloc->addend); - } else if (!strncmp(reloc->sym->name, "__x86_indirect_thunk_", 21) || - !strncmp(reloc->sym->name, "__x86_retpoline_", 16)) { - /* - * Retpoline jumps are really dynamic jumps in - * disguise, so convert them accordingly. 
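For context on what create_retpoline_sites_sections() and create_return_sites_sections() above emit: each section is an array of zeroed 32-bit slots, with an R_X86_64_PC32 relocation tying every slot to its annotated instruction, so after the final link each slot holds "instruction address - slot address". A consumer can then recover the address as sketched below (the real walker lives in the kernel's alternative-patching code; this shows only the arithmetic):

	#include <stdint.h>

	/* entries in .retpoline_sites / .return_sites are PC-relative */
	static inline void *site_to_addr(int32_t *site)
	{
		return (char *)site + *site;
	}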
- */ - if (insn->type == INSN_JUMP_UNCONDITIONAL) - insn->type = INSN_JUMP_DYNAMIC; - else - insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL; - - insn->retpoline_safe = true; - continue; } else { - /* external sibling call */ - insn->call_dest = reloc->sym; - if (insn->call_dest->static_call_tramp) { - list_add_tail(&insn->static_call_node, - &file->static_call_list); - } + /* non-func asm code jumping to another file */ continue; } @@ -936,6 +1216,7 @@ static int add_jump_destinations(struct objtool_file *file) insn->jump_dest = find_last_insn(file, dest_sec); if (!insn->jump_dest) { + struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); /* * This is a special case where an alt instruction @@ -948,6 +1229,19 @@ static int add_jump_destinations(struct objtool_file *file) if (is_cfi_section(insn->sec)) continue; + /* + * This is a special case for zen_untrain_ret(). + * It jumps to __x86_return_thunk(), but objtool + * can't find the thunk's starting RET + * instruction, because the RET is also in the + * middle of another instruction. Objtool only + * knows about the outer instruction. + */ + if (sym && sym->return_thunk) { + add_return_call(file, insn, false); + continue; + } + WARN_FUNC("can't find jump dest instruction at %s+0x%lx", insn->sec, insn->offset, dest_sec->name, dest_off); @@ -982,13 +1276,8 @@ static int add_jump_destinations(struct objtool_file *file) } else if (insn->jump_dest->func->pfunc != insn->func->pfunc && insn->jump_dest->offset == insn->jump_dest->func->offset) { - - /* internal sibling call */ - insn->call_dest = insn->jump_dest->func; - if (insn->call_dest->static_call_tramp) { - list_add_tail(&insn->static_call_node, - &file->static_call_list); - } + /* internal sibling call (without reloc) */ + add_call_dest(file, insn, insn->jump_dest->func, true); } } } @@ -996,16 +1285,6 @@ static int add_jump_destinations(struct objtool_file *file) return 0; } -static void remove_insn_ops(struct instruction *insn) -{ - struct stack_op *op, *tmp; - - list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) { - list_del(&op->list); - free(op); - } -} - static struct symbol *find_call_destination(struct section *sec, unsigned long offset) { struct symbol *call_dest; @@ -1024,17 +1303,19 @@ static int add_call_destinations(struct objtool_file *file) { struct instruction *insn; unsigned long dest_off; + struct symbol *dest; struct reloc *reloc; for_each_insn(file, insn) { if (insn->type != INSN_CALL) continue; - reloc = find_reloc_by_dest_range(file->elf, insn->sec, - insn->offset, insn->len); + reloc = insn_reloc(file, insn); if (!reloc) { dest_off = arch_jump_destination(insn); - insn->call_dest = find_call_destination(insn->sec, dest_off); + dest = find_call_destination(insn->sec, dest_off); + + add_call_dest(file, insn, dest, false); if (insn->ignore) continue; @@ -1052,9 +1333,8 @@ static int add_call_destinations(struct objtool_file *file) } else if (reloc->sym->type == STT_SECTION) { dest_off = arch_dest_reloc_offset(reloc->addend); - insn->call_dest = find_call_destination(reloc->sym->sec, - dest_off); - if (!insn->call_dest) { + dest = find_call_destination(reloc->sym->sec, dest_off); + if (!dest) { if (is_cfi_section(reloc->sym->sec)) continue; @@ -1064,129 +1344,96 @@ static int add_call_destinations(struct objtool_file *file) dest_off); return -1; } + + add_call_dest(file, insn, dest, false); + + } else if (reloc->sym->retpoline_thunk) { + add_retpoline_call(file, insn); + } else - insn->call_dest = reloc->sym; - - if (insn->call_dest && 
insn->call_dest->static_call_tramp) { - list_add_tail(&insn->static_call_node, - &file->static_call_list); - } - - /* - * Many compilers cannot disable KCOV with a function attribute - * so they need a little help, NOP out any KCOV calls from noinstr - * text. - */ - if (insn->sec->noinstr && - !strncmp(insn->call_dest->name, "__sanitizer_cov_", 16)) { - if (reloc) { - reloc->type = R_NONE; - elf_write_reloc(file->elf, reloc); - } - - elf_write_insn(file->elf, insn->sec, - insn->offset, insn->len, - arch_nop_insn(insn->len)); - insn->type = INSN_NOP; - } - - if (mcount && !strcmp(insn->call_dest->name, "__fentry__")) { - if (reloc) { - reloc->type = R_NONE; - elf_write_reloc(file->elf, reloc); - } - - elf_write_insn(file->elf, insn->sec, - insn->offset, insn->len, - arch_nop_insn(insn->len)); - - insn->type = INSN_NOP; - - list_add_tail(&insn->mcount_loc_node, - &file->mcount_loc_list); - } - - /* - * Whatever stack impact regular CALLs have, should be undone - * by the RETURN of the called function. - * - * Annotated intra-function calls retain the stack_ops but - * are converted to JUMP, see read_intra_function_calls(). - */ - remove_insn_ops(insn); + add_call_dest(file, insn, reloc->sym, false); } return 0; } /* - * The .alternatives section requires some extra special care, over and above - * what other special sections require: - * - * 1. Because alternatives are patched in-place, we need to insert a fake jump - * instruction at the end so that validate_branch() skips all the original - * replaced instructions when validating the new instruction path. - * - * 2. An added wrinkle is that the new instruction length might be zero. In - * that case the old instructions are replaced with noops. We simulate that - * by creating a fake jump as the only new instruction. - * - * 3. In some cases, the alternative section includes an instruction which - * conditionally jumps to the _end_ of the entry. We have to modify these - * jumps' destinations to point back to .text rather than the end of the - * entry in .altinstr_replacement. + * The .alternatives section requires some extra special care over and above + * other special sections because alternatives are patched in place. 
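"Patched in place" refers to boot-time alternative patching: when the CPU has the feature bit, the replacement bytes are copied over the original location (NOP-padded up to the original length), so both code streams occupy the same addresses. The descriptors driving this look roughly like the hypothetical sketch below; the layout is inferred from the ALT_ENTRY_SIZE change from 13 to 12 in arch_special.h above (two s32 offsets, a u16 feature, two u8 lengths, with the old pad-length byte gone), and the authoritative definition is struct alt_instr in the kernel headers.

	#include <stdint.h>

	struct alt_instr_sketch {
		int32_t  instr_offset;	/* original insns, relative offset */
		int32_t  repl_offset;	/* replacement insns, relative offset */
		uint16_t cpuid;		/* CPU feature bit to test at boot */
		uint8_t  instrlen;	/* length of the original */
		uint8_t  replacementlen;/* length of the replacement */
	} __attribute__((packed));	/* 12 bytes == ALT_ENTRY_SIZE */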
*/ static int handle_group_alt(struct objtool_file *file, struct special_alt *special_alt, struct instruction *orig_insn, struct instruction **new_insn) { - static unsigned int alt_group_next_index = 1; - struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump = NULL; - unsigned int alt_group = alt_group_next_index++; + struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL; + struct alt_group *orig_alt_group, *new_alt_group; unsigned long dest_off; + + orig_alt_group = malloc(sizeof(*orig_alt_group)); + if (!orig_alt_group) { + WARN("malloc failed"); + return -1; + } + orig_alt_group->cfi = calloc(special_alt->orig_len, + sizeof(struct cfi_state *)); + if (!orig_alt_group->cfi) { + WARN("calloc failed"); + return -1; + } + last_orig_insn = NULL; insn = orig_insn; sec_for_each_insn_from(file, insn) { if (insn->offset >= special_alt->orig_off + special_alt->orig_len) break; - insn->alt_group = alt_group; + insn->alt_group = orig_alt_group; last_orig_insn = insn; } + orig_alt_group->orig_group = NULL; + orig_alt_group->first_insn = orig_insn; + orig_alt_group->last_insn = last_orig_insn; - if (next_insn_same_sec(file, last_orig_insn)) { - fake_jump = malloc(sizeof(*fake_jump)); - if (!fake_jump) { + + new_alt_group = malloc(sizeof(*new_alt_group)); + if (!new_alt_group) { + WARN("malloc failed"); + return -1; + } + + if (special_alt->new_len < special_alt->orig_len) { + /* + * Insert a fake nop at the end to make the replacement + * alt_group the same size as the original. This is needed to + * allow propagate_alt_cfi() to do its magic. When the last + * instruction affects the stack, the instruction after it (the + * nop) will propagate the new state to the shared CFI array. + */ + nop = malloc(sizeof(*nop)); + if (!nop) { WARN("malloc failed"); return -1; } - memset(fake_jump, 0, sizeof(*fake_jump)); - INIT_LIST_HEAD(&fake_jump->alts); - INIT_LIST_HEAD(&fake_jump->stack_ops); - init_cfi_state(&fake_jump->cfi); + memset(nop, 0, sizeof(*nop)); + INIT_LIST_HEAD(&nop->alts); + INIT_LIST_HEAD(&nop->stack_ops); - fake_jump->sec = special_alt->new_sec; - fake_jump->offset = FAKE_JUMP_OFFSET; - fake_jump->type = INSN_JUMP_UNCONDITIONAL; - fake_jump->jump_dest = list_next_entry(last_orig_insn, list); - fake_jump->func = orig_insn->func; + nop->sec = special_alt->new_sec; + nop->offset = special_alt->new_off + special_alt->new_len; + nop->len = special_alt->orig_len - special_alt->new_len; + nop->type = INSN_NOP; + nop->func = orig_insn->func; + nop->alt_group = new_alt_group; + nop->ignore = orig_insn->ignore_alts; } if (!special_alt->new_len) { - if (!fake_jump) { - WARN("%s: empty alternative at end of section", - special_alt->orig_sec->name); - return -1; - } - - *new_insn = fake_jump; - return 0; + *new_insn = nop; + goto end; } - last_new_insn = NULL; - alt_group = alt_group_next_index++; insn = *new_insn; sec_for_each_insn_from(file, insn) { struct reloc *alt_reloc; @@ -1198,7 +1445,7 @@ static int handle_group_alt(struct objtool_file *file, insn->ignore = orig_insn->ignore_alts; insn->func = orig_insn->func; - insn->alt_group = alt_group; + insn->alt_group = new_alt_group; /* * Since alternative replacement code is copy/pasted by the @@ -1208,8 +1455,7 @@ static int handle_group_alt(struct objtool_file *file, * alternatives code can adjust the relative offsets * accordingly. 
*/ - alt_reloc = find_reloc_by_dest_range(file->elf, insn->sec, - insn->offset, insn->len); + alt_reloc = insn_reloc(file, insn); if (alt_reloc && !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { @@ -1225,14 +1471,8 @@ static int handle_group_alt(struct objtool_file *file, continue; dest_off = arch_jump_destination(insn); - if (dest_off == special_alt->new_off + special_alt->new_len) { - if (!fake_jump) { - WARN("%s: alternative jump to end of section", - special_alt->orig_sec->name); - return -1; - } - insn->jump_dest = fake_jump; - } + if (dest_off == special_alt->new_off + special_alt->new_len) + insn->jump_dest = next_insn_same_sec(file, last_orig_insn); if (!insn->jump_dest) { WARN_FUNC("can't find alternative jump destination", @@ -1247,9 +1487,13 @@ static int handle_group_alt(struct objtool_file *file, return -1; } - if (fake_jump) - list_add(&fake_jump->list, &last_new_insn->list); - + if (nop) + list_add(&nop->list, &last_new_insn->list); +end: + new_alt_group->orig_group = orig_alt_group; + new_alt_group->first_insn = *new_insn; + new_alt_group->last_insn = nop ? : last_new_insn; + new_alt_group->cfi = orig_alt_group->cfi; return 0; } @@ -1541,13 +1785,21 @@ static int add_jump_table_alts(struct objtool_file *file) return 0; } +static void set_func_state(struct cfi_state *state) +{ + state->cfa = initial_func_cfi.cfa; + memcpy(&state->regs, &initial_func_cfi.regs, + CFI_NUM_REGS * sizeof(struct cfi_reg)); + state->stack_size = initial_func_cfi.cfa.offset; +} + static int read_unwind_hints(struct objtool_file *file) { + struct cfi_state cfi = init_cfi; struct section *sec, *relocsec; - struct reloc *reloc; struct unwind_hint *hint; struct instruction *insn; - struct cfi_reg *cfa; + struct reloc *reloc; int i; sec = find_section_by_name(file->elf, ".discard.unwind_hints"); @@ -1582,24 +1834,51 @@ static int read_unwind_hints(struct objtool_file *file) return -1; } - cfa = &insn->cfi.cfa; + insn->hint = true; - if (hint->type == UNWIND_HINT_TYPE_RET_OFFSET) { - insn->ret_offset = hint->sp_offset; + if (hint->type == UNWIND_HINT_TYPE_SAVE) { + insn->hint = false; + insn->save = true; continue; } - insn->hint = true; + if (hint->type == UNWIND_HINT_TYPE_RESTORE) { + insn->restore = true; + continue; + } - if (arch_decode_hint_reg(insn, hint->sp_reg)) { + if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { + struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); + + if (sym && sym->bind == STB_GLOBAL) { + insn->entry = 1; + } + } + + if (hint->type == UNWIND_HINT_TYPE_ENTRY) { + hint->type = UNWIND_HINT_TYPE_CALL; + insn->entry = 1; + } + + if (hint->type == UNWIND_HINT_TYPE_FUNC) { + insn->cfi = &func_cfi; + continue; + } + + if (insn->cfi) + cfi = *(insn->cfi); + + if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { WARN_FUNC("unsupported unwind_hint sp base reg %d", insn->sec, insn->offset, hint->sp_reg); return -1; } - cfa->offset = hint->sp_offset; - insn->cfi.type = hint->type; - insn->cfi.end = hint->end; + cfi.cfa.offset = hint->sp_offset; + cfi.type = hint->type; + cfi.end = hint->end; + + insn->cfi = cfi_hash_find_or_add(&cfi); } return 0; @@ -1628,8 +1907,10 @@ static int read_retpoline_hints(struct objtool_file *file) } if (insn->type != INSN_JUMP_DYNAMIC && - insn->type != INSN_CALL_DYNAMIC) { - WARN_FUNC("retpoline_safe hint not an indirect jump/call", + insn->type != INSN_CALL_DYNAMIC && + insn->type != INSN_RETURN && + insn->type != INSN_NOP) { + WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop", insn->sec, insn->offset); 
return -1; } @@ -1738,17 +2019,31 @@ static int read_intra_function_calls(struct objtool_file *file) return 0; } -static int read_static_call_tramps(struct objtool_file *file) +static int classify_symbols(struct objtool_file *file) { struct section *sec; struct symbol *func; for_each_sec(file, sec) { list_for_each_entry(func, &sec->symbol_list, list) { - if (func->bind == STB_GLOBAL && - !strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, + if (func->bind != STB_GLOBAL) + continue; + + if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, strlen(STATIC_CALL_TRAMP_PREFIX_STR))) func->static_call_tramp = true; + + if (arch_is_retpoline(func)) + func->retpoline_thunk = true; + + if (arch_is_rethunk(func)) + func->return_thunk = true; + + if (!strcmp(func->name, "__fentry__")) + func->fentry = true; + + if (!strncmp(func->name, "__sanitizer_cov_", 16)) + func->kcov = true; } } @@ -1806,10 +2101,14 @@ static int decode_sections(struct objtool_file *file) /* * Must be before add_{jump_call}_destination. */ - ret = read_static_call_tramps(file); + ret = classify_symbols(file); if (ret) return ret; + /* + * Must be before add_special_section_alts() as that depends on + * jump_dest being set. + */ ret = add_jump_destinations(file); if (ret) return ret; @@ -1851,9 +2150,9 @@ static int decode_sections(struct objtool_file *file) static bool is_fentry_call(struct instruction *insn) { - if (insn->type == INSN_CALL && insn->call_dest && - insn->call_dest->type == STT_NOTYPE && - !strcmp(insn->call_dest->name, "__fentry__")) + if (insn->type == INSN_CALL && + insn->call_dest && + insn->call_dest->fentry) return true; return false; @@ -1861,27 +2160,18 @@ static bool is_fentry_call(struct instruction *insn) static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state) { - u8 ret_offset = insn->ret_offset; struct cfi_state *cfi = &state->cfi; int i; if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap) return true; - if (cfi->cfa.offset != initial_func_cfi.cfa.offset + ret_offset) + if (cfi->cfa.offset != initial_func_cfi.cfa.offset) return true; - if (cfi->stack_size != initial_func_cfi.cfa.offset + ret_offset) + if (cfi->stack_size != initial_func_cfi.cfa.offset) return true; - /* - * If there is a ret offset hint then don't check registers - * because a callee-saved register might have been pushed on - * the stack. - */ - if (ret_offset) - return false; - for (i = 0; i < CFI_NUM_REGS; i++) { if (cfi->regs[i].base != initial_func_cfi.regs[i].base || cfi->regs[i].offset != initial_func_cfi.regs[i].offset) @@ -2350,22 +2640,52 @@ static int update_cfi_state(struct instruction *insn, struct cfi_state *cfi, return 0; } +/* + * The stack layouts of alternatives instructions can sometimes diverge when + * they have stack modifications. That's fine as long as the potential stack + * layouts don't conflict at any given potential instruction boundary. + * + * Flatten the CFIs of the different alternative code streams (both original + * and replacement) into a single shared CFI array which can be used to detect + * conflicts and nicely feed a linear array of ORC entries to the unwinder. 
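Put differently (a reduced sketch, with a toy two-field stand-in for struct cfi_state): every byte offset inside an alternative owns at most one stack state, shared by all code streams, and a second, differing state at the same offset is a conflict.

	#include <string.h>

	struct mini_cfi { int cfa_base, cfa_offset; };

	/* one slot per byte offset; original and replacements share it */
	static int record_cfi(struct mini_cfi **slots, unsigned int off,
			      struct mini_cfi *cfi)
	{
		if (!slots[off]) {
			slots[off] = cfi;	/* first state seen here wins */
			return 0;
		}
		/* a different state at the same offset: layout conflict */
		return memcmp(slots[off], cfi, sizeof(*cfi)) ? -1 : 0;
	}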
+ */ +static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn) +{ + struct cfi_state **alt_cfi; + int group_off; + + if (!insn->alt_group) + return 0; + + if (!insn->cfi) { + WARN("CFI missing"); + return -1; + } + + alt_cfi = insn->alt_group->cfi; + group_off = insn->offset - insn->alt_group->first_insn->offset; + + if (!alt_cfi[group_off]) { + alt_cfi[group_off] = insn->cfi; + } else { + if (cficmp(alt_cfi[group_off], insn->cfi)) { + WARN_FUNC("stack layout conflict in alternatives", + insn->sec, insn->offset); + return -1; + } + } + + return 0; +} + static int handle_insn_ops(struct instruction *insn, struct insn_state *state) { struct stack_op *op; list_for_each_entry(op, &insn->stack_ops, list) { - struct cfi_state old_cfi = state->cfi; - int res; - res = update_cfi_state(insn, &state->cfi, op); - if (res) - return res; - - if (insn->alt_group && memcmp(&state->cfi, &old_cfi, sizeof(struct cfi_state))) { - WARN_FUNC("alternative modifies stack", insn->sec, insn->offset); - return -1; - } + if (update_cfi_state(insn, &state->cfi, op)) + return 1; if (op->dest.type == OP_DEST_PUSHF) { if (!state->uaccess_stack) { @@ -2394,9 +2714,14 @@ static int handle_insn_ops(struct instruction *insn, struct insn_state *state) static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2) { - struct cfi_state *cfi1 = &insn->cfi; + struct cfi_state *cfi1 = insn->cfi; int i; + if (!cfi1) { + WARN("CFI missing"); + return false; + } + if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) { WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d", @@ -2555,28 +2880,20 @@ static int validate_return(struct symbol *func, struct instruction *insn, struct return 0; } -/* - * Alternatives should not contain any ORC entries, this in turn means they - * should not contain any CFI ops, which implies all instructions should have - * the same same CFI state. - * - * It is possible to constuct alternatives that have unreachable holes that go - * unreported (because they're NOPs), such holes would result in CFI_UNDEFINED - * states which then results in ORC entries, which we just said we didn't want. - * - * Avoid them by copying the CFI entry of the first instruction into the whole - * alternative. - */ -static void fill_alternative_cfi(struct objtool_file *file, struct instruction *insn) +static struct instruction *next_insn_to_validate(struct objtool_file *file, + struct instruction *insn) { - struct instruction *first_insn = insn; - int alt_group = insn->alt_group; + struct alt_group *alt_group = insn->alt_group; - sec_for_each_insn_continue(file, insn) { - if (insn->alt_group != alt_group) - break; - insn->cfi = first_insn->cfi; - } + /* + * Simulate the fact that alternatives are patched in-place. When the + * end of a replacement alt_group is reached, redirect objtool flow to + * the end of the original alt_group. 
+ */ + if (alt_group && insn == alt_group->last_insn && alt_group->orig_group) + return next_insn_same_sec(file, alt_group->orig_group->last_insn); + + return next_insn_same_sec(file, insn); } /* @@ -2589,7 +2906,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, struct instruction *insn, struct insn_state state) { struct alternative *alt; - struct instruction *next_insn; + struct instruction *next_insn, *prev_insn = NULL; struct section *sec; u8 visited; int ret; @@ -2597,7 +2914,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, sec = insn->sec; while (1) { - next_insn = next_insn_same_sec(file, insn); + next_insn = next_insn_to_validate(file, insn); if (file->c_file && func && insn->func && func != insn->func->pfunc) { WARN("%s() falls through to next function %s()", @@ -2611,25 +2928,67 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, return 1; } - visited = 1 << state.uaccess; - if (insn->visited) { + visited = VISITED_BRANCH << state.uaccess; + if (insn->visited & VISITED_BRANCH_MASK) { if (!insn->hint && !insn_cfi_match(insn, &state.cfi)) return 1; if (insn->visited & visited) return 0; + } else { + nr_insns_visited++; } if (state.noinstr) state.instr += insn->instr; - if (insn->hint) - state.cfi = insn->cfi; - else - insn->cfi = state.cfi; + if (insn->hint) { + if (insn->restore) { + struct instruction *save_insn, *i; + + i = insn; + save_insn = NULL; + + sym_for_each_insn_continue_reverse(file, func, i) { + if (i->save) { + save_insn = i; + break; + } + } + + if (!save_insn) { + WARN_FUNC("no corresponding CFI save for CFI restore", + sec, insn->offset); + return 1; + } + + if (!save_insn->visited) { + WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo", + sec, insn->offset); + return 1; + } + + insn->cfi = save_insn->cfi; + nr_cfi_reused++; + } + + state.cfi = *insn->cfi; + } else { + /* XXX track if we actually changed state.cfi */ + + if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) { + insn->cfi = prev_insn->cfi; + nr_cfi_reused++; + } else { + insn->cfi = cfi_hash_find_or_add(&state.cfi); + } + } insn->visited |= visited; + if (propagate_alt_cfi(file, insn)) + return 1; + if (!insn->ignore_alts && !list_empty(&insn->alts)) { bool skip_orig = false; @@ -2645,9 +3004,6 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, } } - if (insn->alt_group) - fill_alternative_cfi(file, insn); - if (skip_orig) return 0; } @@ -2658,6 +3014,11 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, switch (insn->type) { case INSN_RETURN: + if (sls && !insn->retpoline_safe && + next_insn && next_insn->type != INSN_TRAP) { + WARN_FUNC("missing int3 after ret", + insn->sec, insn->offset); + } return validate_return(func, insn, &state); case INSN_CALL: @@ -2680,7 +3041,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, case INSN_JUMP_CONDITIONAL: case INSN_JUMP_UNCONDITIONAL: - if (func && is_sibling_call(insn)) { + if (is_sibling_call(insn)) { ret = validate_sibling_call(insn, &state); if (ret) return ret; @@ -2701,8 +3062,15 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, break; case INSN_JUMP_DYNAMIC: + if (sls && !insn->retpoline_safe && + next_insn && next_insn->type != INSN_TRAP) { + WARN_FUNC("missing int3 after indirect jump", + insn->sec, insn->offset); + } + + /* fallthrough */ case INSN_JUMP_DYNAMIC_CONDITIONAL: - if (func && is_sibling_call(insn)) { + if 
(is_sibling_call(insn)) { ret = validate_sibling_call(insn, &state); if (ret) return ret; @@ -2776,6 +3144,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, return 1; } + prev_insn = insn; insn = next_insn; } @@ -2815,6 +3184,145 @@ static int validate_unwind_hints(struct objtool_file *file, struct section *sec) return warnings; } +/* + * Validate rethunk entry constraint: must untrain RET before the first RET. + * + * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes + * before an actual RET instruction. + */ +static int validate_entry(struct objtool_file *file, struct instruction *insn) +{ + struct instruction *next, *dest; + int ret, warnings = 0; + + for (;;) { + next = next_insn_to_validate(file, insn); + + if (insn->visited & VISITED_ENTRY) + return 0; + + insn->visited |= VISITED_ENTRY; + + if (!insn->ignore_alts && !list_empty(&insn->alts)) { + struct alternative *alt; + bool skip_orig = false; + + list_for_each_entry(alt, &insn->alts, list) { + if (alt->skip_orig) + skip_orig = true; + + ret = validate_entry(file, alt->insn); + if (ret) { + if (backtrace) + BT_FUNC("(alt)", insn); + return ret; + } + } + + if (skip_orig) + return 0; + } + + switch (insn->type) { + + case INSN_CALL_DYNAMIC: + case INSN_JUMP_DYNAMIC: + case INSN_JUMP_DYNAMIC_CONDITIONAL: + WARN_FUNC("early indirect call", insn->sec, insn->offset); + return 1; + + case INSN_JUMP_UNCONDITIONAL: + case INSN_JUMP_CONDITIONAL: + if (!is_sibling_call(insn)) { + if (!insn->jump_dest) { + WARN_FUNC("unresolved jump target after linking?!?", + insn->sec, insn->offset); + return -1; + } + ret = validate_entry(file, insn->jump_dest); + if (ret) { + if (backtrace) { + BT_FUNC("(branch%s)", insn, + insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : ""); + } + return ret; + } + + if (insn->type == INSN_JUMP_UNCONDITIONAL) + return 0; + + break; + } + + /* fallthrough */ + case INSN_CALL: + dest = find_insn(file, insn->call_dest->sec, + insn->call_dest->offset); + if (!dest) { + WARN("Unresolved function after linking!?: %s", + insn->call_dest->name); + return -1; + } + + ret = validate_entry(file, dest); + if (ret) { + if (backtrace) + BT_FUNC("(call)", insn); + return ret; + } + /* + * If a call returns without error, it must have seen UNTRAIN_RET. + * Therefore any non-error return is a success. + */ + return 0; + + case INSN_RETURN: + WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset); + return 1; + + case INSN_NOP: + if (insn->retpoline_safe) + return 0; + break; + + default: + break; + } + + if (!next) { + WARN_FUNC("teh end!", insn->sec, insn->offset); + return -1; + } + insn = next; + } + + return warnings; +} + +/* + * Validate that all branches starting at 'insn->entry' encounter UNRET_END + * before RET. 
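Tying this back to the --sls option and the new INSN_TRAP decode: straight-line-speculation validation insists that the byte following a RET or an indirect JMP is an INT3, so a CPU speculating past the branch lands in a trap rather than in whatever bytes happen to follow. Expressed as raw x86-64 bytes, matching the arch_ret_insn() table above:

	/* the pattern --sls validation expects after every return */
	static const unsigned char sls_safe_ret[] = {
		0xc3,	/* ret  */
		0xcc,	/* int3 -- decoded as INSN_TRAP */
	};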
+ */ +static int validate_unret(struct objtool_file *file) +{ + struct instruction *insn; + int ret, warnings = 0; + + for_each_insn(file, insn) { + if (!insn->entry) + continue; + + ret = validate_entry(file, insn); + if (ret < 0) { + WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset); + return ret; + } + warnings += ret; + } + + return warnings; +} + static int validate_retpoline(struct objtool_file *file) { struct instruction *insn; @@ -2822,7 +3330,8 @@ static int validate_retpoline(struct objtool_file *file) for_each_insn(file, insn) { if (insn->type != INSN_JUMP_DYNAMIC && - insn->type != INSN_CALL_DYNAMIC) + insn->type != INSN_CALL_DYNAMIC && + insn->type != INSN_RETURN) continue; if (insn->retpoline_safe) @@ -2837,9 +3346,17 @@ static int validate_retpoline(struct objtool_file *file) if (!strcmp(insn->sec->name, ".init.text") && !module) continue; - WARN_FUNC("indirect %s found in RETPOLINE build", - insn->sec, insn->offset, - insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); + if (insn->type == INSN_RETURN) { + if (rethunk) { + WARN_FUNC("'naked' return found in RETHUNK build", + insn->sec, insn->offset); + } else + continue; + } else { + WARN_FUNC("indirect %s found in RETPOLINE build", + insn->sec, insn->offset, + insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); + } warnings++; } @@ -2865,7 +3382,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio int i; struct instruction *prev_insn; - if (insn->ignore || insn->type == INSN_NOP) + if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP) return true; /* @@ -2880,9 +3397,6 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio !strcmp(insn->sec->name, ".altinstr_aux")) return true; - if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->offset == FAKE_JUMP_OFFSET) - return true; - if (!insn->func) return false; @@ -2968,10 +3482,7 @@ static int validate_section(struct objtool_file *file, struct section *sec) continue; init_insn_state(&state, sec); - state.cfi.cfa = initial_func_cfi.cfa; - memcpy(&state.cfi.regs, &initial_func_cfi.regs, - CFI_NUM_REGS * sizeof(struct cfi_reg)); - state.cfi.stack_size = initial_func_cfi.cfa.offset; + set_func_state(&state.cfi); warnings += validate_symbol(file, sec, func, &state); } @@ -3037,10 +3548,20 @@ int check(struct objtool_file *file) int ret, warnings = 0; arch_initial_func_cfi_state(&initial_func_cfi); + init_cfi_state(&init_cfi); + init_cfi_state(&func_cfi); + set_func_state(&func_cfi); + + if (!cfi_hash_alloc()) + goto out; + + cfi_hash_add(&init_cfi); + cfi_hash_add(&func_cfi); ret = decode_sections(file); if (ret < 0) goto out; + warnings += ret; if (list_empty(&file->insn_list)) @@ -3072,6 +3593,17 @@ int check(struct objtool_file *file) goto out; warnings += ret; + if (unret) { + /* + * Must be after validate_branch() and friends, it plays + * further games with insn->visited. 
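+ * (validate_unret() walks with the VISITED_ENTRY bit on top of the bits + * validate_branch() already set.)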
+ */ + ret = validate_unret(file); + if (ret < 0) + return ret; + warnings += ret; + } + if (!warnings) { ret = validate_reachable_instructions(file); if (ret < 0) @@ -3091,6 +3623,27 @@ int check(struct objtool_file *file) warnings += ret; } + if (retpoline) { + ret = create_retpoline_sites_sections(file); + if (ret < 0) + goto out; + warnings += ret; + } + + if (rethunk) { + ret = create_return_sites_sections(file); + if (ret < 0) + goto out; + warnings += ret; + } + + if (stats) { + printf("nr_insns_visited: %ld\n", nr_insns_visited); + printf("nr_cfi: %ld\n", nr_cfi); + printf("nr_cfi_reused: %ld\n", nr_cfi_reused); + printf("nr_cfi_cache: %ld\n", nr_cfi_cache); + } + out: /* * For now, don't fail the kernel build on fatal warnings. These diff --git a/tools/objtool/check.h b/tools/objtool/check.h index 3a9a3daf6e3d..6daa64d2ee97 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h @@ -19,11 +19,28 @@ struct insn_state { s8 instr; }; +struct alt_group { + /* + * Pointer from a replacement group to the original group. NULL if it + * *is* the original group. + */ + struct alt_group *orig_group; + + /* First and last instructions in the group */ + struct instruction *first_insn, *last_insn; + + /* + * Byte-offset-addressed len-sized array of pointers to CFI structs. + * This is shared with the other alt_groups in the same alternative. + */ + struct cfi_state **cfi; +}; + struct instruction { struct list_head list; struct hlist_node hash; - struct list_head static_call_node; struct list_head mcount_loc_node; + struct list_head call_node; struct section *sec; unsigned long offset; unsigned int len; @@ -31,24 +48,28 @@ struct instruction { unsigned long immediate; bool dead_end, ignore, ignore_alts; bool hint; + bool save, restore; bool retpoline_safe; + bool entry; s8 instr; u8 visited; - u8 ret_offset; - int alt_group; + struct alt_group *alt_group; struct symbol *call_dest; struct instruction *jump_dest; struct instruction *first_jump_src; struct reloc *jump_table; + struct reloc *reloc; struct list_head alts; struct symbol *func; struct list_head stack_ops; - struct cfi_state cfi; -#ifdef INSN_USE_ORC - struct orc_entry orc; -#endif + struct cfi_state *cfi; }; +#define VISITED_BRANCH 0x01 +#define VISITED_BRANCH_UACCESS 0x02 +#define VISITED_BRANCH_MASK 0x03 +#define VISITED_ENTRY 0x04 + static inline bool is_static_jump(struct instruction *insn) { return insn->type == INSN_JUMP_CONDITIONAL || diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index d8421e1d06be..a2ea3931e01d 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -262,32 +262,6 @@ struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, uns return find_reloc_by_dest_range(elf, sec, offset, 1); } -void insn_to_reloc_sym_addend(struct section *sec, unsigned long offset, - struct reloc *reloc) -{ - if (sec->sym) { - reloc->sym = sec->sym; - reloc->addend = offset; - return; - } - - /* - * The Clang assembler strips section symbols, so we have to reference - * the function symbol instead: - */ - reloc->sym = find_symbol_containing(sec, offset); - if (!reloc->sym) { - /* - * Hack alert. This happens when we need to reference the NOP - * pad insn immediately after the function. 
- */ - reloc->sym = find_symbol_containing(sec, offset - 1); - } - - if (reloc->sym) - reloc->addend = offset - reloc->sym->offset; -} - static int read_sections(struct elf *elf) { Elf_Scn *s = NULL; @@ -367,12 +341,41 @@ static int read_sections(struct elf *elf) return 0; } +static void elf_add_symbol(struct elf *elf, struct symbol *sym) +{ + struct list_head *entry; + struct rb_node *pnode; + + sym->alias = sym; + + sym->type = GELF_ST_TYPE(sym->sym.st_info); + sym->bind = GELF_ST_BIND(sym->sym.st_info); + + sym->offset = sym->sym.st_value; + sym->len = sym->sym.st_size; + + rb_add(&sym->sec->symbol_tree, &sym->node, symbol_to_offset); + pnode = rb_prev(&sym->node); + if (pnode) + entry = &rb_entry(pnode, struct symbol, node)->list; + else + entry = &sym->sec->symbol_list; + list_add(&sym->list, entry); + elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx); + elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name)); + + /* + * Don't store empty STT_NOTYPE symbols in the rbtree. They + * can exist within a function, confusing the sorting. + */ + if (!sym->len) + rb_erase(&sym->node, &sym->sec->symbol_tree); +} + static int read_symbols(struct elf *elf) { struct section *symtab, *symtab_shndx, *sec; struct symbol *sym, *pfunc; - struct list_head *entry; - struct rb_node *pnode; int symbols_nr, i; char *coldstr; Elf_Data *shndx_data = NULL; @@ -400,7 +403,6 @@ static int read_symbols(struct elf *elf) return -1; } memset(sym, 0, sizeof(*sym)); - sym->alias = sym; sym->idx = i; @@ -417,9 +419,6 @@ static int read_symbols(struct elf *elf) goto err; } - sym->type = GELF_ST_TYPE(sym->sym.st_info); - sym->bind = GELF_ST_BIND(sym->sym.st_info); - if ((sym->sym.st_shndx > SHN_UNDEF && sym->sym.st_shndx < SHN_LORESERVE) || (shndx_data && sym->sym.st_shndx == SHN_XINDEX)) { @@ -432,32 +431,14 @@ static int read_symbols(struct elf *elf) sym->name); goto err; } - if (sym->type == STT_SECTION) { + if (GELF_ST_TYPE(sym->sym.st_info) == STT_SECTION) { sym->name = sym->sec->name; sym->sec->sym = sym; } } else sym->sec = find_section_by_index(elf, 0); - sym->offset = sym->sym.st_value; - sym->len = sym->sym.st_size; - - rb_add(&sym->sec->symbol_tree, &sym->node, symbol_to_offset); - pnode = rb_prev(&sym->node); - if (pnode) - entry = &rb_entry(pnode, struct symbol, node)->list; - else - entry = &sym->sec->symbol_list; - list_add(&sym->list, entry); - elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx); - elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name)); - - /* - * Don't store empty STT_NOTYPE symbols in the rbtree. They - * can exist within a function, confusing the sorting. 
- */ - if (!sym->len) - rb_erase(&sym->node, &sym->sec->symbol_tree); + elf_add_symbol(elf, sym); } if (stats) @@ -524,12 +505,280 @@ err: return -1; } -void elf_add_reloc(struct elf *elf, struct reloc *reloc) -{ - struct section *sec = reloc->sec; +static struct section *elf_create_reloc_section(struct elf *elf, + struct section *base, + int reltype); - list_add_tail(&reloc->list, &sec->reloc_list); +int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset, + unsigned int type, struct symbol *sym, s64 addend) +{ + struct reloc *reloc; + + if (!sec->reloc && !elf_create_reloc_section(elf, sec, SHT_RELA)) + return -1; + + reloc = malloc(sizeof(*reloc)); + if (!reloc) { + perror("malloc"); + return -1; + } + memset(reloc, 0, sizeof(*reloc)); + + reloc->sec = sec->reloc; + reloc->offset = offset; + reloc->type = type; + reloc->sym = sym; + reloc->addend = addend; + + list_add_tail(&reloc->list, &sec->reloc->reloc_list); elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc)); + + sec->reloc->changed = true; + + return 0; +} + +/* + * Ensure that any reloc section containing references to @sym is marked + * changed such that it will get re-generated in elf_rebuild_reloc_sections() + * with the new symbol index. + */ +static void elf_dirty_reloc_sym(struct elf *elf, struct symbol *sym) +{ + struct section *sec; + + list_for_each_entry(sec, &elf->sections, list) { + struct reloc *reloc; + + if (sec->changed) + continue; + + list_for_each_entry(reloc, &sec->reloc_list, list) { + if (reloc->sym == sym) { + sec->changed = true; + break; + } + } + } +} + +/* + * The libelf API is terrible; gelf_update_sym*() takes a data block relative + * index value, *NOT* the symbol index. As such, iterate the data blocks and + * adjust index until it fits. + * + * If no data block is found, allow adding a new data block provided the index + * is only one past the end. + */ +static int elf_update_symbol(struct elf *elf, struct section *symtab, + struct section *symtab_shndx, struct symbol *sym) +{ + Elf32_Word shndx = sym->sec ? 
sym->sec->idx : SHN_UNDEF; + Elf_Data *symtab_data = NULL, *shndx_data = NULL; + Elf64_Xword entsize = symtab->sh.sh_entsize; + int max_idx, idx = sym->idx; + Elf_Scn *s, *t = NULL; + bool is_special_shndx = sym->sym.st_shndx >= SHN_LORESERVE && + sym->sym.st_shndx != SHN_XINDEX; + + if (is_special_shndx) + shndx = sym->sym.st_shndx; + + s = elf_getscn(elf->elf, symtab->idx); + if (!s) { + WARN_ELF("elf_getscn"); + return -1; + } + + if (symtab_shndx) { + t = elf_getscn(elf->elf, symtab_shndx->idx); + if (!t) { + WARN_ELF("elf_getscn"); + return -1; + } + } + + for (;;) { + /* get next data descriptor for the relevant sections */ + symtab_data = elf_getdata(s, symtab_data); + if (t) + shndx_data = elf_getdata(t, shndx_data); + + /* end-of-list */ + if (!symtab_data) { + void *buf; + + if (idx) { + /* we don't do holes in symbol tables */ + WARN("index out of range"); + return -1; + } + + /* if @idx == 0, it's the next contiguous entry, create it */ + symtab_data = elf_newdata(s); + if (t) + shndx_data = elf_newdata(t); + + buf = calloc(1, entsize); + if (!buf) { + WARN("malloc"); + return -1; + } + + symtab_data->d_buf = buf; + symtab_data->d_size = entsize; + symtab_data->d_align = 1; + symtab_data->d_type = ELF_T_SYM; + + symtab->sh.sh_size += entsize; + symtab->changed = true; + + if (t) { + shndx_data->d_buf = &sym->sec->idx; + shndx_data->d_size = sizeof(Elf32_Word); + shndx_data->d_align = sizeof(Elf32_Word); + shndx_data->d_type = ELF_T_WORD; + + symtab_shndx->sh.sh_size += sizeof(Elf32_Word); + symtab_shndx->changed = true; + } + + break; + } + + /* empty blocks should not happen */ + if (!symtab_data->d_size) { + WARN("zero size data"); + return -1; + } + + /* is this the right block? */ + max_idx = symtab_data->d_size / entsize; + if (idx < max_idx) + break; + + /* adjust index and try again */ + idx -= max_idx; + } + + /* something went side-ways */ + if (idx < 0) { + WARN("negative index"); + return -1; + } + + /* setup extended section index magic and write the symbol */ + if ((shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) || is_special_shndx) { + sym->sym.st_shndx = shndx; + if (!shndx_data) + shndx = 0; + } else { + sym->sym.st_shndx = SHN_XINDEX; + if (!shndx_data) { + WARN("no .symtab_shndx"); + return -1; + } + } + + if (!gelf_update_symshndx(symtab_data, shndx_data, idx, &sym->sym, shndx)) { + WARN_ELF("gelf_update_symshndx"); + return -1; + } + + return 0; +} + +static struct symbol * +elf_create_section_symbol(struct elf *elf, struct section *sec) +{ + struct section *symtab, *symtab_shndx; + Elf32_Word first_non_local, new_idx; + struct symbol *sym, *old; + + symtab = find_section_by_name(elf, ".symtab"); + if (symtab) { + symtab_shndx = find_section_by_name(elf, ".symtab_shndx"); + } else { + WARN("no .symtab"); + return NULL; + } + + sym = calloc(1, sizeof(*sym)); + if (!sym) { + perror("malloc"); + return NULL; + } + + sym->name = sec->name; + sym->sec = sec; + + // st_name 0 + sym->sym.st_info = GELF_ST_INFO(STB_LOCAL, STT_SECTION); + // st_other 0 + // st_value 0 + // st_size 0 + + /* + * Move the first global symbol, as per sh_info, into a new, higher + * symbol index. This frees up a spot for a new local symbol. 
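+ * + * (ELF requires all local symbols to precede global ones; sh_info of + * .symtab holds the index of the first non-local symbol.)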
+ */ + first_non_local = symtab->sh.sh_info; + new_idx = symtab->sh.sh_size / symtab->sh.sh_entsize; + old = find_symbol_by_index(elf, first_non_local); + if (old) { + old->idx = new_idx; + + hlist_del(&old->hash); + elf_hash_add(elf->symbol_hash, &old->hash, old->idx); + + elf_dirty_reloc_sym(elf, old); + + if (elf_update_symbol(elf, symtab, symtab_shndx, old)) { + WARN("elf_update_symbol move"); + return NULL; + } + + new_idx = first_non_local; + } + + sym->idx = new_idx; + if (elf_update_symbol(elf, symtab, symtab_shndx, sym)) { + WARN("elf_update_symbol"); + return NULL; + } + + /* + * Either way, we added a LOCAL symbol. + */ + symtab->sh.sh_info += 1; + + elf_add_symbol(elf, sym); + + return sym; +} + +int elf_add_reloc_to_insn(struct elf *elf, struct section *sec, + unsigned long offset, unsigned int type, + struct section *insn_sec, unsigned long insn_off) +{ + struct symbol *sym = insn_sec->sym; + int addend = insn_off; + + if (!sym) { + /* + * Due to how weak functions work, we must use section based + * relocations. Symbol based relocations would result in the + * weak and non-weak function annotations being overlaid on the + * non-weak function after linking. + */ + sym = elf_create_section_symbol(elf, insn_sec); + if (!sym) + return -1; + + insn_sec->sym = sym; + } + + return elf_add_reloc(elf, sec, offset, type, sym, addend); } static int read_rel_reloc(struct section *sec, int i, struct reloc *reloc, unsigned int *symndx) @@ -609,7 +858,9 @@ static int read_relocs(struct elf *elf) return -1; } - elf_add_reloc(elf, reloc); + list_add_tail(&reloc->list, &sec->reloc_list); + elf_hash_add(elf->reloc_hash, &reloc->hash, reloc_hash(reloc)); + nr_reloc++; } max_reloc = max(max_reloc, nr_reloc); @@ -687,13 +938,49 @@ err: return NULL; } +static int elf_add_string(struct elf *elf, struct section *strtab, char *str) +{ + Elf_Data *data; + Elf_Scn *s; + int len; + + if (!strtab) + strtab = find_section_by_name(elf, ".strtab"); + if (!strtab) { + WARN("can't find .strtab section"); + return -1; + } + + s = elf_getscn(elf->elf, strtab->idx); + if (!s) { + WARN_ELF("elf_getscn"); + return -1; + } + + data = elf_newdata(s); + if (!data) { + WARN_ELF("elf_newdata"); + return -1; + } + + data->d_buf = str; + data->d_size = strlen(str) + 1; + data->d_align = 1; + data->d_type = ELF_T_SYM; + + len = strtab->len; + strtab->len += data->d_size; + strtab->changed = true; + + return len; +} + struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr) { struct section *sec, *shstrtab; size_t size = entsize * nr; Elf_Scn *s; - Elf_Data *data; sec = malloc(sizeof(*sec)); if (!sec) { @@ -750,7 +1037,6 @@ struct section *elf_create_section(struct elf *elf, const char *name, sec->sh.sh_addralign = 1; sec->sh.sh_flags = SHF_ALLOC | sh_flags; - /* Add section name to .shstrtab (or .strtab for Clang) */ shstrtab = find_section_by_name(elf, ".shstrtab"); if (!shstrtab) @@ -759,27 +1045,9 @@ struct section *elf_create_section(struct elf *elf, const char *name, WARN("can't find .shstrtab or .strtab section"); return NULL; } - - s = elf_getscn(elf->elf, shstrtab->idx); - if (!s) { - WARN_ELF("elf_getscn"); + sec->sh.sh_name = elf_add_string(elf, shstrtab, sec->name); + if (sec->sh.sh_name == -1) return NULL; - } - - data = elf_newdata(s); - if (!data) { - WARN_ELF("elf_newdata"); - return NULL; - } - - data->d_buf = sec->name; - data->d_size = strlen(name) + 1; - data->d_align = 1; - - sec->sh.sh_name = shstrtab->len; - - shstrtab->len += strlen(name) + 
1; - shstrtab->changed = true; list_add_tail(&sec->list, &elf->sections); elf_hash_add(elf->section_hash, &sec->hash, sec->idx); @@ -850,7 +1118,7 @@ static struct section *elf_create_rela_reloc_section(struct elf *elf, struct sec return sec; } -struct section *elf_create_reloc_section(struct elf *elf, +static struct section *elf_create_reloc_section(struct elf *elf, struct section *base, int reltype) { @@ -920,14 +1188,11 @@ static int elf_rebuild_rela_reloc_section(struct section *sec, int nr) return 0; } -int elf_rebuild_reloc_section(struct elf *elf, struct section *sec) +static int elf_rebuild_reloc_section(struct elf *elf, struct section *sec) { struct reloc *reloc; int nr; - sec->changed = true; - elf->changed = true; - nr = 0; list_for_each_entry(reloc, &sec->reloc_list, list) nr++; @@ -991,9 +1256,15 @@ int elf_write(struct elf *elf) struct section *sec; Elf_Scn *s; - /* Update section headers for changed sections: */ + /* Update changed relocation sections and section headers: */ list_for_each_entry(sec, &elf->sections, list) { if (sec->changed) { + if (sec->base && + elf_rebuild_reloc_section(elf, sec)) { + WARN("elf_rebuild_reloc_section"); + return -1; + } + s = elf_getscn(elf->elf, sec->idx); if (!s) { WARN_ELF("elf_getscn"); @@ -1005,6 +1276,7 @@ int elf_write(struct elf *elf) } sec->changed = false; + elf->changed = true; } } diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index e6890cc70a25..a1863eb35fbb 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -55,8 +55,12 @@ struct symbol { unsigned long offset; unsigned int len; struct symbol *pfunc, *cfunc, *alias; - bool uaccess_safe; - bool static_call_tramp; + u8 uaccess_safe : 1; + u8 static_call_tramp : 1; + u8 retpoline_thunk : 1; + u8 return_thunk : 1; + u8 fentry : 1; + u8 kcov : 1; }; struct reloc { @@ -70,7 +74,7 @@ struct reloc { struct symbol *sym; unsigned long offset; unsigned int type; - int addend; + s64 addend; int idx; bool jump_table_start; }; @@ -122,8 +126,13 @@ static inline u32 reloc_hash(struct reloc *reloc) struct elf *elf_open_read(const char *name, int flags); struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr); -struct section *elf_create_reloc_section(struct elf *elf, struct section *base, int reltype); -void elf_add_reloc(struct elf *elf, struct reloc *reloc); + +int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset, + unsigned int type, struct symbol *sym, s64 addend); +int elf_add_reloc_to_insn(struct elf *elf, struct section *sec, + unsigned long offset, unsigned int type, + struct section *insn_sec, unsigned long insn_off); + int elf_write_insn(struct elf *elf, struct section *sec, unsigned long offset, unsigned int len, const char *insn); @@ -140,9 +149,6 @@ struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, uns struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *sec, unsigned long offset, unsigned int len); struct symbol *find_func_containing(struct section *sec, unsigned long offset); -void insn_to_reloc_sym_addend(struct section *sec, unsigned long offset, - struct reloc *reloc); -int elf_rebuild_reloc_section(struct elf *elf, struct section *sec); #define for_each_sec(file, sec) \ list_for_each_entry(sec, &file->elf->sections, list) diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c index c1819a6f2a18..f69035522693 100644 --- a/tools/objtool/objtool.c +++ b/tools/objtool/objtool.c @@ -61,6 +61,8 @@ struct objtool_file 
*objtool_open_read(const char *_objname) INIT_LIST_HEAD(&file.insn_list); hash_init(file.insn_hash); + INIT_LIST_HEAD(&file.retpoline_call_list); + INIT_LIST_HEAD(&file.return_thunk_list); INIT_LIST_HEAD(&file.static_call_list); INIT_LIST_HEAD(&file.mcount_loc_list); file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment"); diff --git a/tools/objtool/objtool.h b/tools/objtool/objtool.h index cf004dd60c2b..efd1a66158e7 100644 --- a/tools/objtool/objtool.h +++ b/tools/objtool/objtool.h @@ -18,6 +18,8 @@ struct objtool_file { struct elf *elf; struct list_head insn_list; DECLARE_HASHTABLE(insn_hash, 20); + struct list_head retpoline_call_list; + struct list_head return_thunk_list; struct list_head static_call_list; struct list_head mcount_loc_list; bool ignore_unreachables, c_file, hints, rodata; @@ -27,7 +29,6 @@ struct objtool_file *objtool_open_read(const char *_objname); int check(struct objtool_file *file); int orc_dump(const char *objname); -int create_orc(struct objtool_file *file); -int create_orc_sections(struct objtool_file *file); +int orc_create(struct objtool_file *file); #endif /* _OBJTOOL_H */ diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index 9ce68b385a1b..812b33ed9f65 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -12,205 +12,231 @@ #include "check.h" #include "warn.h" -int create_orc(struct objtool_file *file) +static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, + struct instruction *insn) { - struct instruction *insn; + struct cfi_reg *bp = &cfi->regs[CFI_BP]; - for_each_insn(file, insn) { - struct orc_entry *orc = &insn->orc; - struct cfi_reg *cfa = &insn->cfi.cfa; - struct cfi_reg *bp = &insn->cfi.regs[CFI_BP]; + memset(orc, 0, sizeof(*orc)); - if (!insn->sec->text) - continue; - - orc->end = insn->cfi.end; - - if (cfa->base == CFI_UNDEFINED) { - orc->sp_reg = ORC_REG_UNDEFINED; - continue; - } - - switch (cfa->base) { - case CFI_SP: - orc->sp_reg = ORC_REG_SP; - break; - case CFI_SP_INDIRECT: - orc->sp_reg = ORC_REG_SP_INDIRECT; - break; - case CFI_BP: - orc->sp_reg = ORC_REG_BP; - break; - case CFI_BP_INDIRECT: - orc->sp_reg = ORC_REG_BP_INDIRECT; - break; - case CFI_R10: - orc->sp_reg = ORC_REG_R10; - break; - case CFI_R13: - orc->sp_reg = ORC_REG_R13; - break; - case CFI_DI: - orc->sp_reg = ORC_REG_DI; - break; - case CFI_DX: - orc->sp_reg = ORC_REG_DX; - break; - default: - WARN_FUNC("unknown CFA base reg %d", - insn->sec, insn->offset, cfa->base); - return -1; - } - - switch(bp->base) { - case CFI_UNDEFINED: - orc->bp_reg = ORC_REG_UNDEFINED; - break; - case CFI_CFA: - orc->bp_reg = ORC_REG_PREV_SP; - break; - case CFI_BP: - orc->bp_reg = ORC_REG_BP; - break; - default: - WARN_FUNC("unknown BP base reg %d", - insn->sec, insn->offset, bp->base); - return -1; - } - - orc->sp_offset = cfa->offset; - orc->bp_offset = bp->offset; - orc->type = insn->cfi.type; + if (!cfi) { + orc->end = 0; + orc->sp_reg = ORC_REG_UNDEFINED; + return 0; } + orc->end = cfi->end; + + if (cfi->cfa.base == CFI_UNDEFINED) { + orc->sp_reg = ORC_REG_UNDEFINED; + return 0; + } + + switch (cfi->cfa.base) { + case CFI_SP: + orc->sp_reg = ORC_REG_SP; + break; + case CFI_SP_INDIRECT: + orc->sp_reg = ORC_REG_SP_INDIRECT; + break; + case CFI_BP: + orc->sp_reg = ORC_REG_BP; + break; + case CFI_BP_INDIRECT: + orc->sp_reg = ORC_REG_BP_INDIRECT; + break; + case CFI_R10: + orc->sp_reg = ORC_REG_R10; + break; + case CFI_R13: + orc->sp_reg = ORC_REG_R13; + break; + case CFI_DI: + orc->sp_reg = ORC_REG_DI; + break; + case CFI_DX: + 
orc->sp_reg = ORC_REG_DX; + break; + default: + WARN_FUNC("unknown CFA base reg %d", + insn->sec, insn->offset, cfi->cfa.base); + return -1; + } + + switch (bp->base) { + case CFI_UNDEFINED: + orc->bp_reg = ORC_REG_UNDEFINED; + break; + case CFI_CFA: + orc->bp_reg = ORC_REG_PREV_SP; + break; + case CFI_BP: + orc->bp_reg = ORC_REG_BP; + break; + default: + WARN_FUNC("unknown BP base reg %d", + insn->sec, insn->offset, bp->base); + return -1; + } + + orc->sp_offset = cfi->cfa.offset; + orc->bp_offset = bp->offset; + orc->type = cfi->type; + return 0; } -static int create_orc_entry(struct elf *elf, struct section *u_sec, struct section *ip_relocsec, - unsigned int idx, struct section *insn_sec, - unsigned long insn_off, struct orc_entry *o) +static int write_orc_entry(struct elf *elf, struct section *orc_sec, + struct section *ip_sec, unsigned int idx, + struct section *insn_sec, unsigned long insn_off, + struct orc_entry *o) { struct orc_entry *orc; - struct reloc *reloc; /* populate ORC data */ - orc = (struct orc_entry *)u_sec->data->d_buf + idx; + orc = (struct orc_entry *)orc_sec->data->d_buf + idx; memcpy(orc, o, sizeof(*orc)); /* populate reloc for ip */ - reloc = malloc(sizeof(*reloc)); - if (!reloc) { - perror("malloc"); + if (elf_add_reloc_to_insn(elf, ip_sec, idx * sizeof(int), R_X86_64_PC32, + insn_sec, insn_off)) return -1; - } - memset(reloc, 0, sizeof(*reloc)); - - insn_to_reloc_sym_addend(insn_sec, insn_off, reloc); - if (!reloc->sym) { - WARN("missing symbol for insn at offset 0x%lx", - insn_off); - return -1; - } - - reloc->type = R_X86_64_PC32; - reloc->offset = idx * sizeof(int); - reloc->sec = ip_relocsec; - - elf_add_reloc(elf, reloc); return 0; } -int create_orc_sections(struct objtool_file *file) -{ - struct instruction *insn, *prev_insn; - struct section *sec, *u_sec, *ip_relocsec; - unsigned int idx; +struct orc_list_entry { + struct list_head list; + struct orc_entry orc; + struct section *insn_sec; + unsigned long insn_off; +}; - struct orc_entry empty = { - .sp_reg = ORC_REG_UNDEFINED, +static int orc_list_add(struct list_head *orc_list, struct orc_entry *orc, + struct section *sec, unsigned long offset) +{ + struct orc_list_entry *entry = malloc(sizeof(*entry)); + + if (!entry) { + WARN("malloc failed"); + return -1; + } + + entry->orc = *orc; + entry->insn_sec = sec; + entry->insn_off = offset; + + list_add_tail(&entry->list, orc_list); + return 0; +} + +static unsigned long alt_group_len(struct alt_group *alt_group) +{ + return alt_group->last_insn->offset + + alt_group->last_insn->len - + alt_group->first_insn->offset; +} + +int orc_create(struct objtool_file *file) +{ + struct section *sec, *orc_sec; + unsigned int nr = 0, idx = 0; + struct orc_list_entry *entry; + struct list_head orc_list; + + struct orc_entry null = { + .sp_reg = ORC_REG_UNDEFINED, .bp_reg = ORC_REG_UNDEFINED, .type = UNWIND_HINT_TYPE_CALL, }; + /* Build a deduplicated list of ORC entries: */ + INIT_LIST_HEAD(&orc_list); + for_each_sec(file, sec) { + struct orc_entry orc, prev_orc = {0}; + struct instruction *insn; + bool empty = true; + + if (!sec->text) + continue; + + sec_for_each_insn(file, sec, insn) { + struct alt_group *alt_group = insn->alt_group; + int i; + + if (!alt_group) { + if (init_orc_entry(&orc, insn->cfi, insn)) + return -1; + if (!memcmp(&prev_orc, &orc, sizeof(orc))) + continue; + if (orc_list_add(&orc_list, &orc, sec, + insn->offset)) + return -1; + nr++; + prev_orc = orc; + empty = false; + continue; + } + + /* + * Alternatives can have different stack layout + * 
possibilities (but they shouldn't conflict). + * Instead of traversing the instructions, use the + * alt_group's flattened byte-offset-addressed CFI + * array. + */ + for (i = 0; i < alt_group_len(alt_group); i++) { + struct cfi_state *cfi = alt_group->cfi[i]; + if (!cfi) + continue; + /* errors are reported on the original insn */ + if (init_orc_entry(&orc, cfi, insn)) + return -1; + if (!memcmp(&prev_orc, &orc, sizeof(orc))) + continue; + if (orc_list_add(&orc_list, &orc, insn->sec, + insn->offset + i)) + return -1; + nr++; + prev_orc = orc; + empty = false; + } + + /* Skip to the end of the alt_group */ + insn = alt_group->last_insn; + } + + /* Add a section terminator */ + if (!empty) { + orc_list_add(&orc_list, &null, sec, sec->len); + nr++; + } + } + if (!nr) + return 0; + + /* Create .orc_unwind, .orc_unwind_ip and .rela.orc_unwind_ip sections: */ sec = find_section_by_name(file->elf, ".orc_unwind"); if (sec) { WARN("file already has .orc_unwind section, skipping"); return -1; } - - /* count the number of needed orcs */ - idx = 0; - for_each_sec(file, sec) { - if (!sec->text) - continue; - - prev_insn = NULL; - sec_for_each_insn(file, sec, insn) { - if (!prev_insn || - memcmp(&insn->orc, &prev_insn->orc, - sizeof(struct orc_entry))) { - idx++; - } - prev_insn = insn; - } - - /* section terminator */ - if (prev_insn) - idx++; - } - if (!idx) + orc_sec = elf_create_section(file->elf, ".orc_unwind", 0, + sizeof(struct orc_entry), nr); + if (!orc_sec) return -1; - - /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */ - sec = elf_create_section(file->elf, ".orc_unwind_ip", 0, sizeof(int), idx); + sec = elf_create_section(file->elf, ".orc_unwind_ip", 0, sizeof(int), nr); if (!sec) return -1; - ip_relocsec = elf_create_reloc_section(file->elf, sec, SHT_RELA); - if (!ip_relocsec) - return -1; - - /* create .orc_unwind section */ - u_sec = elf_create_section(file->elf, ".orc_unwind", 0, - sizeof(struct orc_entry), idx); - - /* populate sections */ - idx = 0; - for_each_sec(file, sec) { - if (!sec->text) - continue; - - prev_insn = NULL; - sec_for_each_insn(file, sec, insn) { - if (!prev_insn || memcmp(&insn->orc, &prev_insn->orc, - sizeof(struct orc_entry))) { - - if (create_orc_entry(file->elf, u_sec, ip_relocsec, idx, - insn->sec, insn->offset, - &insn->orc)) - return -1; - - idx++; - } - prev_insn = insn; - } - - /* section terminator */ - if (prev_insn) { - if (create_orc_entry(file->elf, u_sec, ip_relocsec, idx, - prev_insn->sec, - prev_insn->offset + prev_insn->len, - &empty)) - return -1; - - idx++; - } + /* Write ORC entries to sections: */ + list_for_each_entry(entry, &orc_list, list) { + if (write_orc_entry(file->elf, orc_sec, sec, idx++, + entry->insn_sec, entry->insn_off, + &entry->orc)) + return -1; } - if (elf_rebuild_reloc_section(file->elf, ip_relocsec)) - return -1; - return 0; } diff --git a/tools/objtool/special.c b/tools/objtool/special.c index 1a2420febd08..aff0cee7bac1 100644 --- a/tools/objtool/special.c +++ b/tools/objtool/special.c @@ -55,6 +55,13 @@ void __weak arch_handle_alternative(unsigned short feature, struct special_alt * { } +static void reloc_to_sec_off(struct reloc *reloc, struct section **sec, + unsigned long *off) +{ + *sec = reloc->sym->sec; + *off = reloc->sym->offset + reloc->addend; +} + static int get_alt_entry(struct elf *elf, struct special_entry *entry, struct section *sec, int idx, struct special_alt *alt) @@ -87,14 +94,8 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry, WARN_FUNC("can't find orig reloc", sec, 
offset + entry->orig); return -1; } - if (orig_reloc->sym->type != STT_SECTION) { - WARN_FUNC("don't know how to handle non-section reloc symbol %s", - sec, offset + entry->orig, orig_reloc->sym->name); - return -1; - } - alt->orig_sec = orig_reloc->sym->sec; - alt->orig_off = orig_reloc->addend; + reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off); if (!entry->group || alt->new_len) { new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new); @@ -104,8 +105,7 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry, return -1; } - alt->new_sec = new_reloc->sym->sec; - alt->new_off = (unsigned int)new_reloc->addend; + reloc_to_sec_off(new_reloc, &alt->new_sec, &alt->new_off); /* _ASM_EXTABLE_EX hack */ if (alt->new_off >= 0x7ffffff0) @@ -152,7 +152,9 @@ int special_get_alts(struct elf *elf, struct list_head *alts) memset(alt, 0, sizeof(*alt)); ret = get_alt_entry(elf, entry, sec, idx, alt); - if (ret) + if (ret > 0) + continue; + if (ret < 0) return ret; list_add_tail(&alt->list, alts); diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh index 606a4b5e929f..4bbabaecab14 100755 --- a/tools/objtool/sync-check.sh +++ b/tools/objtool/sync-check.sh @@ -16,11 +16,14 @@ arch/x86/include/asm/emulate_prefix.h arch/x86/lib/x86-opcode-map.txt arch/x86/tools/gen-insn-attr-x86.awk include/linux/static_call_types.h -arch/x86/include/asm/inat.h -I '^#include [\"<]\(asm/\)*inat_types.h[\">]' -arch/x86/include/asm/insn.h -I '^#include [\"<]\(asm/\)*inat.h[\">]' -arch/x86/lib/inat.c -I '^#include [\"<]\(../include/\)*asm/insn.h[\">]' -arch/x86/lib/insn.c -I '^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]' -I '^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]' " + +SYNC_CHECK_FILES=' +arch/x86/include/asm/inat.h +arch/x86/include/asm/insn.h +arch/x86/lib/inat.c +arch/x86/lib/insn.c +' fi check_2 () { @@ -63,3 +66,9 @@ while read -r file_entry; do done </dev/null) FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) + ifeq ($(CC_NO_CLANG), 0) + PYTHON_EMBED_CCOPTS := $(filter-out -ffat-lto-objects, $(PYTHON_EMBED_CCOPTS)) + endif endif FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS) @@ -760,6 +778,9 @@ else LDFLAGS += $(PERL_EMBED_LDFLAGS) EXTLIBS += $(PERL_EMBED_LIBADD) CFLAGS += -DHAVE_LIBPERL_SUPPORT + ifeq ($(CC_NO_CLANG), 0) + CFLAGS += -Wno-compound-token-split-by-macro + endif $(call detected,CONFIG_LIBPERL) endif endif diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build index b53294d74b01..eddaf9bf5729 100644 --- a/tools/perf/arch/arm64/util/Build +++ b/tools/perf/arch/arm64/util/Build @@ -1,5 +1,4 @@ perf-y += header.o -perf-y += machine.o perf-y += perf_regs.o perf-y += tsc.o perf-$(CONFIG_DWARF) += dwarf-regs.o diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c index e3593063b3d1..37765e2bd9dd 100644 --- a/tools/perf/arch/arm64/util/arm-spe.c +++ b/tools/perf/arch/arm64/util/arm-spe.c @@ -124,6 +124,12 @@ static int arm_spe_recording_options(struct auxtrace_record *itr, evsel__set_sample_bit(arm_spe_evsel, TIME); evsel__set_sample_bit(arm_spe_evsel, TID); + /* + * Set this only so that perf report knows that SPE generates memory info. It has no effect + * on the opening of the event or the SPE data produced. 
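+ * + * Note: older perf.data files recorded without this bit are patched up + * at report time in report__setup_sample_type().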
+ */ + evsel__set_sample_bit(arm_spe_evsel, DATA_SRC); + /* Add dummy event to keep tracking */ err = parse_events(evlist, "dummy:u", NULL); if (err) diff --git a/tools/perf/arch/arm64/util/machine.c b/tools/perf/arch/arm64/util/machine.c deleted file mode 100644 index d41b27e781d3..000000000000 --- a/tools/perf/arch/arm64/util/machine.c +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include -#include "debug.h" -#include "symbol.h" - -/* On arm64, kernel text segment start at high memory address, - * for example 0xffff 0000 8xxx xxxx. Modules start at a low memory - * address, like 0xffff 0000 00ax xxxx. When only samll amount of - * memory is used by modules, gap between end of module's text segment - * and start of kernel text segment may be reach 2G. - * Therefore do not fill this gap and do not assign it to the kernel dso map. - */ - -#define SYMBOL_LIMIT (1 << 12) /* 4K */ - -void arch__symbols__fixup_end(struct symbol *p, struct symbol *c) -{ - if ((strchr(p->name, '[') && strchr(c->name, '[') == NULL) || - (strchr(p->name, '[') == NULL && strchr(c->name, '['))) - /* Limit range of last symbol in module and kernel */ - p->end += SYMBOL_LIMIT; - else - p->end = c->start; - pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end); -} diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c index 724efb2d842d..7219ecdb8423 100644 --- a/tools/perf/arch/s390/util/machine.c +++ b/tools/perf/arch/s390/util/machine.c @@ -34,19 +34,3 @@ int arch__fix_module_text_start(u64 *start, u64 *size, const char *name) return 0; } - -/* On s390 kernel text segment start is located at very low memory addresses, - * for example 0x10000. Modules are located at very high memory addresses, - * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment - * and beginning of first module's text segment is very big. - * Therefore do not fill this gap and do not assign it to the kernel dso map. 
- */ -void arch__symbols__fixup_end(struct symbol *p, struct symbol *c) -{ - if (strchr(p->name, '[') == NULL && strchr(c->name, '[')) - /* Last kernel symbol mapped to end of page */ - p->end = roundup(p->end, page_size); - else - p->end = c->start; - pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end); -} diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index 11726ec6285f..88c11305bdd5 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c @@ -1656,7 +1656,7 @@ static int __bench_numa(const char *name) "GB/sec,", "total-speed", "GB/sec total speed"); if (g->p.show_details >= 2) { - char tname[14 + 2 * 10 + 1]; + char tname[14 + 2 * 11 + 1]; struct thread_data *td; for (p = 0; p < g->p.nr_proc; p++) { for (t = 0; t < g->p.nr_threads; t++) { diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index d5bea5d3cd51..fb7d01f3961b 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -918,8 +918,8 @@ percent_rmt_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused, double per_left; double per_right; - per_left = PERCENT(left, lcl_hitm); - per_right = PERCENT(right, lcl_hitm); + per_left = PERCENT(left, rmt_hitm); + per_right = PERCENT(right, rmt_hitm); return per_left - per_right; } @@ -2694,9 +2694,7 @@ static int perf_c2c__report(int argc, const char **argv) "the input file to process"), OPT_INCR('N', "node-info", &c2c.node_info, "show extra node info in report (repeat for more info)"), -#ifdef HAVE_SLANG_SUPPORT OPT_BOOLEAN(0, "stdio", &c2c.use_stdio, "Use the stdio interface"), -#endif OPT_BOOLEAN(0, "stats", &c2c.stats_only, "Display only statistic tables (implies --stdio)"), OPT_BOOLEAN(0, "full-symbols", &c2c.symbol_full, @@ -2725,6 +2723,10 @@ static int perf_c2c__report(int argc, const char **argv) if (argc) usage_with_options(report_c2c_usage, options); +#ifndef HAVE_SLANG_SUPPORT + c2c.use_stdio = true; +#endif + if (c2c.stats_only) c2c.use_stdio = true; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 91cab5cdfbc1..b55ee073c2f7 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -340,6 +340,7 @@ static int report__setup_sample_type(struct report *rep) struct perf_session *session = rep->session; u64 sample_type = evlist__combined_sample_type(session->evlist); bool is_pipe = perf_data__is_pipe(session->data); + struct evsel *evsel; if (session->itrace_synth_opts->callchain || session->itrace_synth_opts->add_callchain || @@ -394,6 +395,19 @@ static int report__setup_sample_type(struct report *rep) } if (sort__mode == SORT_MODE__MEMORY) { + /* + * FIXUP: prior to kernel 5.18, Arm SPE failed to set the + * PERF_SAMPLE_DATA_SRC bit in the sample type. For backward + * compatibility, set the bit if it's an old perf data file. + */ + evlist__for_each_entry(session->evlist, evsel) { + if (strstr(evsel->name, "arm_spe") && + !(sample_type & PERF_SAMPLE_DATA_SRC)) { + evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC; + sample_type |= PERF_SAMPLE_DATA_SRC; + } + } + if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) { ui__error("Selected --mem-mode but no mem data. 
" "Did you call perf record without -d?\n"); diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index 15ecb1803fb9..9f085aa09c97 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -75,6 +75,13 @@ include/uapi/asm-generic/mman-common.h include/uapi/asm-generic/unistd.h ' +SYNC_CHECK_FILES=' +arch/x86/include/asm/inat.h +arch/x86/include/asm/insn.h +arch/x86/lib/inat.c +arch/x86/lib/insn.c +' + # These copies are under tools/perf/trace/beauty/ as they are not used to in # building object files only by scripts in tools/perf/trace/beauty/ to generate # tables that then gets included in .c files for things like id->string syscall @@ -129,6 +136,10 @@ for i in $FILES; do check $i -B done +for i in $SYNC_CHECK_FILES; do + check $i '-I "^.*\/\*.*__ignore_sync_check__.*\*\/.*$"' +done + # diff with extra ignore lines check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include " -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))"' check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include " -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"' @@ -137,10 +148,6 @@ check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"' check include/linux/build_bug.h '-I "^#\(ifndef\|endif\)\( \/\/\)* static_assert$"' check include/linux/ctype.h '-I "isdigit("' check lib/ctype.c '-I "^EXPORT_SYMBOL" -I "^#include " -B' -check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"' -check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"' -check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"' -check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"' # diff non-symmetric files check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 27f94b0bb874..505e2a2f1872 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -433,7 +433,7 @@ void pthread__unblock_sigwinch(void) static int libperf_print(enum libperf_print_level level, const char *fmt, va_list ap) { - return eprintf(level, verbose, fmt, ap); + return veprintf(level, verbose, fmt, ap); } int main(int argc, const char **argv) diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index c679a79aef51..1f20f587e053 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -579,7 +579,7 @@ static int json_events(const char *fn, } else if (json_streq(map, field, "ExtSel")) { char *code = NULL; addfield(map, &code, "", "", val); - eventcode |= strtoul(code, NULL, 0) << 21; + eventcode |= strtoul(code, NULL, 0) << 8; free(code); } else if (json_streq(map, field, "EventName")) { addfield(map, &je.name, "", "", val); diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index d3c15b53495d..d96e86ddd2c5 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -2164,11 +2164,19 @@ struct sym_args { bool near; }; +static bool kern_sym_name_match(const char *kname, const char *name) +{ + size_t n = strlen(name); + + return !strcmp(kname, name) || + (!strncmp(kname, name, n) && kname[n] == '\t'); +} + static bool kern_sym_match(struct sym_args *args, const char *name, char type) { /* A function with the same name, and global or the n'th found or any */ return kallsyms__is_function(type) && - !strcmp(name, args->name) && + kern_sym_name_match(name, args->name) && 
((args->global && isupper(type)) || + (args->selected && ++(args->cnt) == args->idx) || + (!args->global && !args->selected)); diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h index 75947ef6bc17..5b52ffedf0d5 100644 --- a/tools/perf/util/data.h +++ b/tools/perf/util/data.h @@ -3,6 +3,7 @@ #define __PERF_DATA_H #include +#include enum perf_data_mode { PERF_DATA_MODE_WRITE, diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c index 183a81d5b2f9..2db91121bdaf 100644 --- a/tools/perf/util/dsos.c +++ b/tools/perf/util/dsos.c @@ -20,8 +20,19 @@ static int __dso_id__cmp(struct dso_id *a, struct dso_id *b) if (a->ino > b->ino) return -1; if (a->ino < b->ino) return 1; - if (a->ino_generation > b->ino_generation) return -1; - if (a->ino_generation < b->ino_generation) return 1; + /* + * Synthesized MMAP events have zero ino_generation, avoid comparing + * them with MMAP events with actual ino_generation. + * + * I found it harmful because the mismatch resulted in a new + * dso that did not have a build ID whereas the original dso did have a + * build ID. The build ID was essential because the object was not found + * otherwise. - Adrian + */ + if (a->ino_generation && b->ino_generation) { + if (a->ino_generation > b->ino_generation) return -1; + if (a->ino_generation < b->ino_generation) return 1; + } return 0; } diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c index aed49806a09b..02cd9f75e3d2 100644 --- a/tools/perf/util/genelf.c +++ b/tools/perf/util/genelf.c @@ -30,7 +30,11 @@ #define BUILD_ID_URANDOM /* different uuid for each run */ -#ifdef HAVE_LIBCRYPTO +// FIXME: remove this and fix the deprecation warnings before it's removed and +// we'll break for good here... +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +#ifdef HAVE_LIBCRYPTO_SUPPORT #define BUILD_ID_MD5 #undef BUILD_ID_SHA /* does not seem to work well when linked with Java */ @@ -247,6 +251,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym, Elf_Data *d; Elf_Scn *scn; Elf_Ehdr *ehdr; + Elf_Phdr *phdr; Elf_Shdr *shdr; uint64_t eh_frame_base_offset; char *strsym = NULL; @@ -281,6 +286,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym, ehdr->e_version = EV_CURRENT; ehdr->e_shstrndx= unwinding ? 
4 : 2; /* shdr index for section name */ + /* + * setup program header + */ + phdr = elf_newphdr(e, 1); + phdr[0].p_type = PT_LOAD; + phdr[0].p_offset = 0; + phdr[0].p_vaddr = 0; + phdr[0].p_paddr = 0; + phdr[0].p_filesz = csize; + phdr[0].p_memsz = csize; + phdr[0].p_flags = PF_X | PF_R; + phdr[0].p_align = 8; + /* * setup text section */ diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h index d4137559be05..ac638945b4cb 100644 --- a/tools/perf/util/genelf.h +++ b/tools/perf/util/genelf.h @@ -50,8 +50,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent #if GEN_ELF_CLASS == ELFCLASS64 #define elf_newehdr elf64_newehdr +#define elf_newphdr elf64_newphdr #define elf_getshdr elf64_getshdr #define Elf_Ehdr Elf64_Ehdr +#define Elf_Phdr Elf64_Phdr #define Elf_Shdr Elf64_Shdr #define Elf_Sym Elf64_Sym #define ELF_ST_TYPE(a) ELF64_ST_TYPE(a) @@ -59,8 +61,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent #define ELF_ST_VIS(a) ELF64_ST_VISIBILITY(a) #else #define elf_newehdr elf32_newehdr +#define elf_newphdr elf32_newphdr #define elf_getshdr elf32_getshdr #define Elf_Ehdr Elf32_Ehdr +#define Elf_Phdr Elf32_Phdr #define Elf_Shdr Elf32_Shdr #define Elf_Sym Elf32_Sym #define ELF_ST_TYPE(a) ELF32_ST_TYPE(a) diff --git a/tools/perf/util/get_current_dir_name.c b/tools/perf/util/get_current_dir_name.c index b205d929245f..e68935e9ac8c 100644 --- a/tools/perf/util/get_current_dir_name.c +++ b/tools/perf/util/get_current_dir_name.c @@ -3,8 +3,9 @@ // #ifndef HAVE_GET_CURRENT_DIR_NAME #include "get_current_dir_name.h" +#include +#include #include -#include /* Android's 'bionic' library, for one, doesn't have this */ diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 5163d2ffea70..453773ce6f45 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -3279,6 +3279,7 @@ static const char * const intel_pt_info_fmts[] = { [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n", [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n", [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n", + [INTEL_PT_MTC_FREQ_BITS] = " MTC freq bits %#"PRIx64"\n", [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n", [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n", [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n", @@ -3293,8 +3294,12 @@ static void intel_pt_print_info(__u64 *arr, int start, int finish) if (!dump_trace) return; - for (i = start; i <= finish; i++) - fprintf(stdout, intel_pt_info_fmts[i], arr[i]); + for (i = start; i <= finish; i++) { + const char *fmt = intel_pt_info_fmts[i]; + + if (fmt) + fprintf(stdout, fmt, arr[i]); + } } static void intel_pt_print_info_str(const char *name, const char *str) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 3b273580fb84..c56a4d9c3be9 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -356,6 +356,12 @@ __add_event(struct list_head *list, int *idx, struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) : cpu_list ? 
perf_cpu_map__new(cpu_list) : NULL; + if (pmu) + perf_pmu__warn_invalid_formats(pmu); + + if (pmu && attr->type == PERF_TYPE_RAW) + perf_pmu__warn_invalid_config(pmu, attr->config, name); + if (init_attr) event_attr_init(attr); @@ -1442,7 +1448,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, bool use_uncore_alias; LIST_HEAD(config_terms); - if (verbose > 1) { + pmu = parse_state->fake_pmu ?: perf_pmu__find(name); + + if (verbose > 1 && !(pmu && pmu->selectable)) { fprintf(stderr, "Attempting to add event pmu '%s' with '", name); if (head_config) { @@ -1455,7 +1463,6 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, fprintf(stderr, "' that may result in non-fatal errors\n"); } - pmu = parse_state->fake_pmu ?: perf_pmu__find(name); if (!pmu) { char *err_str; diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index d41caeb35cf6..ac45da0302a7 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -862,6 +862,23 @@ static struct perf_pmu *pmu_lookup(const char *name) return pmu; } +void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu) +{ + struct perf_pmu_format *format; + + /* fake pmu doesn't have format list */ + if (pmu == &perf_pmu__fake) + return; + + list_for_each_entry(format, &pmu->format, list) + if (format->value >= PERF_PMU_FORMAT_VALUE_CONFIG_END) { + pr_warning("WARNING: '%s' format '%s' requires 'perf_event_attr::config%d' " + "which is not supported by this version of perf!\n", + pmu->name, format->name, format->value); + return; + } +} + static struct perf_pmu *pmu_find(const char *name) { struct perf_pmu *pmu; @@ -1716,3 +1733,36 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu) return nr_caps; } + +void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config, + char *name) +{ + struct perf_pmu_format *format; + __u64 masks = 0, bits; + char buf[100]; + unsigned int i; + + list_for_each_entry(format, &pmu->format, list) { + if (format->value != PERF_PMU_FORMAT_VALUE_CONFIG) + continue; + + for_each_set_bit(i, format->bits, PERF_PMU_FORMAT_BITS) + masks |= 1ULL << i; + } + + /* + * Kernel doesn't export any valid format bits. 
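+ * In that case there is nothing to validate against, so skip the + * check rather than warn about every raw event.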
+ */ + if (masks == 0) + return; + + bits = config & ~masks; + if (bits == 0) + return; + + bitmap_scnprintf((unsigned long *)&bits, sizeof(bits) * 8, buf, sizeof(buf)); + + pr_warning("WARNING: event '%s' not valid (bits %s of config " + "'%llx' not supported by kernel)!\n", + name ?: "N/A", buf, config); +} diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index a64e9c9ce731..7d208b850769 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -15,6 +15,7 @@ enum { PERF_PMU_FORMAT_VALUE_CONFIG, PERF_PMU_FORMAT_VALUE_CONFIG1, PERF_PMU_FORMAT_VALUE_CONFIG2, + PERF_PMU_FORMAT_VALUE_CONFIG_END, }; #define PERF_PMU_FORMAT_BITS 64 @@ -120,4 +121,8 @@ int perf_pmu__convert_scale(const char *scale, char **end, double *sval); int perf_pmu__caps_parse(struct perf_pmu *pmu); +void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config, + char *name); +void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu); + #endif /* __PMU_H */ diff --git a/tools/perf/util/pmu.l b/tools/perf/util/pmu.l index a15d9fbd7c0e..58b4926cfaca 100644 --- a/tools/perf/util/pmu.l +++ b/tools/perf/util/pmu.l @@ -27,8 +27,6 @@ num_dec [0-9]+ {num_dec} { return value(10); } config { return PP_CONFIG; } -config1 { return PP_CONFIG1; } -config2 { return PP_CONFIG2; } - { return '-'; } : { return ':'; } , { return ','; } diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y index bfd7e8509869..283efe059819 100644 --- a/tools/perf/util/pmu.y +++ b/tools/perf/util/pmu.y @@ -20,7 +20,7 @@ do { \ %} -%token PP_CONFIG PP_CONFIG1 PP_CONFIG2 +%token PP_CONFIG %token PP_VALUE PP_ERROR %type PP_VALUE %type bit_term @@ -47,18 +47,11 @@ PP_CONFIG ':' bits $3)); } | -PP_CONFIG1 ':' bits +PP_CONFIG PP_VALUE ':' bits { ABORT_ON(perf_pmu__new_format(format, name, - PERF_PMU_FORMAT_VALUE_CONFIG1, - $3)); -} -| -PP_CONFIG2 ':' bits -{ - ABORT_ON(perf_pmu__new_format(format, name, - PERF_PMU_FORMAT_VALUE_CONFIG2, - $3)); + $2, + $4)); } bits: diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index d103084fcd56..97e2a72bd6f5 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -1760,8 +1760,10 @@ int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev) if (!pev->event && pev->point.function && pev->point.line && !pev->point.lazy_line && !pev->point.offset) { if (asprintf(&pev->event, "%s_L%d", pev->point.function, - pev->point.line) < 0) - return -ENOMEM; + pev->point.line) < 0) { + ret = -ENOMEM; + goto out; + } } /* Copy arguments and ensure return probe has no C argument */ diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 9dddec19a494..354e1e04a266 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -2056,6 +2056,7 @@ prefetch_event(char *buf, u64 head, size_t mmap_size, bool needs_swap, union perf_event *error) { union perf_event *event; + u16 event_size; /* * Ensure we have enough space remaining to read @@ -2068,15 +2069,23 @@ prefetch_event(char *buf, u64 head, size_t mmap_size, if (needs_swap) perf_event_header__bswap(&event->header); - if (head + event->header.size <= mmap_size) + event_size = event->header.size; + if (head + event_size <= mmap_size) return event; /* We're not fetching the event so swap back again */ if (needs_swap) perf_event_header__bswap(&event->header); - pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:" - " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size); + /* Check if the event fits into the next mmapped buf. 
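If so, returning NULL tells the caller to remap the buffer and fetch the event again.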
*/ + if (event_size <= mmap_size - head % page_size) { + /* Remap buf and fetch again. */ + return NULL; + } + + /* Invalid input. Event size should never exceed mmap_size. */ + pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:" + " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size); return error; } diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index c5e3e9a68162..b670469a8124 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py @@ -1,12 +1,14 @@ -from os import getenv +from os import getenv, path from subprocess import Popen, PIPE from re import sub cc = getenv("CC") cc_is_clang = b"clang version" in Popen([cc.split()[0], "-v"], stderr=PIPE).stderr.readline() +src_feature_tests = getenv('srctree') + '/tools/build/feature' def clang_has_option(option): - return [o for o in Popen([cc, option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ] + cc_output = Popen([cc, option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines() + return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o))] == [ ] if cc_is_clang: from distutils.sysconfig import get_config_vars @@ -23,6 +25,8 @@ if cc_is_clang: vars[var] = sub("-fstack-protector-strong", "", vars[var]) if not clang_has_option("-fno-semantic-interposition"): vars[var] = sub("-fno-semantic-interposition", "", vars[var]) + if not clang_has_option("-ffat-lto-objects"): + vars[var] = sub("-ffat-lto-objects", "", vars[var]) from distutils.core import setup, Extension diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 96fe9c1af336..4688e39de52a 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -203,7 +203,7 @@ static void new_line_csv(struct perf_stat_config *config, void *ctx) fputc('\n', os->fh); if (os->prefix) - fprintf(os->fh, "%s%s", os->prefix, config->csv_sep); + fprintf(os->fh, "%s", os->prefix); aggr_printout(config, os->evsel, os->id, os->nr); for (i = 0; i < os->nfields; i++) fputs(config->csv_sep, os->fh); diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 7356eb398b32..3e423a920015 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -232,6 +232,33 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, return NULL; } +static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr) +{ + size_t i, phdrnum; + u64 sz; + + if (elf_getphdrnum(elf, &phdrnum)) + return -1; + + for (i = 0; i < phdrnum; i++) { + if (gelf_getphdr(elf, i, phdr) == NULL) + return -1; + + if (phdr->p_type != PT_LOAD) + continue; + + sz = max(phdr->p_memsz, phdr->p_filesz); + if (!sz) + continue; + + if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz)) + return 0; + } + + /* No valid program header found */ + return -1; +} + static bool want_demangle(bool is_kernel_sym) { return is_kernel_sym ? 
symbol_conf.demangle_kernel : symbol_conf.demangle; @@ -1181,6 +1208,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, sym.st_value); used_opd = true; } + /* * When loading symbols in a data mapping, ABS symbols (which * has a value of SHN_ABS in its st_shndx) failed at @@ -1217,11 +1245,33 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, goto out_elf_end; } else if ((used_opd && runtime_ss->adjust_symbols) || (!used_opd && syms_ss->adjust_symbols)) { - pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " - "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__, - (u64)sym.st_value, (u64)shdr.sh_addr, - (u64)shdr.sh_offset); - sym.st_value -= shdr.sh_addr - shdr.sh_offset; + GElf_Phdr phdr; + + if (elf_read_program_header(syms_ss->elf, + (u64)sym.st_value, &phdr)) { + pr_debug4("%s: failed to find program header for " + "symbol: %s st_value: %#" PRIx64 "\n", + __func__, elf_name, (u64)sym.st_value); + pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " + "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", + __func__, (u64)sym.st_value, (u64)shdr.sh_addr, + (u64)shdr.sh_offset); + /* + * Failed to find a program header, so fall back + * to using shdr.sh_addr and shdr.sh_offset to + * calibrate the symbol's file address. This is + * not necessary for a normal C ELF file, but we + * still need to handle Java JIT symbols in this + * case. + */ + sym.st_value -= shdr.sh_addr - shdr.sh_offset; + } else { + pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " + "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n", + __func__, (u64)sym.st_value, (u64)phdr.p_vaddr, + (u64)phdr.p_offset); + sym.st_value -= phdr.p_vaddr - phdr.p_offset; + } } demangled = demangle_sym(dso, kmodule, elf_name); @@ -1245,7 +1295,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, * For misannotated, zeroed, ASM function sizes. */ if (nr > 0) { - symbols__fixup_end(&dso->symbols); + symbols__fixup_end(&dso->symbols, false); symbols__fixup_duplicate(&dso->symbols); if (kmap) { /* @@ -1952,8 +2002,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir, * unusual. One significant peculiarity is that the mapping (start -> pgoff) * is not the same for the kernel map and the modules map. That happens because * the data is copied adjacently whereas the original kcore has gaps. Finally, - * kallsyms and modules files are compared with their copies to check that - * modules have not been loaded or unloaded while the copies were taking place. + * the kallsyms file is compared with its copy to check that modules have not + * been loaded or unloaded while the copies were taking place. * * Return: %0 on success, %-1 on failure.
*/ @@ -2016,9 +2066,6 @@ int kcore_copy(const char *from_dir, const char *to_dir) goto out_extract_close; } - if (kcore_copy__compare_file(from_dir, to_dir, "modules")) - goto out_extract_close; - if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms")) goto out_extract_close; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 3609da7cce0a..33954835c823 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -101,11 +101,6 @@ static int prefix_underscores_count(const char *str) return tail - str; } -void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c) -{ - p->end = c->start; -} - const char * __weak arch__normalize_symbol_name(const char *name) { return name; @@ -217,7 +212,8 @@ again: } } -void symbols__fixup_end(struct rb_root_cached *symbols) +/* Update zero-sized symbols using the address of the next symbol */ +void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms) { struct rb_node *nd, *prevnd = rb_first_cached(symbols); struct symbol *curr, *prev; @@ -231,8 +227,29 @@ void symbols__fixup_end(struct rb_root_cached *symbols) prev = curr; curr = rb_entry(nd, struct symbol, rb_node); - if (prev->end == prev->start || prev->end != curr->start) - arch__symbols__fixup_end(prev, curr); + /* + * On some architectures the kernel text segment starts at a + * low memory address, while modules are located at high + * memory addresses (or vice versa). The gap between the end + * of the kernel text segment and the beginning of the first + * module's text segment is very big. Therefore do not fill + * this gap and do not assign it to the kernel dso map + * (kallsyms). + * + * In kallsyms, module symbols are marked with a '[' character, + * as in: + * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi] + */ + if (prev->end == prev->start) { + /* Last kernel/module symbol mapped to end of page */ + if (is_kallsyms && (!strchr(prev->name, '[') != + !strchr(curr->name, '['))) + prev->end = roundup(prev->end + 4096, 4096); + else + prev->end = curr->start; + + pr_debug4("%s sym:%s end:%#" PRIx64 "\n", + __func__, prev->name, prev->end); + } } /* Last entry */ @@ -1456,7 +1473,7 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename, if (kallsyms__delta(kmap, filename, &delta)) return -1; - symbols__fixup_end(&dso->symbols); + symbols__fixup_end(&dso->symbols, true); symbols__fixup_duplicate(&dso->symbols); if (dso->kernel == DSO_SPACE__KERNEL_GUEST) @@ -1651,7 +1668,7 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile) #undef bfd_asymbol_section #endif - symbols__fixup_end(&dso->symbols); + symbols__fixup_end(&dso->symbols, false); symbols__fixup_duplicate(&dso->symbols); dso->adjust_symbols = 1; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 954d6a049ee2..28721d761d91 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -192,7 +192,7 @@ void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym, bool kernel); void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym); void symbols__fixup_duplicate(struct rb_root_cached *symbols); -void symbols__fixup_end(struct rb_root_cached *symbols); +void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms); void maps__fixup_end(struct maps *maps); typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data); @@ -230,7 +230,6 @@ const char *arch__normalize_symbol_name(const char *name); #define SYMBOL_A 0 #define SYMBOL_B 1 -void arch__symbols__fixup_end(struct symbol *p,
struct symbol *c); int arch__compare_symbol_names(const char *namea, const char *nameb); int arch__compare_symbol_names_n(const char *namea, const char *nameb, unsigned int n); diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 424ed19a9d54..ef65f7eed1ec 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -4189,6 +4189,7 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units) case INTEL_FAM6_HASWELL_X: /* HSX */ case INTEL_FAM6_BROADWELL_X: /* BDX */ case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ + case INTEL_FAM6_ICELAKE_X: /* ICX */ return (rapl_dram_energy_units = 15.3 / 1000000); default: return (rapl_energy_units); diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index d9c283503159..5074c8b82367 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -9,6 +9,7 @@ TARGETS += clone3 TARGETS += core TARGETS += cpufreq TARGETS += cpu-hotplug +TARGETS += damon TARGETS += drivers/dma-buf TARGETS += efivarfs TARGETS += exec diff --git a/tools/testing/selftests/arm64/signal/testcases/testcases.c b/tools/testing/selftests/arm64/signal/testcases/testcases.c index 61ebcdf63831..a3ac5c2d8aac 100644 --- a/tools/testing/selftests/arm64/signal/testcases/testcases.c +++ b/tools/testing/selftests/arm64/signal/testcases/testcases.c @@ -33,7 +33,7 @@ bool validate_extra_context(struct extra_context *extra, char **err) return false; fprintf(stderr, "Validating EXTRA...\n"); - term = GET_RESV_NEXT_HEAD(extra); + term = GET_RESV_NEXT_HEAD(&extra->head); if (!term || term->magic || term->size) { *err = "Missing terminator after EXTRA context"; return false; diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 93162484c2ca..48b01150e703 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -4758,7 +4758,7 @@ static void do_test_pprint(int test_num) ret = snprintf(pin_path, sizeof(pin_path), "%s/%s", "/sys/fs/bpf", test->map_name); - if (CHECK(ret == sizeof(pin_path), "pin_path %s/%s is too long", + if (CHECK(ret >= sizeof(pin_path), "pin_path %s/%s is too long", "/sys/fs/bpf", test->map_name)) { err = -1; goto done; diff --git a/tools/testing/selftests/bpf/prog_tests/sock_fields.c b/tools/testing/selftests/bpf/prog_tests/sock_fields.c index af87118e748e..e8b5bf707cd4 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_fields.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_fields.c @@ -1,9 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook */ +#define _GNU_SOURCE #include #include #include +#include #include #include #include @@ -21,6 +23,7 @@ enum bpf_linum_array_idx { EGRESS_LINUM_IDX, INGRESS_LINUM_IDX, + READ_SK_DST_PORT_LINUM_IDX, __NR_BPF_LINUM_ARRAY_IDX, }; @@ -43,8 +46,16 @@ static __u64 child_cg_id; static int linum_map_fd; static __u32 duration; -static __u32 egress_linum_idx = EGRESS_LINUM_IDX; -static __u32 ingress_linum_idx = INGRESS_LINUM_IDX; +static bool create_netns(void) +{ + if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) + return false; + + if (!ASSERT_OK(system("ip link set dev lo up"), "bring up lo")) + return false; + + return true; +} static void print_sk(const struct bpf_sock *sk, const char *prefix) { @@ -92,19 +103,24 @@ static void check_result(void) { struct bpf_tcp_sock srv_tp, cli_tp, listen_tp; struct bpf_sock srv_sk, cli_sk, listen_sk; - __u32 ingress_linum, 
egress_linum; + __u32 idx, ingress_linum, egress_linum, linum; int err; - err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx, - &egress_linum); + idx = EGRESS_LINUM_IDX; + err = bpf_map_lookup_elem(linum_map_fd, &idx, &egress_linum); CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)", "err:%d errno:%d\n", err, errno); - err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx, - &ingress_linum); + idx = INGRESS_LINUM_IDX; + err = bpf_map_lookup_elem(linum_map_fd, &idx, &ingress_linum); CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)", "err:%d errno:%d\n", err, errno); + idx = READ_SK_DST_PORT_LINUM_IDX; + err = bpf_map_lookup_elem(linum_map_fd, &idx, &linum); + ASSERT_OK(err, "bpf_map_lookup_elem(linum_map_fd, READ_SK_DST_PORT_IDX)"); + ASSERT_EQ(linum, 0, "failure in read_sk_dst_port on line"); + memcpy(&srv_sk, &skel->bss->srv_sk, sizeof(srv_sk)); memcpy(&srv_tp, &skel->bss->srv_tp, sizeof(srv_tp)); memcpy(&cli_sk, &skel->bss->cli_sk, sizeof(cli_sk)); @@ -263,7 +279,7 @@ static void test(void) char buf[DATA_LEN]; /* Prepare listen_fd */ - listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); + listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0xcafe, 0); /* start_server() has logged the error details */ if (CHECK_FAIL(listen_fd == -1)) goto done; @@ -331,8 +347,12 @@ done: void test_sock_fields(void) { - struct bpf_link *egress_link = NULL, *ingress_link = NULL; int parent_cg_fd = -1, child_cg_fd = -1; + struct bpf_link *link; + + /* Use a dedicated netns to have a fixed listen port */ + if (!create_netns()) + return; /* Create a cgroup, get fd, and join it */ parent_cg_fd = test__join_cgroup(PARENT_CGROUP); @@ -353,17 +373,20 @@ void test_sock_fields(void) if (CHECK(!skel, "test_sock_fields__open_and_load", "failed\n")) goto done; - egress_link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields, - child_cg_fd); - if (CHECK(IS_ERR(egress_link), "attach_cgroup(egress)", "err:%ld\n", - PTR_ERR(egress_link))) + link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields, child_cg_fd); + if (!ASSERT_OK_PTR(link, "attach_cgroup(egress_read_sock_fields)")) goto done; + skel->links.egress_read_sock_fields = link; - ingress_link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields, - child_cg_fd); - if (CHECK(IS_ERR(ingress_link), "attach_cgroup(ingress)", "err:%ld\n", - PTR_ERR(ingress_link))) + link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields, child_cg_fd); + if (!ASSERT_OK_PTR(link, "attach_cgroup(ingress_read_sock_fields)")) goto done; + skel->links.ingress_read_sock_fields = link; + + link = bpf_program__attach_cgroup(skel->progs.read_sk_dst_port, child_cg_fd); + if (!ASSERT_OK_PTR(link, "attach_cgroup(read_sk_dst_port)")) + goto done; + skel->links.read_sk_dst_port = link; linum_map_fd = bpf_map__fd(skel->maps.linum_map); sk_pkt_out_cnt_fd = bpf_map__fd(skel->maps.sk_pkt_out_cnt); @@ -372,8 +395,7 @@ void test_sock_fields(void) test(); done: - bpf_link__destroy(egress_link); - bpf_link__destroy(ingress_link); + test_sock_fields__detach(skel); test_sock_fields__destroy(skel); if (child_cg_fd != -1) close(child_cg_fd); diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c index 31975c96e2c9..fe43556e1a61 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c @@ -94,7 +94,7 @@ typedef void (* (*signal_t)(int, void (*)(int)))(int); typedef
char * (*fn_ptr_arr1_t[10])(int **); -typedef char * (* const (* const fn_ptr_arr2_t[5])())(char * (*)(int)); +typedef char * (* (* const fn_ptr_arr2_t[5])())(char * (*)(int)); struct struct_w_typedefs { int_t a; diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c index 7967348b11af..43b31aa1fcf7 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c @@ -12,6 +12,7 @@ enum bpf_linum_array_idx { EGRESS_LINUM_IDX, INGRESS_LINUM_IDX, + READ_SK_DST_PORT_LINUM_IDX, __NR_BPF_LINUM_ARRAY_IDX, }; @@ -250,4 +251,48 @@ int ingress_read_sock_fields(struct __sk_buff *skb) return CG_OK; } +static __noinline bool sk_dst_port__load_word(struct bpf_sock *sk) +{ + __u32 *word = (__u32 *)&sk->dst_port; + return word[0] == bpf_htonl(0xcafe0000); +} + +static __noinline bool sk_dst_port__load_half(struct bpf_sock *sk) +{ + __u16 *half = (__u16 *)&sk->dst_port; + return half[0] == bpf_htons(0xcafe); +} + +static __noinline bool sk_dst_port__load_byte(struct bpf_sock *sk) +{ + __u8 *byte = (__u8 *)&sk->dst_port; + return byte[0] == 0xca && byte[1] == 0xfe; +} + +SEC("cgroup_skb/egress") +int read_sk_dst_port(struct __sk_buff *skb) +{ + __u32 linum, linum_idx; + struct bpf_sock *sk; + + linum_idx = READ_SK_DST_PORT_LINUM_IDX; + + sk = skb->sk; + if (!sk) + RET_LOG(); + + /* Ignore everything but the SYN from the client socket */ + if (sk->state != BPF_TCP_SYN_SENT) + return CG_OK; + + if (!sk_dst_port__load_word(sk)) + RET_LOG(); + if (!sk_dst_port__load_half(sk)) + RET_LOG(); + if (!sk_dst_port__load_byte(sk)) + RET_LOG(); + + return CG_OK; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index a4c55fcb0e7b..0fb92d9a319b 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -100,7 +100,7 @@ struct bpf_test { enum bpf_prog_type prog_type; uint8_t flags; void (*fill_helper)(struct bpf_test *self); - uint8_t runs; + int runs; #define bpf_testdata_struct_t \ struct { \ uint32_t retval, retval_unpriv; \ @@ -1054,7 +1054,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv, run_errs = 0; run_successes = 0; - if (!alignment_prevented_execution && fd_prog >= 0) { + if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) { uint32_t expected_val; int i; diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c index 2ad5f974451c..fd3b62a084b9 100644 --- a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c +++ b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c @@ -239,6 +239,7 @@ .result = ACCEPT, .prog_type = BPF_PROG_TYPE_SK_LOOKUP, .expected_attach_type = BPF_SK_LOOKUP, + .runs = -1, }, /* invalid 8-byte reads from a 4-byte fields in bpf_sk_lookup */ { diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c index 006b5bd99c08..525d810b10b8 100644 --- a/tools/testing/selftests/bpf/verifier/ref_tracking.c +++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c @@ -901,3 +901,39 @@ .result_unpriv = REJECT, .errstr_unpriv = "unknown func", }, +{ + "reference tracking: try to leak released ptr reg", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + 
BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), + + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_ringbuf_reserve), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_EMIT_CALL(BPF_FUNC_ringbuf_discard), + BPF_MOV64_IMM(BPF_REG_0, 0), + + BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_8, 0), + BPF_EXIT_INSN() + }, + .fixup_map_array_48b = { 4 }, + .fixup_map_ringbuf = { 11 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R8 !read_ok" +}, diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c index ce13ece08d51..8c224eac93df 100644 --- a/tools/testing/selftests/bpf/verifier/sock.c +++ b/tools/testing/selftests/bpf/verifier/sock.c @@ -121,7 +121,25 @@ .result = ACCEPT, }, { - "sk_fullsock(skb->sk): sk->dst_port [narrow load]", + "sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + .result = ACCEPT, +}, +{ + "sk_fullsock(skb->sk): sk->dst_port [half load]", .insns = { BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), @@ -139,7 +157,7 @@ .result = ACCEPT, }, { - "sk_fullsock(skb->sk): sk->dst_port [load 2nd byte]", + "sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)", .insns = { BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), @@ -149,7 +167,64 @@ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + .result = REJECT, + .errstr = "invalid sock access", +}, +{ + "sk_fullsock(skb->sk): sk->dst_port [byte load]", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port)), + BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + .result = ACCEPT, +}, +{ + "sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + 
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + .result = REJECT, + .errstr = "invalid sock access", +}, +{ + "sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, dst_port)), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index 05853b0b8831..5b16c7b0ae4f 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c @@ -219,7 +219,7 @@ int cg_find_unified_root(char *root, size_t len) int cg_create(const char *cgroup) { - return mkdir(cgroup, 0644); + return mkdir(cgroup, 0755); } int cg_wait_for_proc_count(const char *cgroup, int count) @@ -337,13 +337,13 @@ pid_t clone_into_cgroup(int cgroup_fd) #ifdef CLONE_ARGS_SIZE_VER2 pid_t pid; - struct clone_args args = { + struct __clone_args args = { .flags = CLONE_INTO_CGROUP, .exit_signal = SIGCHLD, .cgroup = cgroup_fd, }; - pid = sys_clone3(&args, sizeof(struct clone_args)); + pid = sys_clone3(&args, sizeof(struct __clone_args)); /* * Verify that this is a genuine test failure: * ENOSYS -> clone3() not available diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c index 3df648c37876..600123503063 100644 --- a/tools/testing/selftests/cgroup/test_core.c +++ b/tools/testing/selftests/cgroup/test_core.c @@ -1,11 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#define _GNU_SOURCE #include +#include #include #include #include #include #include +#include #include #include #include @@ -674,6 +677,166 @@ cleanup: return ret; } +/* + * cgroup migration permission check should be performed based on the + * credentials at the time of open instead of write. 
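+ * + * The test below drops its effective uid to an unprivileged user just for + * the open(), restores root, and then expects the write() of a pid into + * cgroup.procs to fail with EACCES even though it is issued with root + * credentials restored: the open-time credentials must govern the migration.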
+ */ +static int test_cgcore_lesser_euid_open(const char *root) +{ + const uid_t test_euid = 65534; /* usually nobody, any !root is fine */ + int ret = KSFT_FAIL; + char *cg_test_a = NULL, *cg_test_b = NULL; + char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL; + int cg_test_b_procs_fd = -1; + uid_t saved_uid; + + cg_test_a = cg_name(root, "cg_test_a"); + cg_test_b = cg_name(root, "cg_test_b"); + + if (!cg_test_a || !cg_test_b) + goto cleanup; + + cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs"); + cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs"); + + if (!cg_test_a_procs || !cg_test_b_procs) + goto cleanup; + + if (cg_create(cg_test_a) || cg_create(cg_test_b)) + goto cleanup; + + if (cg_enter_current(cg_test_a)) + goto cleanup; + + if (chown(cg_test_a_procs, test_euid, -1) || + chown(cg_test_b_procs, test_euid, -1)) + goto cleanup; + + saved_uid = geteuid(); + if (seteuid(test_euid)) + goto cleanup; + + cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR); + + if (seteuid(saved_uid)) + goto cleanup; + + if (cg_test_b_procs_fd < 0) + goto cleanup; + + if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + cg_enter_current(root); + if (cg_test_b_procs_fd >= 0) + close(cg_test_b_procs_fd); + if (cg_test_b) + cg_destroy(cg_test_b); + if (cg_test_a) + cg_destroy(cg_test_a); + free(cg_test_b_procs); + free(cg_test_a_procs); + free(cg_test_b); + free(cg_test_a); + return ret; +} + +struct lesser_ns_open_thread_arg { + const char *path; + int fd; + int err; +}; + +static int lesser_ns_open_thread_fn(void *arg) +{ + struct lesser_ns_open_thread_arg *targ = arg; + + targ->fd = open(targ->path, O_RDWR); + targ->err = errno; + return 0; +} + +/* + * cgroup migration permission check should be performed based on the cgroup + * namespace at the time of open instead of write. 
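+ * + * The open() below is done by a clone()d child running in a fresh cgroup + * namespace (CLONE_NEWCGROUP) that shares the parent's file table + * (CLONE_FILES), so the parent can later write() through the very same fd. + * That write must fail with ENOENT: the parent has meanwhile moved to a + * sibling cgroup which is not reachable from the opener's namespace.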
+ */ +static int test_cgcore_lesser_ns_open(const char *root) +{ + static char stack[65536]; + const uid_t test_euid = 65534; /* usually nobody, any !root is fine */ + int ret = KSFT_FAIL; + char *cg_test_a = NULL, *cg_test_b = NULL; + char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL; + int cg_test_b_procs_fd = -1; + struct lesser_ns_open_thread_arg targ = { .fd = -1 }; + pid_t pid; + int status; + + cg_test_a = cg_name(root, "cg_test_a"); + cg_test_b = cg_name(root, "cg_test_b"); + + if (!cg_test_a || !cg_test_b) + goto cleanup; + + cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs"); + cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs"); + + if (!cg_test_a_procs || !cg_test_b_procs) + goto cleanup; + + if (cg_create(cg_test_a) || cg_create(cg_test_b)) + goto cleanup; + + if (cg_enter_current(cg_test_b)) + goto cleanup; + + if (chown(cg_test_a_procs, test_euid, -1) || + chown(cg_test_b_procs, test_euid, -1)) + goto cleanup; + + targ.path = cg_test_b_procs; + pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack), + CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD, + &targ); + if (pid < 0) + goto cleanup; + + if (waitpid(pid, &status, 0) < 0) + goto cleanup; + + if (!WIFEXITED(status)) + goto cleanup; + + cg_test_b_procs_fd = targ.fd; + if (cg_test_b_procs_fd < 0) + goto cleanup; + + if (cg_enter_current(cg_test_a)) + goto cleanup; + + if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + cg_enter_current(root); + if (cg_test_b_procs_fd >= 0) + close(cg_test_b_procs_fd); + if (cg_test_b) + cg_destroy(cg_test_b); + if (cg_test_a) + cg_destroy(cg_test_a); + free(cg_test_b_procs); + free(cg_test_a_procs); + free(cg_test_b); + free(cg_test_a); + return ret; +} + #define T(x) { x, #x } struct corecg_test { int (*fn)(const char *root); @@ -689,6 +852,8 @@ struct corecg_test { T(test_cgcore_proc_migration), T(test_cgcore_thread_migration), T(test_cgcore_destroy), + T(test_cgcore_lesser_euid_open), + T(test_cgcore_lesser_ns_open), }; #undef T diff --git a/tools/testing/selftests/cgroup/test_stress.sh b/tools/testing/selftests/cgroup/test_stress.sh index 15d9d5896394..3c9c4554d5f6 100755 --- a/tools/testing/selftests/cgroup/test_stress.sh +++ b/tools/testing/selftests/cgroup/test_stress.sh @@ -1,4 +1,4 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -./with_stress.sh -s subsys -s fork ./test_core +./with_stress.sh -s subsys -s fork ${OUTPUT:-.}/test_core diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh index fedcb7b35af9..af5ea50ed5c0 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh @@ -172,6 +172,17 @@ flooding_filters_add() local lsb local i + # Prevent unwanted packets from entering the bridge and interfering + # with the test. 
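+ # A matchall drop on br0 egress keeps everything the bridge itself emits + # out of the test, while on $h1 a flower rule at pref 1 passes frames for + # the test MAC de:ad:be:ef:13:37 and a matchall at pref 2 drops the rest.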
+ tc qdisc add dev br0 clsact + tc filter add dev br0 egress protocol all pref 1 handle 1 \ + matchall skip_hw action drop + tc qdisc add dev $h1 clsact + tc filter add dev $h1 egress protocol all pref 1 handle 1 \ + flower skip_hw dst_mac de:ad:be:ef:13:37 action pass + tc filter add dev $h1 egress protocol all pref 2 handle 2 \ + matchall skip_hw action drop + tc qdisc add dev $rp2 clsact for i in $(eval echo {1..$num_remotes}); do @@ -194,6 +205,12 @@ flooding_filters_del() done tc qdisc del dev $rp2 clsact + + tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall + tc filter del dev $h1 egress protocol all pref 1 handle 1 flower + tc qdisc del dev $h1 clsact + tc filter del dev br0 egress protocol all pref 1 handle 1 matchall + tc qdisc del dev br0 clsact } flooding_check_packets() diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh index beee0d5646a6..11189f309627 100755 --- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh +++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh @@ -185,7 +185,7 @@ setup_prepare() tc filter add dev $eth0 ingress chain $(IS2 0 0) pref 1 \ protocol ipv4 flower skip_sw ip_proto udp dst_port 5201 \ - action police rate 50mbit burst 64k \ + action police rate 50mbit burst 64k conform-exceed drop/pipe \ action goto chain $(IS2 1 0) } diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc index fa928b431555..7c02509c71d0 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc @@ -21,7 +21,6 @@ check_error 'p:^/bar vfs_read' # NO_GROUP_NAME check_error 'p:^12345678901234567890123456789012345678901234567890123456789012345/bar vfs_read' # GROUP_TOO_LONG check_error 'p:^foo.1/bar vfs_read' # BAD_GROUP_NAME -check_error 'p:foo/^ vfs_read' # NO_EVENT_NAME check_error 'p:foo/^12345678901234567890123456789012345678901234567890123456789012345 vfs_read' # EVENT_TOO_LONG check_error 'p:foo/^bar.1 vfs_read' # BAD_EVENT_NAME diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile index 23207829ec75..6a0ed2e7881e 100644 --- a/tools/testing/selftests/futex/functional/Makefile +++ b/tools/testing/selftests/futex/functional/Makefile @@ -3,11 +3,11 @@ INCLUDES := -I../include -I../../ CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) LDLIBS := -lpthread -lrt -HEADERS := \ +LOCAL_HDRS := \ ../include/futextest.h \ ../include/atomic.h \ ../include/logging.h -TEST_GEN_FILES := \ +TEST_GEN_PROGS := \ futex_wait_timeout \ futex_wait_wouldblock \ futex_requeue_pi \ @@ -21,5 +21,3 @@ TEST_PROGS := run.sh top_srcdir = ../../../../.. 
KSFT_KHDR_INSTALL := 1 include ../../lib.mk - -$(TEST_GEN_FILES): $(HEADERS) diff --git a/tools/testing/selftests/intel_pstate/Makefile b/tools/testing/selftests/intel_pstate/Makefile index 39f0fa2a8fd6..05d66ef50c97 100644 --- a/tools/testing/selftests/intel_pstate/Makefile +++ b/tools/testing/selftests/intel_pstate/Makefile @@ -2,10 +2,10 @@ CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE LDLIBS += -lm -uname_M := $(shell uname -m 2>/dev/null || echo not) -ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/) +ARCH ?= $(shell uname -m 2>/dev/null || echo not) +ARCH_PROCESSED := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/) -ifeq (x86,$(ARCH)) +ifeq (x86,$(ARCH_PROCESSED)) TEST_GEN_FILES := msr aperf endif diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c index 2f37b90ee1a9..f600311fdc6a 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c +++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c @@ -73,20 +73,19 @@ void ucall_uninit(struct kvm_vm *vm) void ucall(uint64_t cmd, int nargs, ...) { - struct ucall uc = { - .cmd = cmd, - }; + struct ucall uc = {}; va_list va; int i; + WRITE_ONCE(uc.cmd, cmd); nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS; va_start(va, nargs); for (i = 0; i < nargs; ++i) - uc.args[i] = va_arg(va, uint64_t); + WRITE_ONCE(uc.args[i], va_arg(va, uint64_t)); va_end(va); - *ucall_exit_mmio_addr = (vm_vaddr_t)&uc; + WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc); } uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index d10c5c05bdf0..f5d2d27bee05 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1253,6 +1253,6 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, asm volatile("vmcall" : "=a"(r) - : "b"(a0), "c"(a1), "d"(a2), "S"(a3)); + : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3)); return r; } diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c index b019e0b8221c..84fda3b49073 100644 --- a/tools/testing/selftests/mqueue/mq_perf_tests.c +++ b/tools/testing/selftests/mqueue/mq_perf_tests.c @@ -180,6 +180,9 @@ void shutdown(int exit_val, char *err_cause, int line_no) if (in_shutdown++) return; + /* Free the cpu_set allocated using CPU_ALLOC in main function */ + CPU_FREE(cpu_set); + for (i = 0; i < num_cpus_to_pin; i++) if (cpu_threads[i]) { pthread_kill(cpu_threads[i], SIGUSR1); @@ -551,6 +554,12 @@ int main(int argc, char *argv[]) perror("sysconf(_SC_NPROCESSORS_ONLN)"); exit(1); } + + if (getuid() != 0) + ksft_exit_skip("Not running as root, but almost all tests " + "require root in order to modify\nsystem settings. 
" + "Exiting.\n"); + cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN)); cpu_set = CPU_ALLOC(cpus_online); if (cpu_set == NULL) { @@ -589,7 +598,7 @@ int main(int argc, char *argv[]) cpu_set)) { fprintf(stderr, "Any given CPU may " "only be given once.\n"); - exit(1); + goto err_code; } else CPU_SET_S(cpus_to_pin[cpu], cpu_set_size, cpu_set); @@ -607,7 +616,7 @@ int main(int argc, char *argv[]) queue_path = malloc(strlen(option) + 2); if (!queue_path) { perror("malloc()"); - exit(1); + goto err_code; } queue_path[0] = '/'; queue_path[1] = 0; @@ -622,17 +631,12 @@ int main(int argc, char *argv[]) fprintf(stderr, "Must pass at least one CPU to continuous " "mode.\n"); poptPrintUsage(popt_context, stderr, 0); - exit(1); + goto err_code; } else if (!continuous_mode) { num_cpus_to_pin = 1; cpus_to_pin[0] = cpus_online - 1; } - if (getuid() != 0) - ksft_exit_skip("Not running as root, but almost all tests " - "require root in order to modify\nsystem settings. " - "Exiting.\n"); - max_msgs = fopen(MAX_MSGS, "r+"); max_msgsize = fopen(MAX_MSGSIZE, "r+"); if (!max_msgs) @@ -740,4 +744,9 @@ int main(int argc, char *argv[]) sleep(1); } shutdown(0, "", 0); + +err_code: + CPU_FREE(cpu_set); + exit(1); + } diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh index ace976d89125..4a11ea2261cb 100755 --- a/tools/testing/selftests/net/fcnal-test.sh +++ b/tools/testing/selftests/net/fcnal-test.sh @@ -794,10 +794,16 @@ ipv4_ping() setup set_sysctl net.ipv4.raw_l3mdev_accept=1 2>/dev/null ipv4_ping_novrf + setup + set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null + ipv4_ping_novrf log_subsection "With VRF" setup "yes" ipv4_ping_vrf + setup "yes" + set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null + ipv4_ping_vrf } ################################################################################ @@ -2261,10 +2267,16 @@ ipv6_ping() log_subsection "No VRF" setup ipv6_ping_novrf + setup + set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null + ipv6_ping_novrf log_subsection "With VRF" setup "yes" ipv6_ping_vrf + setup "yes" + set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null + ipv6_ping_vrf } ################################################################################ diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index 4c7d33618437..7ece4131dc6f 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -931,6 +931,36 @@ ipv4_fcnal() set +e check_nexthop "dev veth1" "" log_test $? 0 "Nexthops removed on admin down" + + # nexthop route delete warning: route add with nhid and delete + # using device + run_cmd "$IP li set dev veth1 up" + run_cmd "$IP nexthop add id 12 via 172.16.1.3 dev veth1" + out1=`dmesg | grep "WARNING:.*fib_nh_match.*" | wc -l` + run_cmd "$IP route add 172.16.101.1/32 nhid 12" + run_cmd "$IP route delete 172.16.101.1/32 dev veth1" + out2=`dmesg | grep "WARNING:.*fib_nh_match.*" | wc -l` + [ $out1 -eq $out2 ] + rc=$? + log_test $rc 0 "Delete nexthop route warning" + run_cmd "$IP route delete 172.16.101.1/32 nhid 12" + run_cmd "$IP nexthop del id 12" + + run_cmd "$IP nexthop add id 21 via 172.16.1.6 dev veth1" + run_cmd "$IP ro add 172.16.101.0/24 nhid 21" + run_cmd "$IP ro del 172.16.101.0/24 nexthop via 172.16.1.7 dev veth1 nexthop via 172.16.1.8 dev veth1" + log_test $? 
2 "Delete multipath route with only nh id based entry" + + run_cmd "$IP nexthop add id 22 via 172.16.1.6 dev veth1" + run_cmd "$IP ro add 172.16.102.0/24 nhid 22" + run_cmd "$IP ro del 172.16.102.0/24 dev veth1" + log_test $? 2 "Delete route when specifying only nexthop device" + + run_cmd "$IP ro del 172.16.102.0/24 via 172.16.1.6" + log_test $? 2 "Delete route when specifying only gateway" + + run_cmd "$IP ro del 172.16.102.0/24" + log_test $? 0 "Delete route when not specifying nexthop attributes" } ipv4_grp_fcnal() diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index a7f53c2a9580..0f3bf90e04d3 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -1622,13 +1622,21 @@ ipv4_del_addr_test() $IP addr add dev dummy1 172.16.104.1/24 $IP addr add dev dummy1 172.16.104.11/24 + $IP addr add dev dummy1 172.16.104.12/24 + $IP addr add dev dummy1 172.16.104.13/24 $IP addr add dev dummy2 172.16.104.1/24 $IP addr add dev dummy2 172.16.104.11/24 + $IP addr add dev dummy2 172.16.104.12/24 $IP route add 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11 + $IP route add 172.16.106.0/24 dev lo src 172.16.104.12 + $IP route add table 0 172.16.107.0/24 via 172.16.104.2 src 172.16.104.13 $IP route add vrf red 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11 + $IP route add vrf red 172.16.106.0/24 dev lo src 172.16.104.12 set +e # removing address from device in vrf should only remove route from vrf table + echo " Regular FIB info" + $IP addr del dev dummy2 172.16.104.11/24 $IP ro ls vrf red | grep -q 172.16.105.0/24 log_test $? 1 "Route removed from VRF when source address deleted" @@ -1646,6 +1654,35 @@ ipv4_del_addr_test() $IP ro ls vrf red | grep -q 172.16.105.0/24 log_test $? 0 "Route in VRF is not removed by address delete" + # removing address from device in vrf should only remove route from vrf + # table even when the associated fib info only differs in table ID + echo " Identical FIB info with different table ID" + + $IP addr del dev dummy2 172.16.104.12/24 + $IP ro ls vrf red | grep -q 172.16.106.0/24 + log_test $? 1 "Route removed from VRF when source address deleted" + + $IP ro ls | grep -q 172.16.106.0/24 + log_test $? 0 "Route in default VRF not removed" + + $IP addr add dev dummy2 172.16.104.12/24 + $IP route add vrf red 172.16.106.0/24 dev lo src 172.16.104.12 + + $IP addr del dev dummy1 172.16.104.12/24 + $IP ro ls | grep -q 172.16.106.0/24 + log_test $? 1 "Route removed in default VRF when source address deleted" + + $IP ro ls vrf red | grep -q 172.16.106.0/24 + log_test $? 0 "Route in VRF is not removed by address delete" + + # removing address from device in default vrf should remove route from + # the default vrf even when route was inserted with a table ID of 0. + echo " Table ID 0" + + $IP addr del dev dummy1 172.16.104.13/24 + $IP ro ls | grep -q 172.16.107.0/24 + log_test $? 1 "Route removed in default VRF when source address deleted" + $IP li del dummy1 $IP li del dummy2 cleanup diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index be6fa808d219..54020d05a62b 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -1063,6 +1063,7 @@ learning_test() # FDB entry was installed. 
bridge link set dev $br_port1 flood off + ip link set $host1_if promisc on tc qdisc add dev $host1_if ingress tc filter add dev $host1_if ingress protocol ip pref 1 handle 101 \ flower dst_mac $mac action drop @@ -1073,7 +1074,7 @@ tc -j -s filter show dev $host1_if ingress \ | jq -e ".[] | select(.options.handle == 101) \ | select(.options.actions[0].stats.packets == 1)" &> /dev/null - check_fail $? "Packet reached second host when should not" + check_fail $? "Packet reached first host when it should not" $MZ $host1_if -c 1 -p 64 -a $mac -t ip -q sleep 1 @@ -1112,6 +1113,7 @@ tc filter del dev $host1_if ingress protocol ip pref 1 handle 101 flower tc qdisc del dev $host1_if ingress + ip link set $host1_if promisc off bridge link set dev $br_port1 flood on @@ -1129,6 +1131,7 @@ flood_test_do() { # Add an ACL on `host2_if` which will tell us whether the packet # was flooded to it or not. + ip link set $host2_if promisc on tc qdisc add dev $host2_if ingress tc filter add dev $host2_if ingress protocol ip pref 1 handle 101 \ flower dst_mac $mac action drop @@ -1146,6 +1149,7 @@ tc filter del dev $host2_if ingress protocol ip pref 1 handle 101 flower tc qdisc del dev $host2_if ingress + ip link set $host2_if promisc off return $err } diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh index a3402cd8d5b6..9ff22f28032d 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh @@ -61,9 +61,12 @@ setup_prepare() vrf_prepare mirror_gre_topo_create + # Avoid changing br1's PVID while it is operational as an L3 interface. + ip link set dev br1 down ip link set dev $swp3 master br1 bridge vlan add dev br1 vid 555 pvid untagged self + ip link set dev br1 up ip address add dev br1 192.0.2.129/28 ip address add dev br1 2001:db8:2::1/64 diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh index e714bae473fb..81f31179ac88 100755 --- a/tools/testing/selftests/net/forwarding/sch_red.sh +++ b/tools/testing/selftests/net/forwarding/sch_red.sh @@ -1,3 +1,4 @@ +#!/bin/bash # SPDX-License-Identifier: GPL-2.0 # This test sends one stream of traffic from H1 through a TBF shaper, to a RED diff --git a/tools/testing/selftests/net/forwarding/tc_police.sh b/tools/testing/selftests/net/forwarding/tc_police.sh index 160f9cccdfb7..eb09acdcb3ff 100755 --- a/tools/testing/selftests/net/forwarding/tc_police.sh +++ b/tools/testing/selftests/net/forwarding/tc_police.sh @@ -35,6 +35,8 @@ ALL_TESTS=" police_shared_test police_rx_mirror_test police_tx_mirror_test + police_mtu_rx_test + police_mtu_tx_test " NUM_NETIFS=6 source tc_common.sh @@ -290,6 +292,56 @@ police_tx_mirror_test() police_mirror_common_test $rp2 egress "police tx and mirror" } +police_mtu_common_test() { + RET=0 + + local test_name=$1; shift + local dev=$1; shift + local direction=$1; shift + + tc filter add dev $dev $direction protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action police mtu 1042 conform-exceed drop/ok + + # to count "conform" packets + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action drop + + mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \ + -t udp sp=12345,dp=54321 -p 1001 -c 10 -q + + mausezahn $h1 -a own -b $(mac_get
$rp1) -A 192.0.2.1 -B 198.51.100.1 \ + -t udp sp=12345,dp=54321 -p 1000 -c 3 -q + + tc_check_packets "dev $dev $direction" 101 13 + check_err $? "wrong packet counter" + + # "exceed" packets + local overlimits_t0=$(tc_rule_stats_get ${dev} 1 ${direction} .overlimits) + test ${overlimits_t0} = 10 + check_err $? "wrong overlimits, expected 10 got ${overlimits_t0}" + + # "conform" packets + tc_check_packets "dev $h2 ingress" 101 3 + check_err $? "forwarding error" + + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower + tc filter del dev $dev $direction protocol ip pref 1 handle 101 flower + + log_test "$test_name" +} + +police_mtu_rx_test() +{ + police_mtu_common_test "police mtu (rx)" $rp1 ingress +} + +police_mtu_tx_test() +{ + police_mtu_common_test "police mtu (tx)" $rp2 egress +} + setup_prepare() { h1=${NETIFS[p1]} diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c index b5277106df1f..b0cc082fbb84 100644 --- a/tools/testing/selftests/net/reuseport_bpf.c +++ b/tools/testing/selftests/net/reuseport_bpf.c @@ -330,7 +330,7 @@ static void test_extra_filter(const struct test_params p) if (bind(fd1, addr, sockaddr_size())) error(1, errno, "failed to bind recv socket 1"); - if (!bind(fd2, addr, sockaddr_size()) && errno != EADDRINUSE) + if (!bind(fd2, addr, sockaddr_size()) || errno != EADDRINUSE) error(1, errno, "bind socket 2 should fail with EADDRINUSE"); free(addr); diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index c9ce3dfa42ee..c3a905923ef2 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -782,7 +782,7 @@ kci_test_ipsec_offload() tmpl proto esp src $srcip dst $dstip spi 9 \ mode transport reqid 42 check_err $? - ip x p add dir out src $dstip/24 dst $srcip/24 \ + ip x p add dir in src $dstip/24 dst $srcip/24 \ tmpl proto esp src $dstip dst $srcip spi 9 \ mode transport reqid 42 check_err $? 
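A note on the reuseport_bpf change above: with the old && operator, a bind() that failed with the wrong errno made the first operand false and short-circuited the whole check, so the bad errno was silently accepted; the || form now flags both an unexpectedly successful bind() and a failure with any errno other than EADDRINUSE. A minimal standalone sketch of the corrected pattern (the helper name is illustrative, not part of the patch):

#include <errno.h>
#include <error.h>
#include <sys/socket.h>

/* Error out unless bind() fails with exactly EADDRINUSE. */
static void assert_bind_eaddrinuse(int fd, const struct sockaddr *addr, socklen_t len)
{
	if (!bind(fd, addr, len) || errno != EADDRINUSE)
		error(1, errno, "bind should fail with EADDRINUSE");
}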
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh index 80b5d352702e..dc932fd65363 100755 --- a/tools/testing/selftests/net/udpgso_bench.sh +++ b/tools/testing/selftests/net/udpgso_bench.sh @@ -120,7 +120,7 @@ run_all() { run_udp "${ipv4_args}" echo "ipv6" - run_tcp "${ipv4_args}" + run_tcp "${ipv6_args}" run_udp "${ipv6_args}" } diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh index b5eef5ffb58e..af3461cb5c40 100755 --- a/tools/testing/selftests/netfilter/nft_concat_range.sh +++ b/tools/testing/selftests/netfilter/nft_concat_range.sh @@ -31,7 +31,7 @@ BUGS="flush_remove_add reload" # List of possible paths to pktgen script from kernel tree for performance tests PKTGEN_SCRIPT_PATHS=" - ../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh + ../../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh pktgen/pktgen_bench_xmit_mode_netif_receive.sh" # Definition of set types: diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh index d7e07f4c3d7f..4e15e8167310 100755 --- a/tools/testing/selftests/netfilter/nft_nat.sh +++ b/tools/testing/selftests/netfilter/nft_nat.sh @@ -374,6 +374,45 @@ EOF return $lret } +test_local_dnat_portonly() +{ + local family=$1 + local daddr=$2 + local lret=0 + local sr_s + local sr_r + +ip netns exec "$ns0" nft -f /dev/stdin < #include +#include "../kselftest.h" #include "rseq.h" -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) - struct percpu_lock_entry { intptr_t v; } __attribute__((aligned(128))); @@ -168,7 +167,7 @@ struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list, for (;;) { struct percpu_list_node *head; intptr_t *targetptr, expectnot, *load; - off_t offset; + long offset; int ret, cpu; cpu = rseq_cpu_start(); diff --git a/tools/testing/selftests/rseq/compiler.h b/tools/testing/selftests/rseq/compiler.h new file mode 100644 index 000000000000..876eb6a7f75b --- /dev/null +++ b/tools/testing/selftests/rseq/compiler.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq/compiler.h + * + * Work-around asm goto compiler bugs. + * + * (C) Copyright 2021 - Mathieu Desnoyers + */ + +#ifndef RSEQ_COMPILER_H +#define RSEQ_COMPILER_H + +/* + * gcc prior to 4.8.2 miscompiles asm goto. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 + * + * gcc prior to 8.1.0 miscompiles asm goto at O1. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103908 + * + * clang prior to version 13.0.1 miscompiles asm goto at O2. + * https://github.com/llvm/llvm-project/issues/52735 + * + * Work around these issues by adding a volatile inline asm with + * memory clobber in the fallthrough after the asm goto and at each + * label target. Emit this for all compilers in case other similar + * issues are found in the future. 
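+ * + * In the per-architecture rseq headers this shows up as a call to + * rseq_after_asm_goto() on the fallthrough path right after each inline + * asm goto block, and as the first statement of the abort, cmpfail and + * error labels the asm can branch to.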
+ */ +#define rseq_after_asm_goto() asm volatile ("" : : : "memory") + +#endif /* RSEQ_COMPILER_H_ */ diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c index 384589095864..e29ecc7158e8 100644 --- a/tools/testing/selftests/rseq/param_test.c +++ b/tools/testing/selftests/rseq/param_test.c @@ -161,7 +161,7 @@ unsigned int yield_mod_cnt, nr_abort; " cbnz " INJECT_ASM_REG ", 222b\n" \ "333:\n" -#elif __PPC__ +#elif defined(__PPC__) #define RSEQ_INJECT_INPUT \ , [loop_cnt_1]"m"(loop_cnt[1]) \ @@ -368,9 +368,7 @@ void *test_percpu_spinlock_thread(void *arg) abort(); reps = thread_data->reps; for (i = 0; i < reps; i++) { - int cpu = rseq_cpu_start(); - - cpu = rseq_this_cpu_lock(&data->lock); + int cpu = rseq_this_cpu_lock(&data->lock); data->c[cpu].count++; rseq_percpu_unlock(&data->lock, cpu); #ifndef BENCHMARK @@ -551,7 +549,7 @@ struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list, for (;;) { struct percpu_list_node *head; intptr_t *targetptr, expectnot, *load; - off_t offset; + long offset; int ret; cpu = rseq_cpu_start(); diff --git a/tools/testing/selftests/rseq/rseq-abi.h b/tools/testing/selftests/rseq/rseq-abi.h new file mode 100644 index 000000000000..a8c44d9af71f --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-abi.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +#ifndef _RSEQ_ABI_H +#define _RSEQ_ABI_H + +/* + * rseq-abi.h + * + * Restartable sequences system call API + * + * Copyright (c) 2015-2022 Mathieu Desnoyers + */ + +#include +#include + +enum rseq_abi_cpu_id_state { + RSEQ_ABI_CPU_ID_UNINITIALIZED = -1, + RSEQ_ABI_CPU_ID_REGISTRATION_FAILED = -2, +}; + +enum rseq_abi_flags { + RSEQ_ABI_FLAG_UNREGISTER = (1 << 0), +}; + +enum rseq_abi_cs_flags_bit { + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, +}; + +enum rseq_abi_cs_flags { + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_PREEMPT = + (1U << RSEQ_ABI_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT), + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_SIGNAL = + (1U << RSEQ_ABI_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT), + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_MIGRATE = + (1U << RSEQ_ABI_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT), +}; + +/* + * struct rseq_abi_cs is aligned on 4 * 8 bytes to ensure it is always + * contained within a single cache-line. It is usually declared as + * link-time constant data. + */ +struct rseq_abi_cs { + /* Version of this structure. */ + __u32 version; + /* enum rseq_abi_cs_flags */ + __u32 flags; + __u64 start_ip; + /* Offset from start_ip. */ + __u64 post_commit_offset; + __u64 abort_ip; +} __attribute__((aligned(4 * sizeof(__u64)))); + +/* + * struct rseq_abi is aligned on 4 * 8 bytes to ensure it is always + * contained within a single cache-line. + * + * A single struct rseq_abi per thread is allowed. + */ +struct rseq_abi { + /* + * Restartable sequences cpu_id_start field. Updated by the + * kernel. Read by user-space with single-copy atomicity + * semantics. This field should only be read by the thread which + * registered this data structure. Aligned on 32-bit. Always + * contains a value in the range of possible CPUs, although the + * value may not be the actual current CPU (e.g. if rseq is not + * initialized). This CPU number value should always be compared + * against the value of the cpu_id field before performing a rseq + * commit or returning a value read from a data structure indexed + * using the cpu_id_start value. 
+ */ + __u32 cpu_id_start; + /* + * Restartable sequences cpu_id field. Updated by the kernel. + * Read by user-space with single-copy atomicity semantics. This + * field should only be read by the thread which registered this + * data structure. Aligned on 32-bit. Values + * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED + * have a special semantic: the former means "rseq uninitialized", + * and latter means "rseq initialization failed". This value is + * meant to be read within rseq critical sections and compared + * with the cpu_id_start value previously read, before performing + * the commit instruction, or read and compared with the + * cpu_id_start value before returning a value loaded from a data + * structure indexed using the cpu_id_start value. + */ + __u32 cpu_id; + /* + * Restartable sequences rseq_cs field. + * + * Contains NULL when no critical section is active for the current + * thread, or holds a pointer to the currently active struct rseq_cs. + * + * Updated by user-space, which sets the address of the currently + * active rseq_cs at the beginning of assembly instruction sequence + * block, and set to NULL by the kernel when it restarts an assembly + * instruction sequence block, as well as when the kernel detects that + * it is preempting or delivering a signal outside of the range + * targeted by the rseq_cs. Also needs to be set to NULL by user-space + * before reclaiming memory that contains the targeted struct rseq_cs. + * + * Read and set by the kernel. Set by user-space with single-copy + * atomicity semantics. This field should only be updated by the + * thread which registered this data structure. Aligned on 64-bit. + */ + union { + __u64 ptr64; + + /* + * The "arch" field provides architecture accessor for + * the ptr field based on architecture pointer size and + * endianness. + */ + struct { +#ifdef __LP64__ + __u64 ptr; +#elif defined(__BYTE_ORDER) ? (__BYTE_ORDER == __BIG_ENDIAN) : defined(__BIG_ENDIAN) + __u32 padding; /* Initialized to zero. */ + __u32 ptr; +#else + __u32 ptr; + __u32 padding; /* Initialized to zero. */ +#endif + } arch; + } rseq_cs; + + /* + * Restartable sequences flags field. + * + * This field should only be updated by the thread which + * registered this data structure. Read by the kernel. + * Mainly used for single-stepping through rseq critical sections + * with debuggers. + * + * - RSEQ_ABI_CS_FLAG_NO_RESTART_ON_PREEMPT + * Inhibit instruction sequence block restart on preemption + * for this thread. + * - RSEQ_ABI_CS_FLAG_NO_RESTART_ON_SIGNAL + * Inhibit instruction sequence block restart on signal + * delivery for this thread. + * - RSEQ_ABI_CS_FLAG_NO_RESTART_ON_MIGRATE + * Inhibit instruction sequence block restart on migration for + * this thread. 
+ */ + __u32 flags; +} __attribute__((aligned(4 * sizeof(__u64)))); + +#endif /* _RSEQ_ABI_H */ diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h index 5943c816c07c..893a11eca9d5 100644 --- a/tools/testing/selftests/rseq/rseq-arm.h +++ b/tools/testing/selftests/rseq/rseq-arm.h @@ -147,14 +147,11 @@ do { \ teardown \ "b %l[" __rseq_str(cmpfail_label) "]\n\t" -#define rseq_workaround_gcc_asm_size_guess() __asm__ __volatile__("") - static inline __attribute__((always_inline)) int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) { RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -185,8 +182,8 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "m" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -198,30 +195,31 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -255,8 +253,8 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expectnot] "r" (expectnot), @@ -270,19 +268,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -292,7 +292,6 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) { RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ #ifdef RSEQ_COMPARE_TWICE @@ -316,8 +315,8 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" 
(__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "m" (*v), [count] "Ir" (count) RSEQ_INJECT_INPUT @@ -328,14 +327,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); #endif } @@ -347,7 +347,6 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, { RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -381,8 +380,8 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -398,19 +397,21 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -422,7 +423,6 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, { RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -457,8 +457,8 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -474,19 +474,21 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -498,7 +500,6 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, { RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -537,8 +538,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" 
(__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* cmp2 input */ [v2] "m" (*v2), [expect2] "r" (expect2), @@ -554,21 +555,24 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("1st expected value comparison failed"); error3: + rseq_after_asm_goto(); rseq_bug("2nd expected value comparison failed"); #endif } @@ -582,7 +586,6 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -657,8 +660,8 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, "8:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -678,21 +681,21 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -706,7 +709,6 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -782,8 +784,8 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, "8:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -803,21 +805,21 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif }
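[Editor's note: the arm hunks above drop the old rseq_workaround_gcc_asm_size_guess() bracketing and instead call rseq_after_asm_goto() on every exit path out of the asm goto block (fall-through, abort, cmpfail and the RSEQ_COMPARE_TWICE error labels). The helper is defined in rseq.h, which is outside this excerpt; a minimal sketch consistent with how it is used here, assuming all it must do is act as a compiler barrier after the asm goto exits, is:]

static inline __attribute__((always_inline))
void rseq_after_asm_goto(void)
{
	/* Sketch only: an empty asm with a "memory" clobber keeps the
	 * compiler from moving or caching memory accesses across an
	 * asm goto exit label. The authoritative definition lives in
	 * rseq.h, which this patch excerpt does not show. */
	__asm__ __volatile__ ("" : : : "memory");
}

diff --git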
a/tools/testing/selftests/rseq/rseq-arm64.h b/tools/testing/selftests/rseq/rseq-arm64.h index 200dae9e4208..cbe190a4d005 100644 --- a/tools/testing/selftests/rseq/rseq-arm64.h +++ b/tools/testing/selftests/rseq/rseq-arm64.h @@ -230,8 +230,8 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "Qo" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -242,24 +242,28 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); - + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9) @@ -287,8 +291,8 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "Qo" (*v), [expectnot] "r" (expectnot), [load] "Qo" (*load), @@ -300,16 +304,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -337,8 +346,8 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "Qo" (*v), [count] "r" (count) RSEQ_INJECT_INPUT @@ -348,12 +357,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); #endif } @@ -388,8 +400,8 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [expect] "r" (expect), [v] "Qo" (*v), [newv] "r" (newv), @@ -402,17 +414,21 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + 
rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -447,8 +463,8 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [expect] "r" (expect), [v] "Qo" (*v), [newv] "r" (newv), @@ -461,17 +477,21 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -508,8 +528,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "Qo" (*v), [expect] "r" (expect), [v2] "Qo" (*v2), @@ -522,19 +542,24 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); - + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); error3: + rseq_after_asm_goto(); rseq_bug("2nd expected value comparison failed"); #endif } @@ -569,8 +594,8 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [expect] "r" (expect), [v] "Qo" (*v), [newv] "r" (newv), @@ -584,17 +609,21 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -629,8 +658,8 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [expect] "r" (expect), [v] "Qo" (*v), [newv] "r" (newv), @@ -644,17 +673,21 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return 
-1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } diff --git a/tools/testing/selftests/rseq/rseq-generic-thread-pointer.h b/tools/testing/selftests/rseq/rseq-generic-thread-pointer.h new file mode 100644 index 000000000000..38c584661571 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-generic-thread-pointer.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq-generic-thread-pointer.h + * + * (C) Copyright 2021 - Mathieu Desnoyers + */ + +#ifndef _RSEQ_GENERIC_THREAD_POINTER +#define _RSEQ_GENERIC_THREAD_POINTER + +#ifdef __cplusplus +extern "C" { +#endif + +/* Use gcc builtin thread pointer. */ +static inline void *rseq_thread_pointer(void) +{ + return __builtin_thread_pointer(); +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h index e989e7c14b09..878739fae2fd 100644 --- a/tools/testing/selftests/rseq/rseq-mips.h +++ b/tools/testing/selftests/rseq/rseq-mips.h @@ -154,14 +154,11 @@ do { \ teardown \ "b %l[" __rseq_str(cmpfail_label) "]\n\t" -#define rseq_workaround_gcc_asm_size_guess() __asm__ __volatile__("") - static inline __attribute__((always_inline)) int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) { RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -190,8 +187,8 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "m" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -203,14 +200,11 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: @@ -222,11 +216,10 @@ error2: static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -258,8 +251,8 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expectnot] "r" (expectnot), @@ -273,14 +266,11 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef 
RSEQ_COMPARE_TWICE error1: @@ -295,7 +285,6 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) { RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ #ifdef RSEQ_COMPARE_TWICE @@ -319,8 +308,8 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "m" (*v), [count] "Ir" (count) RSEQ_INJECT_INPUT @@ -331,10 +320,8 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE @@ -350,7 +337,6 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, { RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -382,8 +368,8 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -399,14 +385,11 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: @@ -423,7 +406,6 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, { RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -456,8 +438,8 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -473,14 +455,11 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: @@ -497,7 +476,6 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, { RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -532,8 +510,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" 
(rseq_get_abi()->rseq_cs.arch.ptr), /* cmp2 input */ [v2] "m" (*v2), [expect2] "r" (expect2), @@ -549,14 +527,11 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: @@ -577,7 +552,6 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -649,8 +623,8 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, "8:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -670,21 +644,16 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: - rseq_workaround_gcc_asm_size_guess(); rseq_bug("cpu_id comparison failed"); error2: - rseq_workaround_gcc_asm_size_guess(); rseq_bug("expected value comparison failed"); #endif } @@ -698,7 +667,6 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, RSEQ_INJECT_C(9) - rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -771,8 +739,8 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, "8:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -792,21 +760,16 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: - rseq_workaround_gcc_asm_size_guess(); rseq_bug("cpu_id comparison failed"); error2: - rseq_workaround_gcc_asm_size_guess(); rseq_bug("expected value comparison failed"); #endif } diff --git a/tools/testing/selftests/rseq/rseq-ppc-thread-pointer.h b/tools/testing/selftests/rseq/rseq-ppc-thread-pointer.h new file mode 100644 index 000000000000..263eee84fb76 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-ppc-thread-pointer.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq-ppc-thread-pointer.h + * + * (C) Copyright 2021 - Mathieu Desnoyers + */ + +#ifndef _RSEQ_PPC_THREAD_POINTER +#define _RSEQ_PPC_THREAD_POINTER + +#ifdef __cplusplus +extern "C" { +#endif + +static inline void *rseq_thread_pointer(void) +{ +#ifdef __powerpc64__ + register void *__result asm ("r13"); +#else + register void *__result asm ("r2"); +#endif + asm ("" : "=r" 
(__result)); + return __result; +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/tools/testing/selftests/rseq/rseq-ppc.h b/tools/testing/selftests/rseq/rseq-ppc.h index 76be90196fe4..bab8e0b9fb11 100644 --- a/tools/testing/selftests/rseq/rseq-ppc.h +++ b/tools/testing/selftests/rseq/rseq-ppc.h @@ -47,10 +47,13 @@ do { \ #ifdef __PPC64__ -#define STORE_WORD "std " -#define LOAD_WORD "ld " -#define LOADX_WORD "ldx " -#define CMP_WORD "cmpd " +#define RSEQ_STORE_LONG(arg) "std%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* To memory ("m" constraint) */ +#define RSEQ_STORE_INT(arg) "stw%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* To memory ("m" constraint) */ +#define RSEQ_LOAD_LONG(arg) "ld%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* From memory ("m" constraint) */ +#define RSEQ_LOAD_INT(arg) "lwz%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* From memory ("m" constraint) */ +#define RSEQ_LOADX_LONG "ldx " /* From base register ("b" constraint) */ +#define RSEQ_CMP_LONG "cmpd " +#define RSEQ_CMP_LONG_INT "cmpdi " #define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \ start_ip, post_commit_offset, abort_ip) \ @@ -89,10 +92,13 @@ do { \ #else /* #ifdef __PPC64__ */ -#define STORE_WORD "stw " -#define LOAD_WORD "lwz " -#define LOADX_WORD "lwzx " -#define CMP_WORD "cmpw " +#define RSEQ_STORE_LONG(arg) "stw%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* To memory ("m" constraint) */ +#define RSEQ_STORE_INT(arg) RSEQ_STORE_LONG(arg) /* To memory ("m" constraint) */ +#define RSEQ_LOAD_LONG(arg) "lwz%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* From memory ("m" constraint) */ +#define RSEQ_LOAD_INT(arg) RSEQ_LOAD_LONG(arg) /* From memory ("m" constraint) */ +#define RSEQ_LOADX_LONG "lwzx " /* From base register ("b" constraint) */ +#define RSEQ_CMP_LONG "cmpw " +#define RSEQ_CMP_LONG_INT "cmpwi " #define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \ start_ip, post_commit_offset, abort_ip) \ @@ -125,7 +131,7 @@ do { \ RSEQ_INJECT_ASM(1) \ "lis %%r17, (" __rseq_str(cs_label) ")@ha\n\t" \ "addi %%r17, %%r17, (" __rseq_str(cs_label) ")@l\n\t" \ - "stw %%r17, %[" __rseq_str(rseq_cs) "]\n\t" \ + RSEQ_STORE_INT(rseq_cs) "%%r17, %[" __rseq_str(rseq_cs) "]\n\t" \ __rseq_str(label) ":\n\t" #endif /* #ifdef __PPC64__ */ @@ -136,7 +142,7 @@ do { \ #define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \ RSEQ_INJECT_ASM(2) \ - "lwz %%r17, %[" __rseq_str(current_cpu_id) "]\n\t" \ + RSEQ_LOAD_INT(current_cpu_id) "%%r17, %[" __rseq_str(current_cpu_id) "]\n\t" \ "cmpw cr7, %[" __rseq_str(cpu_id) "], %%r17\n\t" \ "bne- cr7, " __rseq_str(label) "\n\t" @@ -153,25 +159,25 @@ do { \ * RSEQ_ASM_OP_* (else): doesn't have hard-code registers(unless cr7) */ #define RSEQ_ASM_OP_CMPEQ(var, expect, label) \ - LOAD_WORD "%%r17, %[" __rseq_str(var) "]\n\t" \ - CMP_WORD "cr7, %%r17, %[" __rseq_str(expect) "]\n\t" \ + RSEQ_LOAD_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" \ + RSEQ_CMP_LONG "cr7, %%r17, %[" __rseq_str(expect) "]\n\t" \ "bne- cr7, " __rseq_str(label) "\n\t" #define RSEQ_ASM_OP_CMPNE(var, expectnot, label) \ - LOAD_WORD "%%r17, %[" __rseq_str(var) "]\n\t" \ - CMP_WORD "cr7, %%r17, %[" __rseq_str(expectnot) "]\n\t" \ + RSEQ_LOAD_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" \ + RSEQ_CMP_LONG "cr7, %%r17, %[" __rseq_str(expectnot) "]\n\t" \ "beq- cr7, " __rseq_str(label) "\n\t" #define RSEQ_ASM_OP_STORE(value, var) \ - STORE_WORD "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t" + RSEQ_STORE_LONG(var) "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t" /* Load 
@var to r17 */ #define RSEQ_ASM_OP_R_LOAD(var) \ - LOAD_WORD "%%r17, %[" __rseq_str(var) "]\n\t" + RSEQ_LOAD_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" /* Store r17 to @var */ #define RSEQ_ASM_OP_R_STORE(var) \ - STORE_WORD "%%r17, %[" __rseq_str(var) "]\n\t" + RSEQ_STORE_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" /* Add @count to r17 */ #define RSEQ_ASM_OP_R_ADD(count) \ @@ -179,11 +185,11 @@ do { \ /* Load (r17 + voffp) to r17 */ #define RSEQ_ASM_OP_R_LOADX(voffp) \ - LOADX_WORD "%%r17, %[" __rseq_str(voffp) "], %%r17\n\t" + RSEQ_LOADX_LONG "%%r17, %[" __rseq_str(voffp) "], %%r17\n\t" /* TODO: implement a faster memcpy. */ #define RSEQ_ASM_OP_R_MEMCPY() \ - "cmpdi %%r19, 0\n\t" \ + RSEQ_CMP_LONG_INT "%%r19, 0\n\t" \ "beq 333f\n\t" \ "addi %%r20, %%r20, -1\n\t" \ "addi %%r21, %%r21, -1\n\t" \ @@ -191,16 +197,16 @@ do { \ "lbzu %%r18, 1(%%r20)\n\t" \ "stbu %%r18, 1(%%r21)\n\t" \ "addi %%r19, %%r19, -1\n\t" \ - "cmpdi %%r19, 0\n\t" \ + RSEQ_CMP_LONG_INT "%%r19, 0\n\t" \ "bne 222b\n\t" \ "333:\n\t" \ #define RSEQ_ASM_OP_R_FINAL_STORE(var, post_commit_label) \ - STORE_WORD "%%r17, %[" __rseq_str(var) "]\n\t" \ + RSEQ_STORE_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" \ __rseq_str(post_commit_label) ":\n\t" #define RSEQ_ASM_OP_FINAL_STORE(value, var, post_commit_label) \ - STORE_WORD "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t" \ + RSEQ_STORE_LONG(var) "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t" \ __rseq_str(post_commit_label) ":\n\t" static inline __attribute__((always_inline)) @@ -235,8 +241,8 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "m" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -248,23 +254,28 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9) @@ -301,8 +312,8 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expectnot] "r" (expectnot), @@ -316,16 +327,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -359,8 +375,8 @@ int rseq_addv(intptr_t *v, intptr_t count, 
int cpu) RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [count] "r" (count) @@ -372,12 +388,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); #endif } @@ -419,8 +438,8 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -436,16 +455,21 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -489,8 +513,8 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -506,16 +530,21 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -560,8 +589,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* cmp2 input */ [v2] "m" (*v2), [expect2] "r" (expect2), @@ -577,18 +606,24 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("1st expected value comparison failed"); error3: + rseq_after_asm_goto(); rseq_bug("2nd expected value comparison failed"); #endif } @@ -635,8 +670,8 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" 
(__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -653,16 +688,21 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -711,8 +751,8 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -729,23 +769,23 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } -#undef STORE_WORD -#undef LOAD_WORD -#undef LOADX_WORD -#undef CMP_WORD - #endif /* !RSEQ_SKIP_FASTPATH */ diff --git a/tools/testing/selftests/rseq/rseq-s390.h b/tools/testing/selftests/rseq/rseq-s390.h index 8ef94ad1cbb4..4e6dc5f0cb42 100644 --- a/tools/testing/selftests/rseq/rseq-s390.h +++ b/tools/testing/selftests/rseq/rseq-s390.h @@ -165,8 +165,8 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "m" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -178,16 +178,21 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -198,7 +203,7 @@ error2: */ static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9) @@ -233,8 +238,8 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expectnot] "r" (expectnot), @@ -248,16 +253,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); + 
rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -288,8 +298,8 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [count] "r" (count) @@ -301,12 +311,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); #endif } @@ -347,8 +360,8 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -364,16 +377,21 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -426,8 +444,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* cmp2 input */ [v2] "m" (*v2), [expect2] "r" (expect2), @@ -443,18 +461,24 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("1st expected value comparison failed"); error3: + rseq_after_asm_goto(); rseq_bug("2nd expected value comparison failed"); #endif } @@ -534,8 +558,8 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, #endif : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -555,16 +579,21 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + 
rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } diff --git a/tools/testing/selftests/rseq/rseq-skip.h b/tools/testing/selftests/rseq/rseq-skip.h index 72750b5905a9..7b53dac1fcdd 100644 --- a/tools/testing/selftests/rseq/rseq-skip.h +++ b/tools/testing/selftests/rseq/rseq-skip.h @@ -13,7 +13,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { return -1; } diff --git a/tools/testing/selftests/rseq/rseq-thread-pointer.h b/tools/testing/selftests/rseq/rseq-thread-pointer.h new file mode 100644 index 000000000000..977c25d758b2 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-thread-pointer.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq-thread-pointer.h + * + * (C) Copyright 2021 - Mathieu Desnoyers + */ + +#ifndef _RSEQ_THREAD_POINTER +#define _RSEQ_THREAD_POINTER + +#if defined(__x86_64__) || defined(__i386__) +#include "rseq-x86-thread-pointer.h" +#elif defined(__PPC__) +#include "rseq-ppc-thread-pointer.h" +#else +#include "rseq-generic-thread-pointer.h" +#endif + +#endif diff --git a/tools/testing/selftests/rseq/rseq-x86-thread-pointer.h b/tools/testing/selftests/rseq/rseq-x86-thread-pointer.h new file mode 100644 index 000000000000..d3133587d996 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-x86-thread-pointer.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq-x86-thread-pointer.h + * + * (C) Copyright 2021 - Mathieu Desnoyers + */ + +#ifndef _RSEQ_X86_THREAD_POINTER +#define _RSEQ_X86_THREAD_POINTER + +#include <features.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#if __GNUC_PREREQ (11, 1) +static inline void *rseq_thread_pointer(void) +{ + return __builtin_thread_pointer(); +} +#else +static inline void *rseq_thread_pointer(void) +{ + void *__result; + +# ifdef __x86_64__ + __asm__ ("mov %%fs:0, %0" : "=r" (__result)); +# else + __asm__ ("mov %%gs:0, %0" : "=r" (__result)); +# endif + return __result; +} +#endif /* !GCC 11 */ + +#ifdef __cplusplus +} +#endif + +#endif
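[Editor's note: rseq-thread-pointer.h above simply dispatches to the x86, ppc or generic accessor, and the rseq-x86.h hunks that follow stop taking the address of the __rseq_abi TLS symbol, addressing the registered area through the thread pointer instead. A sketch of the accessor this enables, assuming the rseq_offset variable and rseq_get_abi() helper that this series adds in rseq.h (not shown in this excerpt):]

#include <stddef.h>
#include <stdint.h>

/* Offset of the registered struct rseq_abi from the thread pointer;
 * assumed to be computed at registration time elsewhere in the series. */
extern ptrdiff_t rseq_offset;

static inline struct rseq_abi *rseq_get_abi(void)
{
	/* One addition locates the rseq area for the current thread. */
	return (struct rseq_abi *)((uintptr_t)rseq_thread_pointer() + rseq_offset);
}

[On x86 the same offset can be applied directly against the %fs segment base (%gs on i386), which is what the RSEQ_ASM_TP_SEGMENT:...(%[rseq_offset]) operands in the hunks below do, without keeping the ABI address in a register.]

diff --git a/tools/testing/selftests/rseq/rseq-x86.h b/tools/testing/selftests/rseq/rseq-x86.h index 640411518e46..bd01dc41ca13 100644 --- a/tools/testing/selftests/rseq/rseq-x86.h +++ b/tools/testing/selftests/rseq/rseq-x86.h @@ -28,6 +28,8 @@ #ifdef __x86_64__ +#define RSEQ_ASM_TP_SEGMENT %%fs + #define rseq_smp_mb() \ __asm__ __volatile__ ("lock; addl $0,-128(%%rsp)" ::: "memory", "cc") #define rseq_smp_rmb() rseq_barrier() @@ -123,14 +125,14 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs.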
*/ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpq %[v], %[expect]\n\t" "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "cmpq %[v], %[expect]\n\t" "jnz %l[error2]\n\t" #endif @@ -141,7 +143,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), [v] "m" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -152,16 +154,21 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -172,7 +179,7 @@ error2: */ static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9) @@ -184,15 +191,15 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "movq %[v], %%rbx\n\t" "cmpq %%rbx, %[expectnot]\n\t" "je %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "movq %[v], %%rbx\n\t" "cmpq %%rbx, %[expectnot]\n\t" "je %l[error2]\n\t" @@ -207,7 +214,7 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [expectnot] "r" (expectnot), @@ -220,16 +227,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -245,11 +257,11 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) #endif /* Start rseq by storing table entry pointer into rseq_cs. 
*/ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) #endif /* final store */ "addq %[count], %[v]\n\t" @@ -258,7 +270,7 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [count] "er" (count) @@ -269,12 +281,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); #endif } @@ -286,7 +301,7 @@ error1: * *pval += inc; */ static inline __attribute__((always_inline)) -int rseq_offset_deref_addv(intptr_t *ptr, off_t off, intptr_t inc, int cpu) +int rseq_offset_deref_addv(intptr_t *ptr, long off, intptr_t inc, int cpu) { RSEQ_INJECT_C(9) @@ -296,11 +311,11 @@ int rseq_offset_deref_addv(intptr_t *ptr, off_t off, intptr_t inc, int cpu) RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) #endif /* get p+v */ "movq %[ptr], %%rbx\n\t" @@ -314,7 +329,7 @@ int rseq_offset_deref_addv(intptr_t *ptr, off_t off, intptr_t inc, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [ptr] "m" (*ptr), [off] "er" (off), @@ -351,14 +366,14 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. 
*/ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpq %[v], %[expect]\n\t" "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "cmpq %[v], %[expect]\n\t" "jnz %l[error2]\n\t" #endif @@ -372,7 +387,7 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -387,16 +402,21 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -426,8 +446,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpq %[v], %[expect]\n\t" "jnz %l[cmpfail]\n\t" @@ -436,7 +456,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(5) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "cmpq %[v], %[expect]\n\t" "jnz %l[error2]\n\t" "cmpq %[v2], %[expect2]\n\t" @@ -449,7 +469,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* cmp2 input */ [v2] "m" (*v2), [expect2] "r" (expect2), @@ -464,18 +484,24 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("1st expected value comparison failed"); error3: + rseq_after_asm_goto(); rseq_bug("2nd expected value comparison failed"); #endif } @@ -500,14 +526,14 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, "movq %[dst], %[rseq_scratch1]\n\t" "movq %[len], %[rseq_scratch2]\n\t" /* Start rseq by storing table entry pointer into rseq_cs. 
*/ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpq %[v], %[expect]\n\t" "jnz 5f\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 6f) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 6f) "cmpq %[v], %[expect]\n\t" "jnz 7f\n\t" #endif @@ -555,7 +581,7 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, #endif : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -574,16 +600,21 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -600,7 +631,9 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, #endif /* !RSEQ_SKIP_FASTPATH */ -#elif __i386__ +#elif defined(__i386__) + +#define RSEQ_ASM_TP_SEGMENT %%gs #define rseq_smp_mb() \ __asm__ __volatile__ ("lock; addl $0,-128(%%esp)" ::: "memory", "cc") @@ -701,14 +734,14 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. 
*/ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpl %[v], %[expect]\n\t" "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "cmpl %[v], %[expect]\n\t" "jnz %l[error2]\n\t" #endif @@ -719,7 +752,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), [v] "m" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -730,16 +763,21 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -750,7 +788,7 @@ error2: */ static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9) @@ -762,15 +800,15 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "movl %[v], %%ebx\n\t" "cmpl %%ebx, %[expectnot]\n\t" "je %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "movl %[v], %%ebx\n\t" "cmpl %%ebx, %[expectnot]\n\t" "je %l[error2]\n\t" @@ -785,7 +823,7 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [expectnot] "r" (expectnot), @@ -798,16 +836,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -823,11 +866,11 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) #endif /* Start rseq by storing table entry pointer into rseq_cs. 
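The signature change from "off_t voffp" to "long voffp" is more than style: on a 32-bit build with -D_FILE_OFFSET_BITS=64, off_t widens to 64 bits while the inline asm consumes a register-sized displacement, so the upper half would be silently dropped. long always matches the register width here. A quick standalone check (file name hypothetical):

/* gcc -m32 -D_FILE_OFFSET_BITS=64 voffp-check.c prints 8/4/4,
 * showing the mismatch the patch removes. */
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	printf("sizeof(off_t)=%zu sizeof(long)=%zu sizeof(void *)=%zu\n",
	       sizeof(off_t), sizeof(long), sizeof(void *));
	return 0;
}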
*/ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) #endif /* final store */ "addl %[count], %[v]\n\t" @@ -836,7 +879,7 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [count] "ir" (count) @@ -847,12 +890,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); #endif } @@ -872,14 +918,14 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpl %[v], %[expect]\n\t" "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "cmpl %[v], %[expect]\n\t" "jnz %l[error2]\n\t" #endif @@ -894,7 +940,7 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* try store input */ [v2] "m" (*v2), [newv2] "m" (newv2), @@ -909,16 +955,21 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -938,15 +989,15 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. 
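rseq_addv() is the simplest of these primitives: add count to a location owned by the current CPU, aborting with a nonzero return if the thread migrates, is preempted, or takes a signal mid-sequence. A usage sketch built on the selftest helpers this patch modifies; it assumes rseq.h is on the include path, the thread is registered (automatic once libc owns rseq), and the 64-CPU bound is purely illustrative:

#include <stdint.h>
#include "rseq.h"	/* selftest header updated later in this patch */

static intptr_t percpu_count[64];	/* illustrative per-CPU slots */

static void percpu_inc(void)
{
	int cpu;

	/* Retry on abort: rseq_addv() returns nonzero when the sequence
	 * was interrupted, so re-read the CPU and try again. */
	do {
		cpu = rseq_cpu_start();
	} while (rseq_addv(&percpu_count[cpu], 1, cpu) != 0);
}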
*/ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "movl %[expect], %%eax\n\t" "cmpl %[v], %%eax\n\t" "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "movl %[expect], %%eax\n\t" "cmpl %[v], %%eax\n\t" "jnz %l[error2]\n\t" @@ -962,7 +1013,7 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -977,16 +1028,21 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif @@ -1008,8 +1064,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpl %[v], %[expect]\n\t" "jnz %l[cmpfail]\n\t" @@ -1018,7 +1074,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(5) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "cmpl %[v], %[expect]\n\t" "jnz %l[error2]\n\t" "cmpl %[expect2], %[v2]\n\t" @@ -1032,7 +1088,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* cmp2 input */ [v2] "m" (*v2), [expect2] "r" (expect2), @@ -1047,18 +1103,24 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("1st expected value comparison failed"); error3: + rseq_after_asm_goto(); rseq_bug("2nd expected value comparison failed"); #endif } @@ -1084,15 +1146,15 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, "movl %[dst], %[rseq_scratch1]\n\t" "movl %[len], %[rseq_scratch2]\n\t" /* Start rseq by storing table entry pointer into rseq_cs. 
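A note on the _release variants above: x86 is a total-store-order machine, so an ordinary store already has release semantics and these macros only need to prevent compiler reordering, not emit a fence. For comparison, the portable C11 equivalent, which compiles to a plain mov on x86:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	_Atomic int ready = 0;
	int payload = 42;

	/* Release store: publishes everything sequenced before it. On x86
	 * this is a plain store, since TSO forbids store-store reordering. */
	atomic_store_explicit(&ready, payload, memory_order_release);
	printf("%d\n", atomic_load_explicit(&ready, memory_order_acquire));
	return 0;
}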
*/ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "movl %[expect], %%eax\n\t" "cmpl %%eax, %[v]\n\t" "jnz 5f\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 6f) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 6f) "movl %[expect], %%eax\n\t" "cmpl %%eax, %[v]\n\t" "jnz 7f\n\t" @@ -1142,7 +1204,7 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, #endif : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [expect] "m" (expect), @@ -1161,16 +1223,21 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -1196,15 +1263,15 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, "movl %[dst], %[rseq_scratch1]\n\t" "movl %[len], %[rseq_scratch2]\n\t" /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "movl %[expect], %%eax\n\t" "cmpl %%eax, %[v]\n\t" "jnz 5f\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 6f) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 6f) "movl %[expect], %%eax\n\t" "cmpl %%eax, %[v]\n\t" "jnz 7f\n\t" @@ -1255,7 +1322,7 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, #endif : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [expect] "m" (expect), @@ -1274,16 +1341,21 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index 7159eb777fd3..986b9458efb2 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -26,104 +26,114 @@ #include #include #include +#include +#include +#include "../kselftest.h" #include "rseq.h" -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +static const ptrdiff_t *libc_rseq_offset_p; +static const unsigned int *libc_rseq_size_p; +static const unsigned int *libc_rseq_flags_p; -__thread volatile struct rseq 
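Every success, abort, and error path now calls rseq_after_asm_goto() before leaving the function. Its definition is not part of this hunk; a plausible minimal shape (an assumption, not quoted from the patch) is an empty asm with a memory clobber, forcing the compiler to treat memory as modified across the asm goto labels on toolchains that would otherwise miscompile them:

/* Assumed shape of the helper invoked on each asm-goto exit path. */
static inline __attribute__((always_inline))
void rseq_after_asm_goto(void)
{
	__asm__ volatile ("" : : : "memory");
}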
__rseq_abi = { - .cpu_id = RSEQ_CPU_ID_UNINITIALIZED, -}; +/* Offset from the thread pointer to the rseq area. */ +ptrdiff_t rseq_offset; -/* - * Shared with other libraries. This library may take rseq ownership if it is - * still 0 when executing the library constructor. Set to 1 by library - * constructor when handling rseq. Set to 0 in destructor if handling rseq. - */ -int __rseq_handled; +/* Size of the registered rseq area. 0 if the registration was + unsuccessful. */ +unsigned int rseq_size = -1U; + +/* Flags used during rseq registration. */ +unsigned int rseq_flags; -/* Whether this library have ownership of rseq registration. */ static int rseq_ownership; -static __thread volatile uint32_t __rseq_refcount; +static +__thread struct rseq_abi __rseq_abi __attribute__((tls_model("initial-exec"))) = { + .cpu_id = RSEQ_ABI_CPU_ID_UNINITIALIZED, +}; -static void signal_off_save(sigset_t *oldset) -{ - sigset_t set; - int ret; - - sigfillset(&set); - ret = pthread_sigmask(SIG_BLOCK, &set, oldset); - if (ret) - abort(); -} - -static void signal_restore(sigset_t oldset) -{ - int ret; - - ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL); - if (ret) - abort(); -} - -static int sys_rseq(volatile struct rseq *rseq_abi, uint32_t rseq_len, +static int sys_rseq(struct rseq_abi *rseq_abi, uint32_t rseq_len, int flags, uint32_t sig) { return syscall(__NR_rseq, rseq_abi, rseq_len, flags, sig); } +int rseq_available(void) +{ + int rc; + + rc = sys_rseq(NULL, 0, 0, 0); + if (rc != -1) + abort(); + switch (errno) { + case ENOSYS: + return 0; + case EINVAL: + return 1; + default: + abort(); + } +} + int rseq_register_current_thread(void) { - int rc, ret = 0; - sigset_t oldset; + int rc; - if (!rseq_ownership) + if (!rseq_ownership) { + /* Treat libc's ownership as a successful registration. */ return 0; - signal_off_save(&oldset); - if (__rseq_refcount == UINT_MAX) { - ret = -1; - goto end; } - if (__rseq_refcount++) - goto end; - rc = sys_rseq(&__rseq_abi, sizeof(struct rseq), 0, RSEQ_SIG); - if (!rc) { - assert(rseq_current_cpu_raw() >= 0); - goto end; - } - if (errno != EBUSY) - __rseq_abi.cpu_id = RSEQ_CPU_ID_REGISTRATION_FAILED; - ret = -1; - __rseq_refcount--; -end: - signal_restore(oldset); - return ret; + rc = sys_rseq(&__rseq_abi, sizeof(struct rseq_abi), 0, RSEQ_SIG); + if (rc) + return -1; + assert(rseq_current_cpu_raw() >= 0); + return 0; } int rseq_unregister_current_thread(void) { - int rc, ret = 0; - sigset_t oldset; + int rc; - if (!rseq_ownership) + if (!rseq_ownership) { + /* Treat libc's ownership as a successful unregistration. 
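The new rseq_available() probe works by calling the syscall with deliberately invalid arguments: rseq(NULL, 0, 0, 0) must always fail, so ENOSYS means the kernel lacks rseq while EINVAL means the syscall exists and rejected the arguments. The same probe as a standalone program, assuming the installed kernel headers define __NR_rseq:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* A NULL area with zero length is rejected by any rseq-aware kernel. */
	if (syscall(__NR_rseq, NULL, 0, 0, 0) != -1)
		return 2;	/* must not succeed */
	if (errno == ENOSYS)
		puts("rseq: not implemented by this kernel");
	else if (errno == EINVAL)
		puts("rseq: available");
	else
		printf("rseq: unexpected errno %d\n", errno);
	return 0;
}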
*/ return 0; - signal_off_save(&oldset); - if (!__rseq_refcount) { - ret = -1; - goto end; } - if (--__rseq_refcount) - goto end; - rc = sys_rseq(&__rseq_abi, sizeof(struct rseq), - RSEQ_FLAG_UNREGISTER, RSEQ_SIG); - if (!rc) - goto end; - __rseq_refcount = 1; - ret = -1; -end: - signal_restore(oldset); - return ret; + rc = sys_rseq(&__rseq_abi, sizeof(struct rseq_abi), RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG); + if (rc) + return -1; + return 0; +} + +static __attribute__((constructor)) +void rseq_init(void) +{ + libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset"); + libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size"); + libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags"); + if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p) { + /* rseq registration owned by glibc */ + rseq_offset = *libc_rseq_offset_p; + rseq_size = *libc_rseq_size_p; + rseq_flags = *libc_rseq_flags_p; + return; + } + if (!rseq_available()) + return; + rseq_ownership = 1; + rseq_offset = (void *)&__rseq_abi - rseq_thread_pointer(); + rseq_size = sizeof(struct rseq_abi); + rseq_flags = 0; +} + +static __attribute__((destructor)) +void rseq_exit(void) +{ + if (!rseq_ownership) + return; + rseq_offset = 0; + rseq_size = -1U; + rseq_ownership = 0; } int32_t rseq_fallback_current_cpu(void) @@ -137,20 +147,3 @@ int32_t rseq_fallback_current_cpu(void) } return cpu; } - -void __attribute__((constructor)) rseq_init(void) -{ - /* Check whether rseq is handled by another library. */ - if (__rseq_handled) - return; - __rseq_handled = 1; - rseq_ownership = 1; -} - -void __attribute__((destructor)) rseq_fini(void) -{ - if (!rseq_ownership) - return; - __rseq_handled = 0; - rseq_ownership = 0; -} diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h index 3f63eb362b92..9d850b290c2e 100644 --- a/tools/testing/selftests/rseq/rseq.h +++ b/tools/testing/selftests/rseq/rseq.h @@ -16,7 +16,9 @@ #include #include #include -#include +#include +#include "rseq-abi.h" +#include "compiler.h" /* * Empty code injection macros, override when testing. @@ -43,8 +45,20 @@ #define RSEQ_INJECT_FAILED #endif -extern __thread volatile struct rseq __rseq_abi; -extern int __rseq_handled; +#include "rseq-thread-pointer.h" + +/* Offset from the thread pointer to the rseq area. */ +extern ptrdiff_t rseq_offset; +/* Size of the registered rseq area. 0 if the registration was + unsuccessful. */ +extern unsigned int rseq_size; +/* Flags used during rseq registration. 
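The constructor's dlsym(RTLD_NEXT, ...) lookups detect a libc (glibc 2.35 and later) that registers rseq itself and exports __rseq_offset, __rseq_size and __rseq_flags; only when those symbols are absent does the selftest library register its own area. A standalone sketch of the same detection that also derives the per-thread area address the way rseq_get_abi() does; it assumes GCC 11+/recent clang for __builtin_thread_pointer(), a 4-byte cpu_id offset, and may need -ldl on older glibc:

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const ptrdiff_t *offp = dlsym(RTLD_DEFAULT, "__rseq_offset");
	const unsigned int *sizep = dlsym(RTLD_DEFAULT, "__rseq_size");

	if (!offp || !sizep || !*sizep) {
		puts("libc does not own rseq; register it ourselves");
		return 0;
	}
	/* Same computation as rseq_get_abi(): thread pointer + offset;
	 * + 4 is the assumed offset of cpu_id in the rseq area. */
	uint32_t *cpu_id = (uint32_t *)
		((uintptr_t)__builtin_thread_pointer() + *offp + 4);
	printf("glibc rseq: offset=%td size=%u cpu=%u\n",
	       *offp, *sizep, *cpu_id);
	return 0;
}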
*/ +extern unsigned int rseq_flags; + +static inline struct rseq_abi *rseq_get_abi(void) +{ + return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset); +} #define rseq_likely(x) __builtin_expect(!!(x), 1) #define rseq_unlikely(x) __builtin_expect(!!(x), 0) @@ -108,7 +122,7 @@ int32_t rseq_fallback_current_cpu(void); */ static inline int32_t rseq_current_cpu_raw(void) { - return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id); + return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id); } /* @@ -124,7 +138,7 @@ static inline int32_t rseq_current_cpu_raw(void) */ static inline uint32_t rseq_cpu_start(void) { - return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id_start); + return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id_start); } static inline uint32_t rseq_current_cpu(void) @@ -139,11 +153,7 @@ static inline uint32_t rseq_current_cpu(void) static inline void rseq_clear_rseq_cs(void) { -#ifdef __LP64__ - __rseq_abi.rseq_cs.ptr = 0; -#else - __rseq_abi.rseq_cs.ptr.ptr32 = 0; -#endif + RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs.arch.ptr, 0); } /* diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index dc21dc49b426..413a7b9f3c4d 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -798,7 +798,7 @@ void kill_thread_or_group(struct __test_metadata *_metadata, .len = (unsigned short)ARRAY_SIZE(filter_thread), .filter = filter_thread, }; - int kill = kill_how == KILL_PROCESS ? SECCOMP_RET_KILL_PROCESS : 0xAAAAAAAAA; + int kill = kill_how == KILL_PROCESS ? SECCOMP_RET_KILL_PROCESS : 0xAAAAAAAA; struct sock_filter filter_process[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), @@ -951,7 +951,7 @@ TEST(ERRNO_valid) ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); - EXPECT_EQ(-1, read(0, NULL, 0)); + EXPECT_EQ(-1, read(-1, NULL, 0)); EXPECT_EQ(E2BIG, errno); } @@ -970,7 +970,7 @@ TEST(ERRNO_zero) EXPECT_EQ(parent, syscall(__NR_getppid)); /* "errno" of 0 is ok. */ - EXPECT_EQ(0, read(0, NULL, 0)); + EXPECT_EQ(0, read(-1, NULL, 0)); } /* @@ -991,7 +991,7 @@ TEST(ERRNO_capped) ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); - EXPECT_EQ(-1, read(0, NULL, 0)); + EXPECT_EQ(-1, read(-1, NULL, 0)); EXPECT_EQ(4095, errno); } @@ -1022,7 +1022,7 @@ TEST(ERRNO_order) ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); - EXPECT_EQ(-1, read(0, NULL, 0)); + EXPECT_EQ(-1, read(-1, NULL, 0)); EXPECT_EQ(12, errno); } @@ -2575,7 +2575,7 @@ void *tsync_sibling(void *data) ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); if (!ret) return (void *)SIBLING_EXIT_NEWPRIVS; - read(0, NULL, 0); + read(-1, NULL, 0); return (void *)SIBLING_EXIT_UNKILLED; } diff --git a/tools/testing/selftests/timers/clocksource-switch.c b/tools/testing/selftests/timers/clocksource-switch.c index bfc974b4572d..c18313a5f357 100644 --- a/tools/testing/selftests/timers/clocksource-switch.c +++ b/tools/testing/selftests/timers/clocksource-switch.c @@ -110,10 +110,10 @@ int run_tests(int secs) sprintf(buf, "./inconsistency-check -t %i", secs); ret = system(buf); - if (ret) - return ret; + if (WIFEXITED(ret) && WEXITSTATUS(ret)) + return WEXITSTATUS(ret); ret = system("./nanosleep"); - return ret; + return WIFEXITED(ret) ? 
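The seccomp hunks replace read(0, NULL, 0) with read(-1, NULL, 0) because a zero-length read on a valid descriptor legitimately succeeds and returns 0, which makes it useless for proving that the filter forced the result; an invalid descriptor guarantees the unfiltered syscall fails with EBADF, so any other outcome must come from the filter. Demonstrated without seccomp:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Zero-length read on stdin: succeeds and returns 0. */
	printf("read(0, NULL, 0)  -> %zd\n", read(0, NULL, 0));

	/* Invalid fd: always fails, so an injected errno is unambiguous. */
	errno = 0;
	printf("read(-1, NULL, 0) -> %zd (errno=%d, EBADF=%d)\n",
	       read(-1, NULL, 0), errno, EBADF);
	return 0;
}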
WEXITSTATUS(ret) : 0; } diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c index 5397de708d3c..48b9a803235a 100644 --- a/tools/testing/selftests/timers/valid-adjtimex.c +++ b/tools/testing/selftests/timers/valid-adjtimex.c @@ -40,7 +40,7 @@ #define ADJ_SETOFFSET 0x0100 #include -static int clock_adjtime(clockid_t id, struct timex *tx) +int clock_adjtime(clockid_t id, struct timex *tx) { return syscall(__NR_clock_adjtime, id, tx); } diff --git a/tools/testing/selftests/tpm2/tpm2.py b/tools/testing/selftests/tpm2/tpm2.py index f34486cd7342..3e67fdb518ec 100644 --- a/tools/testing/selftests/tpm2/tpm2.py +++ b/tools/testing/selftests/tpm2/tpm2.py @@ -370,6 +370,10 @@ class Client: fcntl.fcntl(self.tpm, fcntl.F_SETFL, flags) self.tpm_poll = select.poll() + def __del__(self): + if self.tpm: + self.tpm.close() + def close(self): self.tpm.close() diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 01ec6876e8f5..d8479552e222 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -44,9 +44,9 @@ CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_prog CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_64bit_program.c) CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_program.c -no-pie) -TARGETS := protection_keys -BINARIES_32 := $(TARGETS:%=%_32) -BINARIES_64 := $(TARGETS:%=%_64) +VMTARGETS := protection_keys +BINARIES_32 := $(VMTARGETS:%=%_32) +BINARIES_64 := $(VMTARGETS:%=%_64) ifeq ($(CAN_BUILD_WITH_NOPIE),1) CFLAGS += -no-pie @@ -101,7 +101,7 @@ $(BINARIES_32): CFLAGS += -m32 $(BINARIES_32): LDLIBS += -lrt -ldl -lm $(BINARIES_32): $(OUTPUT)/%_32: %.c $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@ -$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t)))) +$(foreach t,$(VMTARGETS),$(eval $(call gen-target-rule-32,$(t)))) endif ifeq ($(CAN_BUILD_X86_64),1) @@ -109,7 +109,7 @@ $(BINARIES_64): CFLAGS += -m64 $(BINARIES_64): LDLIBS += -lrt -ldl $(BINARIES_64): $(OUTPUT)/%_64: %.c $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@ -$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t)))) +$(foreach t,$(VMTARGETS),$(eval $(call gen-target-rule-64,$(t)))) endif # x86_64 users should be encouraged to install 32-bit libraries diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c index b00b1bfd9d8e..cb1108bc9249 100644 --- a/tools/thermal/tmon/sysfs.c +++ b/tools/thermal/tmon/sysfs.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -33,9 +34,9 @@ int sysfs_set_ulong(char *path, char *filename, unsigned long val) { FILE *fd; int ret = -1; - char filepath[256]; + char filepath[PATH_MAX + 2]; /* NUL and '/' */ - snprintf(filepath, 256, "%s/%s", path, filename); + snprintf(filepath, sizeof(filepath), "%s/%s", path, filename); fd = fopen(filepath, "w"); if (!fd) { @@ -57,9 +58,9 @@ static int sysfs_get_ulong(char *path, char *filename, unsigned long *p_ulong) { FILE *fd; int ret = -1; - char filepath[256]; + char filepath[PATH_MAX + 2]; /* NUL and '/' */ - snprintf(filepath, 256, "%s/%s", path, filename); + snprintf(filepath, sizeof(filepath), "%s/%s", path, filename); fd = fopen(filepath, "r"); if (!fd) { @@ -76,9 +77,9 @@ static int sysfs_get_string(char *path, char *filename, char *str) { FILE *fd; int ret = -1; - char filepath[256]; + char filepath[PATH_MAX + 2]; /* NUL and '/' */ - snprintf(filepath, 256, "%s/%s", 
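The clocksource-switch fix just above addresses a classic pitfall: system() returns a wait status, not an exit code. Returning it raw can feed a value like 768 (3 << 8) into exit(), which keeps only the low byte and reports 0, masking the failure; the status must be unpacked with WIFEXITED()/WEXITSTATUS(). A minimal demonstration:

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

int main(void)
{
	int ret = system("exit 3");	/* child exits with code 3 */

	/* Raw wait status is 3 << 8 = 768; exit(768) would report 0. */
	printf("raw=%d exited=%d code=%d\n", ret, WIFEXITED(ret),
	       WIFEXITED(ret) ? WEXITSTATUS(ret) : -1);
	return WIFEXITED(ret) ? WEXITSTATUS(ret) : 1;
}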
path, filename); + snprintf(filepath, sizeof(filepath), "%s/%s", path, filename); fd = fopen(filepath, "r"); if (!fd) { @@ -199,8 +200,8 @@ static int find_tzone_cdev(struct dirent *nl, char *tz_name, { unsigned long trip_instance = 0; char cdev_name_linked[256]; - char cdev_name[256]; - char cdev_trip_name[256]; + char cdev_name[PATH_MAX]; + char cdev_trip_name[PATH_MAX]; int cdev_id; if (nl->d_type == DT_LNK) { @@ -213,7 +214,8 @@ static int find_tzone_cdev(struct dirent *nl, char *tz_name, return -EINVAL; } /* find the link to real cooling device record binding */ - snprintf(cdev_name, 256, "%s/%s", tz_name, nl->d_name); + snprintf(cdev_name, sizeof(cdev_name) - 2, "%s/%s", + tz_name, nl->d_name); memset(cdev_name_linked, 0, sizeof(cdev_name_linked)); if (readlink(cdev_name, cdev_name_linked, sizeof(cdev_name_linked) - 1) != -1) { @@ -226,8 +228,8 @@ static int find_tzone_cdev(struct dirent *nl, char *tz_name, /* find the trip point in which the cdev is binded to * in this tzone */ - snprintf(cdev_trip_name, 256, "%s%s", nl->d_name, - "_trip_point"); + snprintf(cdev_trip_name, sizeof(cdev_trip_name) - 1, + "%s%s", nl->d_name, "_trip_point"); sysfs_get_ulong(tz_name, cdev_trip_name, &trip_instance); /* validate trip point range, e.g. trip could return -1 diff --git a/tools/thermal/tmon/tmon.h b/tools/thermal/tmon/tmon.h index c9066ec104dd..44d16d778f04 100644 --- a/tools/thermal/tmon/tmon.h +++ b/tools/thermal/tmon/tmon.h @@ -27,6 +27,9 @@ #define NR_LINES_TZDATA 1 #define TMON_LOG_FILE "/var/tmp/tmon.log" +#include +#include + extern unsigned long ticktime; extern double time_elapsed; extern unsigned long target_temp_user; diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile index 0d7bbe49359d..1b25cc7c64bb 100644 --- a/tools/virtio/Makefile +++ b/tools/virtio/Makefile @@ -5,7 +5,8 @@ virtio_test: virtio_ring.o virtio_test.o vringh_test: vringh_test.o vringh.o virtio_ring.o CFLAGS += -g -O2 -Werror -Wno-maybe-uninitialized -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h -LDFLAGS += -lpthread +CFLAGS += -pthread +LDFLAGS += -pthread vpath %.c ../../drivers/virtio ../../drivers/vhost mod: ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V} diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/vm/slabinfo-gnuplot.sh index 26e193ffd2a2..873a892147e5 100644 --- a/tools/vm/slabinfo-gnuplot.sh +++ b/tools/vm/slabinfo-gnuplot.sh @@ -150,7 +150,7 @@ do_preprocess() let lines=3 out=`basename "$in"`"-slabs-by-loss" `cat "$in" | grep -A "$lines" 'Slabs sorted by loss' |\ - egrep -iv '\-\-|Name|Slabs'\ + grep -E -iv '\-\-|Name|Slabs'\ | awk '{print $1" "$4+$2*$3" "$4}' > "$out"` if [ $? -eq 0 ]; then do_slabs_plotting "$out" @@ -159,7 +159,7 @@ do_preprocess() let lines=3 out=`basename "$in"`"-slabs-by-size" `cat "$in" | grep -A "$lines" 'Slabs sorted by size' |\ - egrep -iv '\-\-|Name|Slabs'\ + grep -E -iv '\-\-|Name|Slabs'\ | awk '{print $1" "$4" "$4-$2*$3}' > "$out"` if [ $? 
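The tmon conversions replace fixed 256-byte path buffers with PATH_MAX-sized ones and size every snprintf() by the destination, so long sysfs paths are bounded predictably instead of being cut at an arbitrary 256 bytes. The pattern in isolation, with an explicit truncation check (helper name hypothetical):

#include <limits.h>
#include <stdio.h>

/* Join dir and file; PATH_MAX + 2 mirrors the patch ('/' plus NUL). */
static int make_sysfs_path(char *out, size_t outsz,
			   const char *dir, const char *file)
{
	int n = snprintf(out, outsz, "%s/%s", dir, file);

	return (n < 0 || (size_t)n >= outsz) ? -1 : 0;
}

int main(void)
{
	char path[PATH_MAX + 2];

	if (make_sysfs_path(path, sizeof(path),
			    "/sys/class/thermal/thermal_zone0", "temp"))
		fprintf(stderr, "path truncated\n");
	else
		puts(path);
	return 0;
}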
-eq 0 ]; then do_slabs_plotting "$out" diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c index 9b68658b6bb8..0fffaeedee76 100644 --- a/tools/vm/slabinfo.c +++ b/tools/vm/slabinfo.c @@ -125,7 +125,7 @@ static void usage(void) "-n|--numa Show NUMA information\n" "-N|--lines=K Show the first K slabs\n" "-o|--ops Show kmem_cache_ops\n" - "-P|--partial Sort by number of partial slabs\n" + "-P|--partial Sort by number of partial slabs\n" "-r|--report Detailed report on single slabs\n" "-s|--shrink Shrink slabs\n" "-S|--Size Sort by size\n" @@ -233,6 +233,24 @@ static unsigned long read_slab_obj(struct slabinfo *s, const char *name) return l; } +static unsigned long read_debug_slab_obj(struct slabinfo *s, const char *name) +{ + char x[128]; + FILE *f; + size_t l; + + snprintf(x, 128, "/sys/kernel/debug/slab/%s/%s", s->name, name); + f = fopen(x, "r"); + if (!f) { + buffer[0] = 0; + l = 0; + } else { + l = fread(buffer, 1, sizeof(buffer), f); + buffer[l] = 0; + fclose(f); + } + return l; +} /* * Put a size string together @@ -409,14 +427,18 @@ static void show_tracking(struct slabinfo *s) { printf("\n%s: Kernel object allocation\n", s->name); printf("-----------------------------------------------------------------------\n"); - if (read_slab_obj(s, "alloc_calls")) + if (read_debug_slab_obj(s, "alloc_traces")) + printf("%s", buffer); + else if (read_slab_obj(s, "alloc_calls")) printf("%s", buffer); else printf("No Data\n"); printf("\n%s: Kernel object freeing\n", s->name); printf("------------------------------------------------------------------------\n"); - if (read_slab_obj(s, "free_calls")) + if (read_debug_slab_obj(s, "free_traces")) + printf("%s", buffer); + else if (read_slab_obj(s, "free_calls")) printf("%s", buffer); else printf("No Data\n"); @@ -1045,15 +1067,27 @@ static void sort_slabs(void) for (s2 = s1 + 1; s2 < slabinfo + slabs; s2++) { int result; - if (sort_size) - result = slab_size(s1) < slab_size(s2); - else if (sort_active) - result = slab_activity(s1) < slab_activity(s2); - else if (sort_loss) - result = slab_waste(s1) < slab_waste(s2); - else if (sort_partial) - result = s1->partial < s2->partial; - else + if (sort_size) { + if (slab_size(s1) == slab_size(s2)) + result = strcasecmp(s1->name, s2->name); + else + result = slab_size(s1) < slab_size(s2); + } else if (sort_active) { + if (slab_activity(s1) == slab_activity(s2)) + result = strcasecmp(s1->name, s2->name); + else + result = slab_activity(s1) < slab_activity(s2); + } else if (sort_loss) { + if (slab_waste(s1) == slab_waste(s2)) + result = strcasecmp(s1->name, s2->name); + else + result = slab_waste(s1) < slab_waste(s2); + } else if (sort_partial) { + if (s1->partial == s2->partial) + result = strcasecmp(s1->name, s2->name); + else + result = s1->partial < s2->partial; + } else result = strcasecmp(s1->name, s2->name); if (show_inverted) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 9cd8ca2d8bc1..564d5c145fbe 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -159,6 +159,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, { } +__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) +{ +} + bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) { /* @@ -340,6 +344,12 @@ void kvm_reload_remote_mmus(struct kvm *kvm) kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); } +static void kvm_flush_shadow_all(struct kvm *kvm) +{ + kvm_arch_flush_shadow_all(kvm); + kvm_arch_guest_memory_reclaimed(kvm); +} + #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE static inline void 
*mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, gfp_t gfp_flags) @@ -489,6 +499,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); + kvm_arch_guest_memory_reclaimed(kvm); srcu_read_unlock(&kvm->srcu, idx); return 0; @@ -592,7 +603,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn, int idx; idx = srcu_read_lock(&kvm->srcu); - kvm_arch_flush_shadow_all(kvm); + kvm_flush_shadow_all(kvm); srcu_read_unlock(&kvm->srcu, idx); } @@ -896,7 +907,7 @@ static void kvm_destroy_vm(struct kvm *kvm) #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); #else - kvm_arch_flush_shadow_all(kvm); + kvm_flush_shadow_all(kvm); #endif kvm_arch_destroy_vm(kvm); kvm_destroy_devices(kvm); @@ -1238,6 +1249,7 @@ static int kvm_set_memslot(struct kvm *kvm, * - kvm_is_visible_gfn (mmu_check_root) */ kvm_arch_flush_shadow_memslot(kvm, slot); + kvm_arch_guest_memory_reclaimed(kvm); } r = kvm_arch_prepare_memory_region(kvm, new, mem, change); @@ -2339,16 +2351,28 @@ void kvm_release_pfn_dirty(kvm_pfn_t pfn) } EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty); +static bool kvm_is_ad_tracked_pfn(kvm_pfn_t pfn) +{ + if (!pfn_valid(pfn)) + return false; + + /* + * Per page-flags.h, pages tagged PG_reserved "should in general not be + * touched (e.g. set dirty) except by its owner". + */ + return !PageReserved(pfn_to_page(pfn)); +} + void kvm_set_pfn_dirty(kvm_pfn_t pfn) { - if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) + if (kvm_is_ad_tracked_pfn(pfn)) SetPageDirty(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); void kvm_set_pfn_accessed(kvm_pfn_t pfn) { - if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) + if (kvm_is_ad_tracked_pfn(pfn)) mark_page_accessed(pfn_to_page(pfn)); } EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); @@ -3252,7 +3276,7 @@ static long kvm_vcpu_ioctl(struct file *filp, struct kvm_fpu *fpu = NULL; struct kvm_sregs *kvm_sregs = NULL; - if (vcpu->kvm->mm != current->mm) + if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged) return -EIO; if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) @@ -3458,7 +3482,7 @@ static long kvm_vcpu_compat_ioctl(struct file *filp, void __user *argp = compat_ptr(arg); int r; - if (vcpu->kvm->mm != current->mm) + if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged) return -EIO; switch (ioctl) { @@ -3524,7 +3548,7 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, { struct kvm_device *dev = filp->private_data; - if (dev->kvm->mm != current->mm) + if (dev->kvm->mm != current->mm || dev->kvm->vm_bugged) return -EIO; switch (ioctl) { @@ -3644,8 +3668,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm, kvm_put_kvm_no_destroy(kvm); mutex_lock(&kvm->lock); list_del(&dev->vm_node); + if (ops->release) + ops->release(dev); mutex_unlock(&kvm->lock); - ops->destroy(dev); + if (ops->destroy) + ops->destroy(dev); return ret; } @@ -3740,7 +3767,7 @@ static long kvm_vm_ioctl(struct file *filp, void __user *argp = (void __user *)arg; int r; - if (kvm->mm != current->mm) + if (kvm->mm != current->mm || kvm->vm_bugged) return -EIO; switch (ioctl) { case KVM_CREATE_VCPU: @@ -3939,14 +3966,25 @@ struct compat_kvm_clear_dirty_log { }; }; +long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) +{ + return -ENOTTY; +} + static long kvm_vm_compat_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm 
= filp->private_data; int r; - if (kvm->mm != current->mm) + if (kvm->mm != current->mm || kvm->vm_bugged) return -EIO; + + r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg); + if (r != -ENOTTY) + return r; + switch (ioctl) { #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT case KVM_CLEAR_DIRTY_LOG: {
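The section ends with the new kvm_arch_vm_compat_ioctl() hook: the __weak default returns -ENOTTY ("not handled"), an architecture may provide a strong definition that overrides it at link time, and the generic compat path proceeds only when it still sees -ENOTTY. The linker mechanism as a userspace sketch (all names hypothetical; negative-errno returns follow kernel convention):

#include <errno.h>
#include <stdio.h>

/* Weak default, analogous to the __weak hook above; a strong
 * arch_ioctl() in another object file would replace it. */
__attribute__((weak)) long arch_ioctl(unsigned int cmd, unsigned long arg)
{
	(void)cmd;
	(void)arg;
	return -ENOTTY;
}

static long vm_ioctl(unsigned int cmd, unsigned long arg)
{
	long r = arch_ioctl(cmd, arg);

	if (r != -ENOTTY)
		return r;	/* the arch hook claimed this command */
	return 0;		/* fall through to generic handling */
}

int main(void)
{
	printf("vm_ioctl -> %ld\n", vm_ioctl(0x4b01, 0));
	return 0;
}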